Upgrade dependencies

Ingo Gottwald 2018-05-23 17:48:04 +02:00 committed by Traefiker Bot
parent d6d795e286
commit 83e09acc9f
177 changed files with 59841 additions and 39358 deletions

Gopkg.lock (generated), 42 changed lines

@@ -3,12 +3,9 @@
 [[projects]]
   name = "cloud.google.com/go"
-  packages = [
-    "compute/metadata",
-    "internal"
-  ]
-  revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c"
-  version = "v0.7.0"
+  packages = ["compute/metadata"]
+  revision = "056a55f54a6cc77b440b31a56a5e7c3982d32811"
+  version = "v0.22.0"

 [[projects]]
   branch = "master"
@@ -286,8 +283,8 @@
     "pkg/types",
     "version"
   ]
-  revision = "f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b"
-  version = "v3.2.9"
+  revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af"
+  version = "v3.3.5"

 [[projects]]
   name = "github.com/coreos/go-semver"
@@ -607,7 +604,9 @@
 [[projects]]
   name = "github.com/gogo/protobuf"
   packages = [
+    "gogoproto",
     "proto",
+    "protoc-gen-gogo/descriptor",
     "sortkeys"
   ]
   revision = "909568be09de550ed094403c2bf8a261b5bb730a"
@@ -627,7 +626,8 @@
     "ptypes/duration",
     "ptypes/timestamp"
   ]
-  revision = "4bd1920723d7b7c925de087aa32e2187708897f7"
+  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+  version = "v1.1.0"

 [[projects]]
   branch = "master"
@@ -657,11 +657,6 @@
   packages = ["."]
   revision = "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5"

-[[projects]]
-  name = "github.com/googleapis/gax-go"
-  packages = ["."]
-  revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"
-
 [[projects]]
   name = "github.com/googleapis/gnostic"
   packages = [
@@ -1178,7 +1173,8 @@
 [[projects]]
   name = "github.com/ugorji/go"
   packages = ["codec"]
-  revision = "ea9cd21fa0bc41ee4bdd50ac7ed8cbc7ea2ed960"
+  revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
+  version = "v1.1.1"

 [[projects]]
   name = "github.com/unrolled/render"
@@ -1400,23 +1396,33 @@
   name = "google.golang.org/grpc"
   packages = [
     ".",
+    "balancer",
+    "balancer/base",
+    "balancer/roundrobin",
+    "channelz",
     "codes",
     "connectivity",
     "credentials",
-    "grpclb/grpc_lb_v1",
+    "encoding",
+    "encoding/proto",
+    "grpclb/grpc_lb_v1/messages",
     "grpclog",
+    "health/grpc_health_v1",
     "internal",
     "keepalive",
     "metadata",
     "naming",
     "peer",
+    "resolver",
+    "resolver/dns",
+    "resolver/passthrough",
     "stats",
     "status",
     "tap",
     "transport"
   ]
-  revision = "b3ddf786825de56a4178401b7e174ee332173b66"
-  version = "v1.5.2"
+  revision = "41344da2231b913fa3d983840a57a6b1b7b631a1"
+  version = "v1.12.0"

 [[projects]]
   name = "gopkg.in/fsnotify.v1"


@@ -2,11 +2,11 @@ package integration

 import (
 	"context"
-	"crypto/rand"
 	"crypto/tls"
 	"crypto/x509"
 	"errors"
 	"io/ioutil"
+	"math/rand"
 	"net"
 	"os"
 	"time"
@@ -21,6 +21,8 @@ import (
 var LocalhostCert []byte
 var LocalhostKey []byte

+const randCharset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
+
 // GRPCSuite
 type GRPCSuite struct{ BaseSuite }
@@ -42,7 +44,9 @@ func (s *myserver) SayHello(ctx context.Context, in *helloworld.HelloRequest) (*
 func (s *myserver) StreamExample(in *helloworld.StreamExampleRequest, server helloworld.Greeter_StreamExampleServer) error {
 	data := make([]byte, 512)
-	rand.Read(data)
+	for i := range data {
+		data[i] = randCharset[rand.Intn(len(randCharset))]
+	}
 	server.Send(&helloworld.StreamExampleReply{Data: string(data)})

 	<-s.stopStreamExample
 	return nil

vendor/cloud.google.com/go/LICENSE (generated, vendored), 2 changed lines

@@ -187,7 +187,7 @@
    same "printed page" as the copyright notice for easier
    identification within third-party archives.

-   Copyright 2014 Google Inc.
+   Copyright [yyyy] [name of copyright owner]

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.


@@ -34,8 +34,6 @@ import (
 	"golang.org/x/net/context"
 	"golang.org/x/net/context/ctxhttp"
-
-	"cloud.google.com/go/internal"
 )

 const (
@@ -48,6 +46,8 @@ const (
 	// This is variable name is not defined by any spec, as far as
 	// I know; it was made up for the Go package.
 	metadataHostEnv = "GCE_METADATA_HOST"
+
+	userAgent = "gcloud-golang/0.1"
 )

 type cachedValue struct {
@@ -65,24 +65,20 @@ var (
 var (
 	metaClient = &http.Client{
-		Transport: &internal.Transport{
-			Base: &http.Transport{
-				Dial: (&net.Dialer{
-					Timeout:   2 * time.Second,
-					KeepAlive: 30 * time.Second,
-				}).Dial,
-				ResponseHeaderTimeout: 2 * time.Second,
-			},
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			ResponseHeaderTimeout: 2 * time.Second,
 		},
 	}

 	subscribeClient = &http.Client{
-		Transport: &internal.Transport{
-			Base: &http.Transport{
-				Dial: (&net.Dialer{
-					Timeout:   2 * time.Second,
-					KeepAlive: 30 * time.Second,
-				}).Dial,
-			},
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
 		},
 	}
 )
@@ -132,6 +128,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error)
 	url := "http://" + host + "/computeMetadata/v1/" + suffix
 	req, _ := http.NewRequest("GET", url, nil)
 	req.Header.Set("Metadata-Flavor", "Google")
+	req.Header.Set("User-Agent", userAgent)
 	res, err := client.Do(req)
 	if err != nil {
 		return "", "", err
@@ -202,7 +199,9 @@ func testOnGCE() bool {
 	// Try two strategies in parallel.
 	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
 	go func() {
-		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
+		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+		req.Header.Set("User-Agent", userAgent)
+		res, err := ctxhttp.Do(ctx, metaClient, req)
 		if err != nil {
 			resc <- false
 			return


@ -1,64 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides support for the cloud packages.
//
// Users should not import this package directly.
package internal
import (
"fmt"
"net/http"
)
const userAgent = "gcloud-golang/0.1"
// Transport is an http.RoundTripper that appends Google Cloud client's
// user-agent to the original request's user-agent header.
type Transport struct {
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
// Do User-Agent some other way.
// Base is the actual http.RoundTripper
// requests will use. It must not be nil.
Base http.RoundTripper
}
// RoundTrip appends a user-agent to the existing user-agent
// header and delegates the request to the base http.RoundTripper.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req = cloneRequest(req)
ua := req.Header.Get("User-Agent")
if ua == "" {
ua = userAgent
} else {
ua = fmt.Sprintf("%s %s", ua, userAgent)
}
req.Header.Set("User-Agent", ua)
return t.Base.RoundTrip(req)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}


@ -1,55 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
)
// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with ctx.Err().
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
return retry(ctx, bo, f, gax.Sleep)
}
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
sleep func(context.Context, time.Duration) error) error {
var lastErr error
for {
stop, err := f()
if stop {
return err
}
// Remember the last "real" error from f.
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
lastErr = err
}
p := bo.Pause()
if cerr := sleep(ctx, p); cerr != nil {
if lastErr != nil {
return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
}
return cerr
}
}
}


@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: auth.proto
-// DO NOT EDIT!

 /*
 	Package authpb is a generated protocol buffer package.
@@ -22,6 +21,8 @@ import (
 	math "math"

+	_ "github.com/gogo/protobuf/gogoproto"
+
 	io "io"
 )
@@ -217,24 +218,6 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }

-func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)


@@ -16,11 +16,10 @@ package client

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"net/http"
 	"net/url"
-
-	"golang.org/x/net/context"
 )

 type Role struct {


@@ -16,12 +16,11 @@ package client

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"net/http"
 	"net/url"
 	"path"
-
-	"golang.org/x/net/context"
 )

 var (


@@ -15,6 +15,7 @@
 package client

 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -29,8 +30,6 @@ import (
 	"time"

 	"github.com/coreos/etcd/version"
-
-	"golang.org/x/net/context"
 )

 var (
@@ -671,8 +670,15 @@ func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
 }

 func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
-	p := r.Perm(len(eps))
-	neps := make([]url.URL, len(eps))
+	// copied from Go 1.9<= rand.Rand.Perm
+	n := len(eps)
+	p := make([]int, n)
+	for i := 0; i < n; i++ {
+		j := r.Intn(i + 1)
+		p[i] = p[j]
+		p[j] = i
+	}
+	neps := make([]url.URL, n)
 	for i, k := range p {
 		neps[i] = eps[k]
 	}


@@ -19,9 +19,9 @@ Create a Config and exchange it for a Client:

 	import (
 		"net/http"
+		"context"

 		"github.com/coreos/etcd/client"
-		"golang.org/x/net/context"
 	)

 	cfg := client.Config{
@@ -59,7 +59,7 @@ Use a custom context to set timeouts on your operations:

 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()

-	// set a new key, ignoring it's previous state
+	// set a new key, ignoring its previous state
 	_, err := kAPI.Set(ctx, "/ping", "pong", nil)
 	if err != nil {
 		if err == context.DeadlineExceeded {

File diff suppressed because it is too large.


@@ -17,6 +17,7 @@ package client
 //go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go

 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -28,7 +29,6 @@ import (
 	"github.com/coreos/etcd/pkg/pathutil"
 	"github.com/ugorji/go/codec"
-
-	"golang.org/x/net/context"
 )

 const (
@@ -653,8 +653,7 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp
 	default:
 		err = unmarshalFailedKeysResponse(body)
 	}
-
-	return
+	return res, err
 }

 func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {


@@ -16,14 +16,13 @@ package client

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"net/http"
 	"net/url"
 	"path"

-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/pkg/types"
 )
@@ -44,7 +43,7 @@ type Member struct {
 	PeerURLs []string `json:"peerURLs"`

 	// ClientURLs represents the HTTP(S) endpoints on which this Member
-	// serves it's client-facing APIs.
+	// serves its client-facing APIs.
 	ClientURLs []string `json:"clientURLs"`
 }


@@ -15,12 +15,13 @@
 package clientv3

 import (
+	"context"
 	"fmt"
 	"strings"

 	"github.com/coreos/etcd/auth/authpb"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"golang.org/x/net/context"
+
 	"google.golang.org/grpc"
 )
@ -100,60 +101,65 @@ type Auth interface {
} }
type auth struct { type auth struct {
remote pb.AuthClient remote pb.AuthClient
callOpts []grpc.CallOption
} }
func NewAuth(c *Client) Auth { func NewAuth(c *Client) Auth {
return &auth{remote: pb.NewAuthClient(c.ActiveConnection())} api := &auth{remote: RetryAuthClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
return (*AuthEnableResponse)(resp), toErr(ctx, err) return (*AuthEnableResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
return (*AuthDisableResponse)(resp), toErr(ctx, err) return (*AuthDisableResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserAddResponse)(resp), toErr(ctx, err) return (*AuthUserAddResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}) resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
return (*AuthUserGetResponse)(resp), toErr(ctx, err) return (*AuthUserGetResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
return (*AuthUserListResponse)(resp), toErr(ctx, err) return (*AuthUserListResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
return (*AuthRoleAddResponse)(resp), toErr(ctx, err) return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
} }
@ -163,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
RangeEnd: []byte(rangeEnd), RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType), PermType: authpb.Permission_Type(permType),
} }
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
return (*AuthRoleGetResponse)(resp), toErr(ctx, err) return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
return (*AuthRoleListResponse)(resp), toErr(ctx, err) return (*AuthRoleListResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
} }
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
} }
@ -196,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) {
} }
type authenticator struct { type authenticator struct {
conn *grpc.ClientConn // conn in-use conn *grpc.ClientConn // conn in-use
remote pb.AuthClient remote pb.AuthClient
callOpts []grpc.CallOption
} }
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false)) resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthenticateResponse)(resp), toErr(ctx, err) return (*AuthenticateResponse)(resp), toErr(ctx, err)
} }
@ -209,14 +216,18 @@ func (auth *authenticator) close() {
auth.conn.Close() auth.conn.Close()
} }
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...) conn, err := grpc.Dial(endpoint, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &authenticator{ api := &authenticator{
conn: conn, conn: conn,
remote: pb.NewAuthClient(conn), remote: pb.NewAuthClient(conn),
}, nil }
if c != nil {
api.callOpts = c.callOpts
}
return api, nil
} }


@ -1,356 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// addrs are the client's endpoints for grpc
addrs []grpc.Address
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// mu protects upEps, pinAddr, and connectingAddr
mu sync.RWMutex
// upc closes when upEps transitions from empty to non-zero or the balancer closes.
upc chan struct{}
// downc closes when grpc calls down() on pinAddr
downc chan struct{}
// stopc is closed to signal updateNotifyLoop should stop.
stopc chan struct{}
// donec closes when all goroutines are exited
donec chan struct{}
// updateAddrsC notifies updateNotifyLoop to update addrs.
updateAddrsC chan struct{}
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// intialization and shutdown.
pinAddr string
closed bool
}
func newSimpleBalancer(eps []string) *simpleBalancer {
notifyCh := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
sb := &simpleBalancer{
addrs: addrs,
notifyCh: notifyCh,
readyc: make(chan struct{}),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
updateAddrsC: make(chan struct{}, 1),
host2ep: getHost2ep(eps),
}
close(sb.downc)
go sb.updateNotifyLoop()
return sb
}
func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *simpleBalancer) getEndpoint(host string) string {
b.mu.Lock()
defer b.mu.Unlock()
return b.host2ep[host]
}
func getHost2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}
func (b *simpleBalancer) updateAddrs(eps []string) {
np := getHost2ep(eps)
b.mu.Lock()
match := len(np) == len(b.host2ep)
for k, v := range np {
if b.host2ep[k] != v {
match = false
break
}
}
if match {
// same endpoints, so no need to update address
b.mu.Unlock()
return
}
b.host2ep = np
addrs := make([]grpc.Address, 0, len(eps))
for i := range eps {
addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
}
b.addrs = addrs
// updating notifyCh can trigger new connections,
// only update addrs if all connections are down
// or addrs does not include pinAddr.
update := !hasAddr(addrs, b.pinAddr)
b.mu.Unlock()
if update {
select {
case b.updateAddrsC <- struct{}{}:
case <-b.stopc:
}
}
}
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
for _, addr := range addrs {
if targetAddr == addr.Addr {
return true
}
}
return false
}
func (b *simpleBalancer) updateNotifyLoop() {
defer close(b.donec)
for {
b.mu.RLock()
upc, downc, addr := b.upc, b.downc, b.pinAddr
b.mu.RUnlock()
// downc or upc should be closed
select {
case <-downc:
downc = nil
default:
}
select {
case <-upc:
upc = nil
default:
}
switch {
case downc == nil && upc == nil:
// stale
select {
case <-b.stopc:
return
default:
}
case downc == nil:
b.notifyAddrs()
select {
case <-upc:
case <-b.updateAddrsC:
b.notifyAddrs()
case <-b.stopc:
return
}
case upc == nil:
select {
// close connections that are not the pinned address
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
case <-downc:
case <-b.stopc:
return
}
select {
case <-downc:
case <-b.updateAddrsC:
case <-b.stopc:
return
}
b.notifyAddrs()
}
}
}
func (b *simpleBalancer) notifyAddrs() {
b.mu.RLock()
addrs := b.addrs
b.mu.RUnlock()
select {
case b.notifyCh <- addrs:
case <-b.stopc:
}
}
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at application layer. Or our simplerBalancer
// might panic since b.upc is closed.
if b.closed {
return func(err error) {}
}
// gRPC might call Up on a stale address.
// Prevent updating pinAddr with a stale address.
if !hasAddr(b.addrs, addr.Addr) {
return func(err error) {}
}
if b.pinAddr != "" {
return func(err error) {}
}
// notify waiting Get()s and pin first connected address
close(b.upc)
b.downc = make(chan struct{})
b.pinAddr = addr.Addr
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
b.mu.Lock()
b.upc = make(chan struct{})
close(b.downc)
b.pinAddr = ""
b.mu.Unlock()
}
}
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var (
addr string
closed bool
)
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr == "" {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-b.donec:
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr != "" {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *simpleBalancer) Close() error {
b.mu.Lock()
// In case gRPC calls close twice. TODO: remove the checking
// when we are sure that gRPC wont call close twice.
if b.closed {
b.mu.Unlock()
<-b.donec
return nil
}
b.closed = true
close(b.stopc)
b.pinAddr = ""
// In the case of following scenario:
// 1. upc is not closed; no pinned address
// 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks
// 3. clientconn.Close() calls balancer.Close(); closed = true
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
// we must close upc so Get() exits from blocking on upc
select {
case <-b.upc:
default:
// terminate all waiting Get()s
close(b.upc)
}
b.mu.Unlock()
// wait for updateNotifyLoop to finish
<-b.donec
close(b.notifyCh)
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}


@@ -15,6 +15,7 @@
 package clientv3

 import (
+	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
@@ -27,11 +28,12 @@
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"

-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
 )

 var (
@ -51,21 +53,22 @@ type Client struct {
conn *grpc.ClientConn conn *grpc.ClientConn
dialerrc chan error dialerrc chan error
cfg Config cfg Config
creds *credentials.TransportCredentials creds *credentials.TransportCredentials
balancer *simpleBalancer balancer *healthBalancer
retryWrapper retryRpcFunc mu *sync.Mutex
retryAuthWrapper retryRpcFunc
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
// Username is a username for authentication // Username is a user name for authentication.
Username string Username string
// Password is a password for authentication // Password is a password for authentication.
Password string Password string
// tokenCred is an instance of WithPerRPCCredentials()'s argument // tokenCred is an instance of WithPerRPCCredentials()'s argument
tokenCred *authTokenCredential tokenCred *authTokenCredential
callOpts []grpc.CallOption
} }
// New creates a new etcdv3 client from a given configuration. // New creates a new etcdv3 client from a given configuration.
@ -116,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) {
// SetEndpoints updates client's endpoints. // SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) { func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
c.cfg.Endpoints = eps c.cfg.Endpoints = eps
c.balancer.updateAddrs(eps) c.mu.Unlock()
c.balancer.updateAddrs(eps...)
// updating notifyCh can trigger new connections,
// need update addrs if all connections are down
// or addrs does not include pinAddr.
c.balancer.mu.RLock()
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
c.balancer.mu.RUnlock()
if update {
select {
case c.balancer.updateAddrsC <- notifyNext:
case <-c.balancer.stopc:
}
}
} }
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. // Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
@ -144,8 +162,10 @@ func (c *Client) autoSync() {
case <-c.ctx.Done(): case <-c.ctx.Done():
return return
case <-time.After(c.cfg.AutoSyncInterval): case <-time.After(c.cfg.AutoSyncInterval):
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
logger.Println("Auto sync endpoints failed:", err) logger.Println("Auto sync endpoints failed:", err)
} }
} }
@ -174,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
host = endpoint host = endpoint
url, uerr := url.Parse(endpoint) url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") { if uerr != nil || !strings.Contains(endpoint, "://") {
return return proto, host, scheme
} }
scheme = url.Scheme scheme = url.Scheme
@ -188,7 +208,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
default: default:
proto, host = "", "" proto, host = "", ""
} }
return return proto, host, scheme
} }
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
@ -207,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
default: default:
creds = nil creds = nil
} }
return return creds
} }
// dialSetupOpts gives the dial opts prior to any authentication // dialSetupOpts gives the dial opts prior to any authentication
@ -215,10 +235,17 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
if c.cfg.DialTimeout > 0 { if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
} }
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
Time: c.cfg.DialKeepAliveTime,
Timeout: c.cfg.DialKeepAliveTimeout,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...) opts = append(opts, dopts...)
f := func(host string, t time.Duration) (net.Conn, error) { f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
if host == "" && endpoint != "" { if host == "" && endpoint != "" {
// dialing an endpoint not in the balancer; use // dialing an endpoint not in the balancer; use
// endpoint passed into dial // endpoint passed into dial
@ -270,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error {
endpoint := c.cfg.Endpoints[i] endpoint := c.cfg.Endpoints[i]
host := getHost(endpoint) host := getHost(endpoint)
// use dial options without dopts to avoid reusing the client balancer // use dial options without dopts to avoid reusing the client balancer
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
if err != nil { if err != nil {
continue continue
} }
@ -311,7 +338,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
if err != nil { if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() { if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = grpc.ErrClientConnTimeout err = context.DeadlineExceeded
} }
return nil, err return nil, err
} }
@ -333,7 +360,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader. // when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context { func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md) return metadata.NewOutgoingContext(ctx, md)
} }
func newClient(cfg *Config) (*Client, error) { func newClient(cfg *Config) (*Client, error) {
@ -360,15 +387,37 @@ func newClient(cfg *Config) (*Client, error) {
creds: creds, creds: creds,
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
mu: new(sync.Mutex),
callOpts: defaultCallOpts,
} }
if cfg.Username != "" && cfg.Password != "" { if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username client.Username = cfg.Username
client.Password = cfg.Password client.Password = cfg.Password
} }
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
}
callOpts := []grpc.CallOption{
defaultFailFast,
defaultMaxCallSendMsgSize,
defaultMaxCallRecvMsgSize,
}
if cfg.MaxCallSendMsgSize > 0 {
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
}
if cfg.MaxCallRecvMsgSize > 0 {
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
}
client.callOpts = callOpts
}
client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
return grpcHealthCheck(client, ep)
})
client.balancer = newSimpleBalancer(cfg.Endpoints)
// use Endpoints[0] so that for https:// without any tls config given, then // use Endpoints[0] so that for https:// without any tls config given, then
// grpc will assume the ServerName is in the endpoint. // grpc will assume the certificate server name is the endpoint host.
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil { if err != nil {
client.cancel() client.cancel()
@ -376,21 +425,19 @@ func newClient(cfg *Config) (*Client, error) {
return nil, err return nil, err
} }
client.conn = conn client.conn = conn
client.retryWrapper = client.newRetryWrapper()
client.retryAuthWrapper = client.newAuthRetryWrapper()
// wait for a connection // wait for a connection
if cfg.DialTimeout > 0 { if cfg.DialTimeout > 0 {
hasConn := false hasConn := false
waitc := time.After(cfg.DialTimeout) waitc := time.After(cfg.DialTimeout)
select { select {
case <-client.balancer.readyc: case <-client.balancer.ready():
hasConn = true hasConn = true
case <-ctx.Done(): case <-ctx.Done():
case <-waitc: case <-waitc:
} }
if !hasConn { if !hasConn {
err := grpc.ErrClientConnTimeout err := context.DeadlineExceeded
select { select {
case err = <-client.dialerrc: case err = <-client.dialerrc:
default: default:
@ -425,7 +472,7 @@ func (c *Client) checkVersion() (err error) {
errc := make(chan error, len(c.cfg.Endpoints)) errc := make(chan error, len(c.cfg.Endpoints))
ctx, cancel := context.WithCancel(c.ctx) ctx, cancel := context.WithCancel(c.ctx)
if c.cfg.DialTimeout > 0 { if c.cfg.DialTimeout > 0 {
ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout) ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
} }
wg.Add(len(c.cfg.Endpoints)) wg.Add(len(c.cfg.Endpoints))
for _, ep := range c.cfg.Endpoints { for _, ep := range c.cfg.Endpoints {
@ -440,7 +487,7 @@ func (c *Client) checkVersion() (err error) {
vs := strings.Split(resp.Version, ".") vs := strings.Split(resp.Version, ".")
maj, min := 0, 0 maj, min := 0, 0
if len(vs) >= 2 { if len(vs) >= 2 {
maj, rerr = strconv.Atoi(vs[0]) maj, _ = strconv.Atoi(vs[0])
min, rerr = strconv.Atoi(vs[1]) min, rerr = strconv.Atoi(vs[1])
} }
if maj < 3 || (maj == 3 && min < 2) { if maj < 3 || (maj == 3 && min < 2) {
@ -472,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool {
if err == nil { if err == nil {
return false return false
} }
code := grpc.Code(err) ev, _ := status.FromError(err)
// Unavailable codes mean the system will be right back. // Unavailable codes mean the system will be right back.
// (e.g., can't connect, lost leader) // (e.g., can't connect, lost leader)
// Treat Internal codes as if something failed, leaving the // Treat Internal codes as if something failed, leaving the
// system in an inconsistent state, but retrying could make progress. // system in an inconsistent state, but retrying could make progress.
// (e.g., failed in middle of send, corrupted frame) // (e.g., failed in middle of send, corrupted frame)
// TODO: are permanent Internal errors possible from grpc? // TODO: are permanent Internal errors possible from grpc?
return code != codes.Unavailable && code != codes.Internal return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
} }
func toErr(ctx context.Context, err error) error { func toErr(ctx context.Context, err error) error {
@ -490,7 +537,8 @@ func toErr(ctx context.Context, err error) error {
if _, ok := err.(rpctypes.EtcdError); ok { if _, ok := err.(rpctypes.EtcdError); ok {
return err return err
} }
code := grpc.Code(err) ev, _ := status.FromError(err)
code := ev.Code()
switch code { switch code {
case codes.DeadlineExceeded: case codes.DeadlineExceeded:
fallthrough fallthrough
@ -499,7 +547,6 @@ func toErr(ctx context.Context, err error) error {
err = ctx.Err() err = ctx.Err()
} }
case codes.Unavailable: case codes.Unavailable:
err = ErrNoAvailableEndpoints
case codes.FailedPrecondition: case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing err = grpc.ErrClientConnClosing
} }


@@ -15,8 +15,11 @@
 package clientv3

 import (
+	"context"
+
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"golang.org/x/net/context"
+	"github.com/coreos/etcd/pkg/types"
+
 	"google.golang.org/grpc"
 )
@ -43,20 +46,34 @@ type Cluster interface {
} }
type cluster struct { type cluster struct {
remote pb.ClusterClient remote pb.ClusterClient
callOpts []grpc.CallOption
} }
func NewCluster(c *Client) Cluster { func NewCluster(c *Client) Cluster {
return &cluster{remote: RetryClusterClient(c)} api := &cluster{remote: RetryClusterClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster { func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
return &cluster{remote: remote} api := &cluster{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
// fail-fast before panic in rafthttp
if _, err := types.NewURLs(peerAddrs); err != nil {
return nil, err
}
r := &pb.MemberAddRequest{PeerURLs: peerAddrs} r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r) resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
@ -65,7 +82,7 @@ func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAdd
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id} r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r) resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
@ -73,28 +90,25 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes
} }
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update. // fail-fast before panic in rafthttp
for { if _, err := types.NewURLs(peerAddrs); err != nil {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} return nil, err
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
} }
// it is safe to retry on update.
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
return nil, toErr(ctx, err)
} }
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list. // it is safe to retry on list.
for { resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) if err == nil {
if err == nil { return (*MemberListResponse)(resp), nil
return (*MemberListResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
} }
return nil, toErr(ctx, err)
} }


@@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
 	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
 }

-// WithCompactPhysical makes compact RPC call wait until
-// the compaction is physically applied to the local database
-// such that compacted entries are totally removed from the
-// backend database.
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
 func WithCompactPhysical() CompactOption {
 	return func(op *CompactOp) { op.physical = true }
 }


@ -60,6 +60,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD: case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
case pb.Compare_LEASE:
cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
default: default:
panic("Unknown compare type") panic("Unknown compare type")
} }
@ -82,6 +84,12 @@ func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD} return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
} }
// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
// LeaseID is 0, otherwise known as `NoLease`.
func LeaseValue(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
}
// KeyBytes returns the byte slice holding with the comparison key. // KeyBytes returns the byte slice holding with the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
@ -99,6 +107,19 @@ func (cmp *Cmp) ValueBytes() []byte {
// WithValueBytes sets the byte slice for the comparison's value. // WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
// WithRange sets the comparison to scan the range [key, end).
func (cmp Cmp) WithRange(end string) Cmp {
cmp.RangeEnd = []byte(end)
return cmp
}
// WithPrefix sets the comparison to scan all keys prefixed by the key.
func (cmp Cmp) WithPrefix() Cmp {
cmp.RangeEnd = getPrefix(cmp.Key)
return cmp
}
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
func mustInt64(val interface{}) int64 { func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok { if v, ok := val.(int64); ok {
return v return v
@ -108,3 +129,12 @@ func mustInt64(val interface{}) int64 {
} }
panic("bad value") panic("bad value")
} }
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
func mustInt64orLeaseID(val interface{}) int64 {
if v, ok := val.(LeaseID); ok {
return int64(v)
}
return mustInt64(val)
}


@@ -15,13 +15,13 @@
 package concurrency

 import (
+	"context"
 	"errors"
 	"fmt"

 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
-	"golang.org/x/net/context"
 )

 var (
@ -185,12 +185,12 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
cancel() cancel()
return return
} }
// only accept PUTs; a DELETE will make observe() spin // only accept puts; a delete will make observe() spin
for _, ev := range wr.Events { for _, ev := range wr.Events {
if ev.Type == mvccpb.PUT { if ev.Type == mvccpb.PUT {
hdr, kv = &wr.Header, ev.Kv hdr, kv = &wr.Header, ev.Kv
// may have multiple revs; hdr.rev = the last rev // may have multiple revs; hdr.rev = the last rev
// set to kv's rev in case batch has multiple PUTs // set to kv's rev in case batch has multiple Puts
hdr.Revision = kv.ModRevision hdr.Revision = kv.ModRevision
break break
} }
@ -213,6 +213,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
for !keyDeleted { for !keyDeleted {
wr, ok := <-wch wr, ok := <-wch
if !ok { if !ok {
cancel()
return return
} }
for _, ev := range wr.Events { for _, ev := range wr.Events {
@ -225,6 +226,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
select { select {
case ch <- *resp: case ch <- *resp:
case <-cctx.Done(): case <-cctx.Done():
cancel()
return return
} }
} }
@@ -240,4 +242,4 @@ func (e *Election) Key() string { return e.leaderKey }
 func (e *Election) Rev() int64 { return e.leaderRev }

 // Header is the response header from the last successful election proposal.
-func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }


@@ -15,12 +15,12 @@
 package concurrency

 import (
+	"context"
 	"fmt"

 	v3 "github.com/coreos/etcd/clientv3"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc/mvccpb"
-	"golang.org/x/net/context"
 )

 func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {


@ -15,12 +15,12 @@
package concurrency package concurrency
import ( import (
"context"
"fmt" "fmt"
"sync" "sync"
v3 "github.com/coreos/etcd/clientv3" v3 "github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
) )
// Mutex implements the sync Locker interface with etcd // Mutex implements the sync Locker interface with etcd
@ -49,7 +49,9 @@ func (m *Mutex) Lock(ctx context.Context) error {
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock // reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey) get := v3.OpGet(m.myKey)
resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit() // fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil { if err != nil {
return err return err
} }
@ -57,6 +59,12 @@ func (m *Mutex) Lock(ctx context.Context) error {
if !resp.Succeeded { if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
} }
// if no key on prefix / the minimum rev is key, already hold the lock
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}
// wait for deletion revisions prior to myKey // wait for deletion revisions prior to myKey
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
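
For context, a hedged usage sketch of the Mutex changed above, assuming a connected *clientv3.Client named cli and the same imports as the election sketch; the lock prefix is a placeholder.

func mutexSketch(ctx context.Context, cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/")
	// With the getOwner change above, an uncontended Lock completes in a single Txn RPC.
	if err := m.Lock(ctx); err != nil {
		return err
	}
	// ... critical section ...
	return m.Unlock(ctx)
}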


@ -15,10 +15,10 @@
package concurrency package concurrency
import ( import (
"context"
"time" "time"
v3 "github.com/coreos/etcd/clientv3" v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
) )
const defaultSessionTTL = 60 const defaultSessionTTL = 60
@ -53,6 +53,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
ctx, cancel := context.WithCancel(ops.ctx) ctx, cancel := context.WithCancel(ops.ctx)
keepAlive, err := client.KeepAlive(ctx, id) keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil { if err != nil || keepAlive == nil {
cancel()
return nil, err return nil, err
} }


@ -15,10 +15,10 @@
package concurrency package concurrency
import ( import (
"context"
"math" "math"
v3 "github.com/coreos/etcd/clientv3" v3 "github.com/coreos/etcd/clientv3"
"golang.org/x/net/context"
) )
// STM is an interface for software transactional memory. // STM is an interface for software transactional memory.
@ -46,7 +46,7 @@ const (
// SerializableSnapshot provides serializable isolation and also checks // SerializableSnapshot provides serializable isolation and also checks
// for write conflicts. // for write conflicts.
SerializableSnapshot Isolation = iota SerializableSnapshot Isolation = iota
// Serializable reads within the same transactiona attempt return data // Serializable reads within the same transaction attempt return data
// from the at the revision of the first read. // from the at the revision of the first read.
Serializable Serializable
// RepeatableReads reads within the same transaction attempt always // RepeatableReads reads within the same transaction attempt always
@ -85,7 +85,7 @@ func WithPrefetch(keys ...string) stmOption {
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
} }
// NewSTM initiates a new STM instance, using snapshot isolation by default. // NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
opts := &stmOptions{ctx: c.Ctx()} opts := &stmOptions{ctx: c.Ctx()}
for _, f := range so { for _, f := range so {
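
An illustrative sketch of NewSTM with the default SerializableSnapshot isolation described above; cli is an assumed *clientv3.Client, and the account keys are placeholders.

func stmSketch(cli *clientv3.Client) error {
	apply := func(stm concurrency.STM) error {
		a, b := stm.Get("accounts/a"), stm.Get("accounts/b")
		// ... compute new balances from a and b ...
		stm.Put("accounts/a", a)
		stm.Put("accounts/b", b)
		return nil
	}
	// retries automatically on write conflicts until the transaction commits
	_, err := concurrency.NewSTM(cli, apply)
	return err
}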


@ -15,10 +15,10 @@
package clientv3 package clientv3
import ( import (
"context"
"crypto/tls" "crypto/tls"
"time" "time"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -33,10 +33,31 @@ type Config struct {
// DialTimeout is the timeout for failing to establish a connection. // DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration `json:"dial-timeout"` DialTimeout time.Duration `json:"dial-timeout"`
// DialKeepAliveTime is the time after which client pings the server to see if
// transport is alive.
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
// DialKeepAliveTimeout is the time that the client waits for a response for the
// keep-alive probe. If the response is not received in this time, the connection is closed.
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
// MaxCallSendMsgSize is the client-side request send limit in bytes.
// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
MaxCallSendMsgSize int
// MaxCallRecvMsgSize is the client-side response receive limit.
// If 0, it defaults to "math.MaxInt32", because range response can
// easily exceed request send limits.
// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
MaxCallRecvMsgSize int
// TLS holds the client secure credentials, if any. // TLS holds the client secure credentials, if any.
TLS *tls.Config TLS *tls.Config
// Username is a username for authentication. // Username is a user name for authentication.
Username string `json:"username"` Username string `json:"username"`
// Password is a password for authentication. // Password is a password for authentication.
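
A minimal sketch (values are illustrative, not recommendations) showing the new Config fields added above; it assumes imports of math, time and github.com/coreos/etcd/clientv3.

func newClientSketch() (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:            []string{"localhost:2379"},
		DialTimeout:          5 * time.Second,
		DialKeepAliveTime:    30 * time.Second, // ping the server every 30s to probe the transport
		DialKeepAliveTimeout: 10 * time.Second, // close the connection if no keep-alive response within 10s
		MaxCallSendMsgSize:   2 * 1024 * 1024,  // keep below the server's --max-request-bytes
		MaxCallRecvMsgSize:   math.MaxInt32,    // range responses can exceed the send limit
	})
}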


@ -16,6 +16,22 @@
// //
// Create client using `clientv3.New`: // Create client using `clientv3.New`:
// //
// // expect dial time-out on ipv4 blackhole
// _, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"http://254.0.0.1:12345"},
// DialTimeout: 2 * time.Second
// })
//
// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
// if err == context.DeadlineExceeded {
// // handle errors
// }
//
// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
// if err == grpc.ErrClientConnTimeout {
// // handle errors
// }
//
// cli, err := clientv3.New(clientv3.Config{ // cli, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, // Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
// DialTimeout: 5 * time.Second, // DialTimeout: 5 * time.Second,
@ -28,7 +44,7 @@
// Make sure to close the client after using it. If the client is not closed, the // Make sure to close the client after using it. If the client is not closed, the
// connection will have leaky goroutines. // connection will have leaky goroutines.
// //
// To specify client request timeout, pass context.WithTimeout to APIs: // To specify a client request timeout, wrap the context with context.WithTimeout:
// //
// ctx, cancel := context.WithTimeout(context.Background(), timeout) // ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value") // resp, err := kvc.Put(ctx, "sample_key", "sample_value")
@ -41,10 +57,11 @@
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. // The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines. // Clients are safe for concurrent use by multiple goroutines.
// //
// etcd client returns 2 types of errors: // etcd client returns 3 types of errors:
// //
// 1. context error: canceled or deadline exceeded. // 1. context error: canceled or deadline exceeded.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go // 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
// //
// Here is the example code to handle client errors: // Here is the example code to handle client errors:
// //
@ -54,6 +71,12 @@
// // ctx is canceled by another routine // // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded { // } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded // // ctx is attached with a deadline and it exceeded
// } else if ev, ok := status.FromError(err); ok {
// code := ev.Code()
// if code == codes.DeadlineExceeded {
// // server-side context might have timed-out first (due to clock skew)
// // while original client-side context is not timed-out yet
// }
// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { // } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
// // process (verr.Errors) // // process (verr.Errors)
// } else { // } else {
@ -61,4 +84,14 @@
// } // }
// } // }
// //
// go func() { cli.Close() }()
// _, err := kvc.Get(ctx, "a")
// if err != nil {
// if err == context.Canceled {
// // grpc balancer calls 'Get' with an inflight client.Close
// } else if err == grpc.ErrClientConnClosing {
// // grpc balancer calls 'Get' after client.Close.
// }
// }
//
package clientv3 package clientv3
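
A runnable variant of the error handling the package comment above describes, assuming a connected *clientv3.Client named cli and imports of context, time, google.golang.org/grpc/codes and google.golang.org/grpc/status; the key is a placeholder.

func errorSketch(cli *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	_, err := cli.Get(ctx, "sample_key")
	cancel()
	switch {
	case err == nil:
		// success
	case err == context.Canceled:
		// ctx was canceled by another goroutine
	case err == context.DeadlineExceeded:
		// client-side deadline exceeded
	default:
		if ev, ok := status.FromError(err); ok && ev.Code() == codes.DeadlineExceeded {
			// server-side deadline may have fired first due to clock drift
		}
		// otherwise an etcd-defined gRPC error; see rpctypes/error.go
	}
}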


@ -0,0 +1,609 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"errors"
"net/url"
"strings"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
const (
minHealthRetryDuration = 3 * time.Second
unknownService = "unknown service grpc.health.v1.Health"
)
// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
type healthCheckFunc func(ep string) (bool, error)
type notifyMsg int
const (
notifyReset notifyMsg = iota
notifyNext
)
// healthBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type healthBalancer struct {
// addrs are the client's endpoint addresses for grpc
addrs []grpc.Address
// eps holds the raw endpoints from the client
eps []string
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// healthCheck checks an endpoint's health.
healthCheck healthCheckFunc
healthCheckTimeout time.Duration
unhealthyMu sync.RWMutex
unhealthyHostPorts map[string]time.Time
// mu protects all fields below.
mu sync.RWMutex
// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
upc chan struct{}
// downc closes when grpc calls down() on pinAddr
downc chan struct{}
// stopc is closed to signal updateNotifyLoop should stop.
stopc chan struct{}
stopOnce sync.Once
wg sync.WaitGroup
// donec closes when all goroutines are exited
donec chan struct{}
// updateAddrsC notifies updateNotifyLoop to update addrs.
updateAddrsC chan notifyMsg
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
hostPort2ep map[string]string
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
closed bool
}
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
notifyCh := make(chan []grpc.Address)
addrs := eps2addrs(eps)
hb := &healthBalancer{
addrs: addrs,
eps: eps,
notifyCh: notifyCh,
readyc: make(chan struct{}),
healthCheck: hc,
unhealthyHostPorts: make(map[string]time.Time),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
updateAddrsC: make(chan notifyMsg),
hostPort2ep: getHostPort2ep(eps),
}
if timeout < minHealthRetryDuration {
timeout = minHealthRetryDuration
}
hb.healthCheckTimeout = timeout
close(hb.downc)
go hb.updateNotifyLoop()
hb.wg.Add(1)
go func() {
defer hb.wg.Done()
hb.updateUnhealthy()
}()
return hb
}
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
func (b *healthBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
func (b *healthBalancer) endpoint(hostPort string) string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.hostPort2ep[hostPort]
}
func (b *healthBalancer) pinned() string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.pinAddr
}
func (b *healthBalancer) hostPortError(hostPort string, err error) {
if b.endpoint(hostPort) == "" {
logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
return
}
b.unhealthyMu.Lock()
b.unhealthyHostPorts[hostPort] = time.Now()
b.unhealthyMu.Unlock()
logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
}
func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
if b.endpoint(hostPort) == "" {
logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
return
}
b.unhealthyMu.Lock()
delete(b.unhealthyHostPorts, hostPort)
b.unhealthyMu.Unlock()
logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
}
func (b *healthBalancer) countUnhealthy() (count int) {
b.unhealthyMu.RLock()
count = len(b.unhealthyHostPorts)
b.unhealthyMu.RUnlock()
return count
}
func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
b.unhealthyMu.RLock()
_, unhealthy = b.unhealthyHostPorts[hostPort]
b.unhealthyMu.RUnlock()
return unhealthy
}
func (b *healthBalancer) cleanupUnhealthy() {
b.unhealthyMu.Lock()
for k, v := range b.unhealthyHostPorts {
if time.Since(v) > b.healthCheckTimeout {
delete(b.unhealthyHostPorts, k)
logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
}
}
b.unhealthyMu.Unlock()
}
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
unhealthyCnt := b.countUnhealthy()
b.mu.RLock()
defer b.mu.RUnlock()
hbAddrs := b.addrs
if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
for k := range b.hostPort2ep {
liveHostPorts[k] = struct{}{}
}
return hbAddrs, liveHostPorts
}
addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
liveHostPorts := make(map[string]struct{}, len(addrs))
for _, addr := range b.addrs {
if !b.isUnhealthy(addr.Addr) {
addrs = append(addrs, addr)
liveHostPorts[addr.Addr] = struct{}{}
}
}
return addrs, liveHostPorts
}
func (b *healthBalancer) updateUnhealthy() {
for {
select {
case <-time.After(b.healthCheckTimeout):
b.cleanupUnhealthy()
pinned := b.pinned()
if pinned == "" || b.isUnhealthy(pinned) {
select {
case b.updateAddrsC <- notifyNext:
case <-b.stopc:
return
}
}
case <-b.stopc:
return
}
}
}
func (b *healthBalancer) updateAddrs(eps ...string) {
np := getHostPort2ep(eps)
b.mu.Lock()
defer b.mu.Unlock()
match := len(np) == len(b.hostPort2ep)
if match {
for k, v := range np {
if b.hostPort2ep[k] != v {
match = false
break
}
}
}
if match {
// same endpoints, so no need to update address
return
}
b.hostPort2ep = np
b.addrs, b.eps = eps2addrs(eps), eps
b.unhealthyMu.Lock()
b.unhealthyHostPorts = make(map[string]time.Time)
b.unhealthyMu.Unlock()
}
func (b *healthBalancer) next() {
b.mu.RLock()
downc := b.downc
b.mu.RUnlock()
select {
case b.updateAddrsC <- notifyNext:
case <-b.stopc:
}
// wait until disconnect so new RPCs are not issued on old connection
select {
case <-downc:
case <-b.stopc:
}
}
func (b *healthBalancer) updateNotifyLoop() {
defer close(b.donec)
for {
b.mu.RLock()
upc, downc, addr := b.upc, b.downc, b.pinAddr
b.mu.RUnlock()
// downc or upc should be closed
select {
case <-downc:
downc = nil
default:
}
select {
case <-upc:
upc = nil
default:
}
switch {
case downc == nil && upc == nil:
// stale
select {
case <-b.stopc:
return
default:
}
case downc == nil:
b.notifyAddrs(notifyReset)
select {
case <-upc:
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
return
}
case upc == nil:
select {
// close connections that are not the pinned address
case b.notifyCh <- []grpc.Address{{Addr: addr}}:
case <-downc:
case <-b.stopc:
return
}
select {
case <-downc:
b.notifyAddrs(notifyReset)
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
return
}
}
}
}
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
if msg == notifyNext {
select {
case b.notifyCh <- []grpc.Address{}:
case <-b.stopc:
return
}
}
b.mu.RLock()
pinAddr := b.pinAddr
downc := b.downc
b.mu.RUnlock()
addrs, hostPorts := b.liveAddrs()
var waitDown bool
if pinAddr != "" {
_, ok := hostPorts[pinAddr]
waitDown = !ok
}
select {
case b.notifyCh <- addrs:
if waitDown {
select {
case <-downc:
case <-b.stopc:
}
}
case <-b.stopc:
}
}
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
if !b.mayPin(addr) {
return func(err error) {}
}
b.mu.Lock()
defer b.mu.Unlock()
// gRPC might call Up after it called Close. We add this check
// to "fix" it up at application layer. Otherwise, will panic
// if b.upc is already closed.
if b.closed {
return func(err error) {}
}
// gRPC might call Up on a stale address.
// Prevent updating pinAddr with a stale address.
if !hasAddr(b.addrs, addr.Addr) {
return func(err error) {}
}
if b.pinAddr != "" {
logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
return func(err error) {}
}
// notify waiting Get()s and pin first connected address
close(b.upc)
b.downc = make(chan struct{})
b.pinAddr = addr.Addr
logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
// If connected to a black hole endpoint or a killed server, the gRPC ping
// timeout will induce a network I/O error, and retrying until success;
// finding healthy endpoint on retry could take several timeouts and redials.
// To avoid wasting retries, gray-list unhealthy endpoints.
b.hostPortError(addr.Addr, err)
b.mu.Lock()
b.upc = make(chan struct{})
close(b.downc)
b.pinAddr = ""
b.mu.Unlock()
logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
}
}
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
if b.endpoint(addr.Addr) == "" { // stale host:port
return false
}
b.unhealthyMu.RLock()
unhealthyCnt := len(b.unhealthyHostPorts)
failedTime, bad := b.unhealthyHostPorts[addr.Addr]
b.unhealthyMu.RUnlock()
b.mu.RLock()
skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
b.mu.RUnlock()
if skip || !bad {
return true
}
// prevent isolated member's endpoint from being infinitely retried, as follows:
// 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm
// 2. balancer 'Up' unpins with grpc: failed with network I/O error
// 3. grpc-healthcheck still SERVING, thus retry to pin
// instead, return before grpc-healthcheck if failed within healthcheck timeout
if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
return false
}
if ok, _ := b.healthCheck(addr.Addr); ok {
b.removeUnhealthy(addr.Addr, "health check success")
return true
}
b.hostPortError(addr.Addr, errors.New("health check failed"))
return false
}
func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var (
addr string
closed bool
)
// If opts.BlockingWait is false (for fail-fast RPCs), it should return
// an address it has notified via Notify immediately instead of blocking.
if !opts.BlockingWait {
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr == "" {
return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
}
return grpc.Address{Addr: addr}, func() {}, nil
}
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-b.donec:
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
closed = b.closed
addr = b.pinAddr
b.mu.RUnlock()
// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
if closed {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if addr != "" {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *healthBalancer) Close() error {
b.mu.Lock()
// In case gRPC calls close twice. TODO: remove the checking
// when we are sure that gRPC wont call close twice.
if b.closed {
b.mu.Unlock()
<-b.donec
return nil
}
b.closed = true
b.stopOnce.Do(func() { close(b.stopc) })
b.pinAddr = ""
// In the case of following scenario:
// 1. upc is not closed; no pinned address
// 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
// 3. client.conn.Close() calls balancer.Close(); closed = true
// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
// we must close upc so Get() exits from blocking on upc
select {
case <-b.upc:
default:
// terminate all waiting Get()s
close(b.upc)
}
b.mu.Unlock()
b.wg.Wait()
// wait for updateNotifyLoop to finish
<-b.donec
close(b.notifyCh)
return nil
}
func grpcHealthCheck(client *Client, ep string) (bool, error) {
conn, err := client.dial(ep)
if err != nil {
return false, err
}
defer conn.Close()
cli := healthpb.NewHealthClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
cancel()
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
if s.Message() == unknownService { // etcd < v3.3.0
return true, nil
}
}
return false, err
}
return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
for _, addr := range addrs {
if targetAddr == addr.Addr {
return true
}
}
return false
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}
func eps2addrs(eps []string) []grpc.Address {
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
return addrs
}
func getHostPort2ep(eps []string) map[string]string {
hm := make(map[string]string, len(eps))
for i := range eps {
_, host, _ := parseEndpoint(eps[i])
hm[host] = eps[i]
}
return hm
}


@ -15,8 +15,10 @@
package clientv3 package clientv3
import ( import (
"context"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -66,22 +68,46 @@ type OpResponse struct {
put *PutResponse put *PutResponse
get *GetResponse get *GetResponse
del *DeleteResponse del *DeleteResponse
txn *TxnResponse
} }
func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del } func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn }
func (resp *PutResponse) OpResponse() OpResponse {
return OpResponse{put: resp}
}
func (resp *GetResponse) OpResponse() OpResponse {
return OpResponse{get: resp}
}
func (resp *DeleteResponse) OpResponse() OpResponse {
return OpResponse{del: resp}
}
func (resp *TxnResponse) OpResponse() OpResponse {
return OpResponse{txn: resp}
}
type kv struct { type kv struct {
remote pb.KVClient remote pb.KVClient
callOpts []grpc.CallOption
} }
func NewKV(c *Client) KV { func NewKV(c *Client) KV {
return &kv{remote: RetryKVClient(c)} api := &kv{remote: RetryKVClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func NewKVFromKVClient(remote pb.KVClient) KV { func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
return &kv{remote: remote} api := &kv{remote: remote}
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
@ -100,7 +126,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
} }
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
@ -109,54 +135,43 @@ func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*C
func (kv *kv) Txn(ctx context.Context) Txn { func (kv *kv) Txn(ctx context.Context) Txn {
return &txn{ return &txn{
kv: kv, kv: kv,
ctx: ctx, ctx: ctx,
callOpts: kv.callOpts,
} }
} }
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error var err error
switch op.t { switch op.t {
// TODO: handle other ops
case tRange: case tRange:
var resp *pb.RangeResponse var resp *pb.RangeResponse
resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
if err == nil { if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil return OpResponse{get: (*GetResponse)(resp)}, nil
} }
case tPut: case tPut:
var resp *pb.PutResponse var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
resp, err = kv.remote.Put(ctx, r) resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
if err == nil { if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil return OpResponse{put: (*PutResponse)(resp)}, nil
} }
case tDeleteRange: case tDeleteRange:
var resp *pb.DeleteRangeResponse var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
resp, err = kv.remote.DeleteRange(ctx, r) resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
if err == nil { if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil return OpResponse{del: (*DeleteResponse)(resp)}, nil
} }
case tTxn:
var resp *pb.TxnResponse
resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
if err == nil {
return OpResponse{txn: (*TxnResponse)(resp)}, nil
}
default: default:
panic("Unknown op") panic("Unknown op")
} }
return OpResponse{}, err return OpResponse{}, toErr(ctx, err)
} }
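
Illustrative only: a sketch of issuing a transaction through the new tTxn branch of Do shown above. It assumes a connected *clientv3.Client named cli and imports of context, log and github.com/coreos/etcd/clientv3; the key and values are placeholders.

func txnOpSketch(ctx context.Context, cli *clientv3.Client) error {
	op := clientv3.OpTxn(
		[]clientv3.Cmp{clientv3.Compare(clientv3.Version("sample_key"), "=", 0)},
		[]clientv3.Op{clientv3.OpPut("sample_key", "created")}, // then: key did not exist
		[]clientv3.Op{clientv3.OpGet("sample_key")},            // else: read existing value
	)
	resp, err := cli.Do(ctx, op)
	if err != nil {
		return err
	}
	log.Printf("txn succeeded: %v", resp.Txn().Succeeded)
	return nil
}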


@ -15,12 +15,13 @@
package clientv3 package clientv3
import ( import (
"context"
"sync" "sync"
"time" "time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
) )
@ -30,7 +31,7 @@ type (
LeaseID int64 LeaseID int64
) )
// LeaseGrantResponse is used to convert the protobuf grant response. // LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct { type LeaseGrantResponse struct {
*pb.ResponseHeader *pb.ResponseHeader
ID LeaseID ID LeaseID
@ -38,19 +39,19 @@ type LeaseGrantResponse struct {
Error string Error string
} }
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. // LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct { type LeaseKeepAliveResponse struct {
*pb.ResponseHeader *pb.ResponseHeader
ID LeaseID ID LeaseID
TTL int64 TTL int64
} }
// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. // LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct { type LeaseTimeToLiveResponse struct {
*pb.ResponseHeader *pb.ResponseHeader
ID LeaseID `json:"id"` ID LeaseID `json:"id"`
// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
TTL int64 `json:"ttl"` TTL int64 `json:"ttl"`
// GrantedTTL is the initial granted time in seconds upon lease creation/renewal. // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
@ -60,6 +61,18 @@ type LeaseTimeToLiveResponse struct {
Keys [][]byte `json:"keys"` Keys [][]byte `json:"keys"`
} }
// LeaseStatus represents a lease status.
type LeaseStatus struct {
ID LeaseID `json:"id"`
// TODO: TTL int64
}
// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
type LeaseLeasesResponse struct {
*pb.ResponseHeader
Leases []LeaseStatus `json:"leases"`
}
const ( const (
// defaultTTL is the assumed lease TTL used for the first keepalive // defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client. // deadline before the actual TTL is known to the client.
@ -98,11 +111,32 @@ type Lease interface {
// TimeToLive retrieves the lease information of the given lease ID. // TimeToLive retrieves the lease information of the given lease ID.
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
// KeepAlive keeps the given lease alive forever. // Leases retrieves all leases.
Leases(ctx context.Context) (*LeaseLeasesResponse, error)
// KeepAlive keeps the given lease alive forever. If the keepalive response
// posted to the channel is not consumed immediately, the lease client will
// continue sending keep alive requests to the etcd server at least every
// second until latest response is consumed.
//
// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
// alive stream is interrupted in some way the client cannot handle itself;
// given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
// from this closed channel is nil.
//
// If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
// no leader") or canceled by the caller (e.g. context.Canceled), the error
// is returned. Otherwise, it retries.
//
// TODO(v4.0): post errors to last keep alive message before closing
// (see https://github.com/coreos/etcd/pull/7866)
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. In most of the cases, Keepalive // KeepAliveOnce renews the lease once. The response corresponds to the
// should be used instead of KeepAliveOnce. // first message from calling KeepAlive. If the response has a recoverable
// error, KeepAliveOnce will retry the RPC with a new keep alive message.
//
// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
// Close releases all resources Lease keeps for efficient communication // Close releases all resources Lease keeps for efficient communication
@ -133,6 +167,8 @@ type lessor struct {
// firstKeepAliveOnce ensures stream starts after first KeepAlive call. // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
firstKeepAliveOnce sync.Once firstKeepAliveOnce sync.Once
callOpts []grpc.CallOption
} }
// keepAlive multiplexes a keepalive for a lease over multiple channels // keepAlive multiplexes a keepalive for a lease over multiple channels
@ -148,10 +184,10 @@ type keepAlive struct {
} }
func NewLease(c *Client) Lease { func NewLease(c *Client) Lease {
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second) return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
} }
func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease { func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
l := &lessor{ l := &lessor{
donec: make(chan struct{}), donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive), keepAlives: make(map[LeaseID]*keepAlive),
@ -161,62 +197,64 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Durati
if l.firstKeepAliveTimeout == time.Second { if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL l.firstKeepAliveTimeout = defaultTTL
} }
if c != nil {
l.callOpts = c.callOpts
}
reqLeaderCtx := WithRequireLeader(context.Background()) reqLeaderCtx := WithRequireLeader(context.Background())
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
return l return l
} }
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
for { r := &pb.LeaseGrantRequest{TTL: ttl}
r := &pb.LeaseGrantRequest{TTL: ttl} resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
resp, err := l.remote.LeaseGrant(ctx, r) if err == nil {
if err == nil { gresp := &LeaseGrantResponse{
gresp := &LeaseGrantResponse{ ResponseHeader: resp.GetHeader(),
ResponseHeader: resp.GetHeader(), ID: LeaseID(resp.ID),
ID: LeaseID(resp.ID), TTL: resp.TTL,
TTL: resp.TTL, Error: resp.Error,
Error: resp.Error,
}
return gresp, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
} }
return gresp, nil
} }
return nil, toErr(ctx, err)
} }
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
for { r := &pb.LeaseRevokeRequest{ID: int64(id)}
r := &pb.LeaseRevokeRequest{ID: int64(id)} resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
resp, err := l.remote.LeaseRevoke(ctx, r) if err == nil {
return (*LeaseRevokeResponse)(resp), nil
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
} }
return nil, toErr(ctx, err)
} }
func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
for { r := toLeaseTimeToLiveRequest(id, opts...)
r := toLeaseTimeToLiveRequest(id, opts...) resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false)) if err == nil {
if err == nil { gresp := &LeaseTimeToLiveResponse{
gresp := &LeaseTimeToLiveResponse{ ResponseHeader: resp.GetHeader(),
ResponseHeader: resp.GetHeader(), ID: LeaseID(resp.ID),
ID: LeaseID(resp.ID), TTL: resp.TTL,
TTL: resp.TTL, GrantedTTL: resp.GrantedTTL,
GrantedTTL: resp.GrantedTTL, Keys: resp.Keys,
Keys: resp.Keys,
}
return gresp, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
} }
return gresp, nil
} }
return nil, toErr(ctx, err)
}
func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
if err == nil {
leases := make([]LeaseStatus, len(resp.Leases))
for i := range resp.Leases {
leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
}
return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
}
return nil, toErr(ctx, err)
} }
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
@ -314,7 +352,7 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
} }
} }
// closeRequireLeader scans all keep alives for ctxs that have require leader // closeRequireLeader scans keepAlives for ctxs that have require leader
// and closes the associated channels. // and closes the associated channels.
func (l *lessor) closeRequireLeader() { func (l *lessor) closeRequireLeader() {
l.mu.Lock() l.mu.Lock()
@ -323,7 +361,7 @@ func (l *lessor) closeRequireLeader() {
reqIdxs := 0 reqIdxs := 0
// find all required leader channels, close, mark as nil // find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs { for i, ctx := range ka.ctxs {
md, ok := metadata.FromContext(ctx) md, ok := metadata.FromOutgoingContext(ctx)
if !ok { if !ok {
continue continue
} }
@ -357,7 +395,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
cctx, cancel := context.WithCancel(ctx) cctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
@ -386,7 +424,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
close(l.donec) close(l.donec)
l.loopErr = gerr l.loopErr = gerr
for _, ka := range l.keepAlives { for _, ka := range l.keepAlives {
ka.Close() ka.close()
} }
l.keepAlives = make(map[LeaseID]*keepAlive) l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock() l.mu.Unlock()
@ -401,7 +439,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
} else { } else {
for { for {
resp, err := stream.Recv() resp, err := stream.Recv()
if err != nil { if err != nil {
if canceledByCaller(l.stopCtx, err) { if canceledByCaller(l.stopCtx, err) {
return err return err
@ -426,10 +463,10 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
} }
} }
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests // resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx) sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
if err != nil { if err != nil {
cancel() cancel()
return nil, err return nil, err
@ -467,7 +504,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
if karesp.TTL <= 0 { if karesp.TTL <= 0 {
// lease expired; close all keep alive channels // lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID) delete(l.keepAlives, karesp.ID)
ka.Close() ka.close()
return return
} }
@ -497,7 +534,7 @@ func (l *lessor) deadlineLoop() {
for id, ka := range l.keepAlives { for id, ka := range l.keepAlives {
if ka.deadline.Before(now) { if ka.deadline.Before(now) {
// waited too long for response; lease may be expired // waited too long for response; lease may be expired
ka.Close() ka.close()
delete(l.keepAlives, id) delete(l.keepAlives, id)
} }
} }
@ -505,7 +542,7 @@ func (l *lessor) deadlineLoop() {
} }
} }
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream // sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for { for {
var tosend []LeaseID var tosend []LeaseID
@ -539,7 +576,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
} }
} }
func (ka *keepAlive) Close() { func (ka *keepAlive) close() {
close(ka.donec) close(ka.donec)
for _, ch := range ka.chs { for _, ch := range ka.chs {
close(ch) close(ch)
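
For context, a sketch of the KeepAlive contract documented earlier in this file's diff (consume responses promptly so the lessor keeps sending keep-alive requests); it assumes a connected *clientv3.Client named cli and imports of context, log and clientv3. The TTL value is illustrative.

func keepAliveSketch(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	for ka := range ch {
		log.Printf("lease %d refreshed, ttl=%d", ka.ID, ka.TTL)
	}
	// channel closed: ctx is done or the keep-alive stream was irrecoverably interrupted
	return nil
}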


@ -16,67 +16,120 @@ package clientv3
import ( import (
"io/ioutil" "io/ioutil"
"log"
"sync" "sync"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
) )
// Logger is the logger used by client library. // Logger is the logger used by client library.
// It implements grpclog.Logger interface. // It implements grpclog.LoggerV2 interface.
type Logger grpclog.Logger type Logger interface {
grpclog.LoggerV2
// Lvl returns logger if logger's verbosity level >= "lvl".
// Otherwise, logger that discards all logs.
Lvl(lvl int) Logger
// to satisfy capnslog
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
}
var ( var (
logger settableLogger loggerMu sync.RWMutex
logger Logger
) )
type settableLogger struct { type settableLogger struct {
l grpclog.Logger l grpclog.LoggerV2
mu sync.RWMutex mu sync.RWMutex
} }
func init() { func init() {
// disable client side logs by default // disable client side logs by default
logger.mu.Lock() logger = &settableLogger{}
logger.l = log.New(ioutil.Discard, "", 0) SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
// logger has to override the grpclog at initialization so that
// any changes to the grpclog go through logger with locking
// instead of through SetLogger
//
// now updates only happen through settableLogger.set
grpclog.SetLogger(&logger)
logger.mu.Unlock()
} }
// SetLogger sets client-side Logger. By default, logs are disabled. // SetLogger sets client-side Logger.
func SetLogger(l Logger) { func SetLogger(l grpclog.LoggerV2) {
logger.set(l) loggerMu.Lock()
logger = NewLogger(l)
// override grpclog so that any changes happen with locking
grpclog.SetLoggerV2(logger)
loggerMu.Unlock()
} }
// GetLogger returns the current logger. // GetLogger returns the current logger.
func GetLogger() Logger { func GetLogger() Logger {
return logger.get() loggerMu.RLock()
l := logger
loggerMu.RUnlock()
return l
} }
func (s *settableLogger) set(l Logger) { // NewLogger returns a new Logger with grpclog.LoggerV2.
s.mu.Lock() func NewLogger(gl grpclog.LoggerV2) Logger {
logger.l = l return &settableLogger{l: gl}
s.mu.Unlock()
} }
func (s *settableLogger) get() Logger { func (s *settableLogger) get() grpclog.LoggerV2 {
s.mu.RLock() s.mu.RLock()
l := logger.l l := s.l
s.mu.RUnlock() s.mu.RUnlock()
return l return l
} }
// implement the grpclog.Logger interface // implement the grpclog.LoggerV2 interface
func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
func (s *settableLogger) Warningf(format string, args ...interface{}) {
s.get().Warningf(format, args...)
}
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
func (s *settableLogger) Errorf(format string, args ...interface{}) {
s.get().Errorf(format, args...)
}
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
func (s *settableLogger) Lvl(lvl int) Logger {
s.mu.RLock()
l := s.l
s.mu.RUnlock()
if l.V(lvl) {
return s
}
return &noLogger{}
}
type noLogger struct{}
func (*noLogger) Info(args ...interface{}) {}
func (*noLogger) Infof(format string, args ...interface{}) {}
func (*noLogger) Infoln(args ...interface{}) {}
func (*noLogger) Warning(args ...interface{}) {}
func (*noLogger) Warningf(format string, args ...interface{}) {}
func (*noLogger) Warningln(args ...interface{}) {}
func (*noLogger) Error(args ...interface{}) {}
func (*noLogger) Errorf(format string, args ...interface{}) {}
func (*noLogger) Errorln(args ...interface{}) {}
func (*noLogger) Fatal(args ...interface{}) {}
func (*noLogger) Fatalf(format string, args ...interface{}) {}
func (*noLogger) Fatalln(args ...interface{}) {}
func (*noLogger) Print(args ...interface{}) {}
func (*noLogger) Printf(format string, args ...interface{}) {}
func (*noLogger) Println(args ...interface{}) {}
func (*noLogger) V(l int) bool { return false }
func (ng *noLogger) Lvl(lvl int) Logger { return ng }
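
A short sketch of enabling client-side logs with the LoggerV2-based SetLogger above; it assumes imports of os, google.golang.org/grpc/grpclog and clientv3. Verbosity 4 makes the Lvl(4) balancer messages from the health balancer file above visible.

func enableClientLogs() {
	clientv3.SetLogger(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4))
	// Lvl(4) now returns the real logger instead of the discarding noLogger.
	clientv3.GetLogger().Lvl(4).Infof("clientv3 debug logging enabled")
}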


@ -15,11 +15,11 @@
package clientv3 package clientv3
import ( import (
"context"
"io" "io"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -28,6 +28,8 @@ type (
AlarmResponse pb.AlarmResponse AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse StatusResponse pb.StatusResponse
HashKVResponse pb.HashKVResponse
MoveLeaderResponse pb.MoveLeaderResponse
) )
type Maintenance interface { type Maintenance interface {
@ -37,7 +39,7 @@ type Maintenance interface {
// AlarmDisarm disarms a given alarm. // AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment defragments storage backend of the etcd member with given endpoint. // Defragment releases wasted space from internal fragmentation on a given etcd member.
// Defragment is only needed when deleting a large number of keys and want to reclaim // Defragment is only needed when deleting a large number of keys and want to reclaim
// the resources. // the resources.
// Defragment is an expensive operation. User should avoid defragmenting multiple members // Defragment is an expensive operation. User should avoid defragmenting multiple members
@ -49,36 +51,54 @@ type Maintenance interface {
// Status gets the status of the endpoint. // Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error) Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend. // HashKV returns a hash of the KV state at the time of the RPC.
// If revision is zero, the hash is computed on all keys. If the revision
// is non-zero, the hash is computed on all keys at or below the given revision.
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
// Snapshot provides a reader for a point-in-time snapshot of etcd.
Snapshot(ctx context.Context) (io.ReadCloser, error) Snapshot(ctx context.Context) (io.ReadCloser, error)
// MoveLeader requests current leader to transfer its leadership to the transferee.
// Request must be made to the leader.
MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
} }
type maintenance struct { type maintenance struct {
dial func(endpoint string) (pb.MaintenanceClient, func(), error) dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient remote pb.MaintenanceClient
callOpts []grpc.CallOption
} }
func NewMaintenance(c *Client) Maintenance { func NewMaintenance(c *Client) Maintenance {
return &maintenance{ api := &maintenance{
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.dial(endpoint) conn, err := c.dial(endpoint)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
cancel := func() { conn.Close() } cancel := func() { conn.Close() }
return pb.NewMaintenanceClient(conn), cancel, nil return RetryMaintenanceClient(c, conn), cancel, nil
}, },
remote: pb.NewMaintenanceClient(c.conn), remote: RetryMaintenanceClient(c, c.conn),
} }
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance { func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
return &maintenance{ api := &maintenance{
dial: func(string) (pb.MaintenanceClient, func(), error) { dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil return remote, func() {}, nil
}, },
remote: remote, remote: remote,
} }
if c != nil {
api.callOpts = c.callOpts
}
return api
} }
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
@ -87,15 +107,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
MemberID: 0, // all MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all Alarm: pb.AlarmType_NONE, // all
} }
for { resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) if err == nil {
if err == nil { return (*AlarmResponse)(resp), nil
return (*AlarmResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
} }
return nil, toErr(ctx, err)
} }
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
@ -121,7 +137,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
return &ret, nil return &ret, nil
} }
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil { if err == nil {
return (*AlarmResponse)(resp), nil return (*AlarmResponse)(resp), nil
} }
@ -134,7 +150,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
defer cancel() defer cancel()
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
@ -147,15 +163,28 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
defer cancel() defer cancel()
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
return (*StatusResponse)(resp), nil return (*StatusResponse)(resp), nil
} }
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*HashKVResponse)(resp), nil
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
if err != nil { if err != nil {
return nil, toErr(ctx, err) return nil, toErr(ctx, err)
} }
@ -178,5 +207,20 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
} }
pw.Close() pw.Close()
}() }()
return pr, nil return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}
type snapshotReadCloser struct {
ctx context.Context
io.ReadCloser
}
func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
n, err = rc.ReadCloser.Read(p)
return n, toErr(rc.ctx, err)
}
func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
return (*MoveLeaderResponse)(resp), toErr(ctx, err)
} }
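
A hedged sketch of the Maintenance additions above (per-endpoint Status, the new HashKV, and MoveLeader); cli, ep and transferee are assumed inputs, and imports of context, log and clientv3 are assumed.

func maintenanceSketch(ctx context.Context, cli *clientv3.Client, ep string, transferee uint64) error {
	st, err := cli.Status(ctx, ep)
	if err != nil {
		return err
	}
	// revision 0 hashes over all keys on that member
	h, err := cli.HashKV(ctx, ep, 0)
	if err != nil {
		return err
	}
	log.Printf("member %x at rev %d, kv hash %d", st.Header.MemberId, st.Header.Revision, h.Hash)
	// MoveLeader must be sent to the current leader; transferee is another member's ID.
	_, err = cli.MoveLeader(ctx, transferee)
	return err
}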


@ -23,6 +23,7 @@ const (
tRange opType = iota + 1 tRange opType = iota + 1
tPut tPut
tDeleteRange tDeleteRange
tTxn
) )
var ( var (
@ -67,9 +68,17 @@ type Op struct {
// for put // for put
val []byte val []byte
leaseID LeaseID leaseID LeaseID
// txn
cmps []Cmp
thenOps []Op
elseOps []Op
} }
// accesors / mutators // accessors / mutators
func (op Op) IsTxn() bool { return op.t == tTxn }
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
// KeyBytes returns the byte slice holding the Op's key. // KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key } func (op Op) KeyBytes() []byte { return op.key }
@ -80,6 +89,39 @@ func (op *Op) WithKeyBytes(key []byte) { op.key = key }
// RangeBytes returns the byte slice holding with the Op's range end, if any. // RangeBytes returns the byte slice holding with the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end } func (op Op) RangeBytes() []byte { return op.end }
// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }
// IsPut returns true iff the operation is a Put.
func (op Op) IsPut() bool { return op.t == tPut }
// IsGet returns true iff the operation is a Get.
func (op Op) IsGet() bool { return op.t == tRange }
// IsDelete returns true iff the operation is a Delete.
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
// IsSerializable returns true if the serializable field is true.
func (op Op) IsSerializable() bool { return op.serializable == true }
// IsKeysOnly returns whether keysOnly is set.
func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
// IsCountOnly returns whether countOnly is set.
func (op Op) IsCountOnly() bool { return op.countOnly == true }
// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }
// MaxModRev returns the operation's maximum modify revision.
func (op Op) MaxModRev() int64 { return op.maxModRev }
// MinCreateRev returns the operation's minimum create revision.
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
// MaxCreateRev returns the operation's maximum create revision.
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
// WithRangeBytes sets the byte slice for the Op's range end. // WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end } func (op *Op) WithRangeBytes(end []byte) { op.end = end }
@ -113,6 +155,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest {
return r return r
} }
func (op Op) toTxnRequest() *pb.TxnRequest {
thenOps := make([]*pb.RequestOp, len(op.thenOps))
for i, tOp := range op.thenOps {
thenOps[i] = tOp.toRequestOp()
}
elseOps := make([]*pb.RequestOp, len(op.elseOps))
for i, eOp := range op.elseOps {
elseOps[i] = eOp.toRequestOp()
}
cmps := make([]*pb.Compare, len(op.cmps))
for i := range op.cmps {
cmps[i] = (*pb.Compare)(&op.cmps[i])
}
return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
}
func (op Op) toRequestOp() *pb.RequestOp { func (op Op) toRequestOp() *pb.RequestOp {
switch op.t { switch op.t {
case tRange: case tRange:
@ -123,12 +181,27 @@ func (op Op) toRequestOp() *pb.RequestOp {
case tDeleteRange: case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
case tTxn:
return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
default: default:
panic("Unknown Op") panic("Unknown Op")
} }
} }
func (op Op) isWrite() bool { func (op Op) isWrite() bool {
if op.t == tTxn {
for _, tOp := range op.thenOps {
if tOp.isWrite() {
return true
}
}
for _, tOp := range op.elseOps {
if tOp.isWrite() {
return true
}
}
return false
}
return op.t != tRange return op.t != tRange
} }
@ -194,6 +267,10 @@ func OpPut(key, val string, opts ...OpOption) Op {
return ret return ret
} }
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
func opWatch(key string, opts ...OpOption) Op { func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)} ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts) ret.applyOpts(opts)
@ -247,9 +324,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
if target == SortByKey && order == SortAscend { if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space, // If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided. // and then applies the sort and limit, if provided.
// Since current mvcc.Range implementation returns results // Since by default the server returns results sorted by keys
// sorted by keys in lexicographically ascending order, // in lexicographically ascending order, the client should ignore
// client should ignore SortOrder if the target is SortByKey. // SortOrder if the target is SortByKey.
order = SortNone order = SortNone
} }
op.sort = &SortOption{target, order} op.sort = &SortOption{target, order}
@ -390,7 +467,7 @@ func WithPrevKV() OpOption {
} }
// WithIgnoreValue updates the key using its current value. // WithIgnoreValue updates the key using its current value.
// Empty value should be passed when ignore_value is set. // This option can not be combined with non-empty values.
// Returns an error if the key does not exist. // Returns an error if the key does not exist.
func WithIgnoreValue() OpOption { func WithIgnoreValue() OpOption {
return func(op *Op) { return func(op *Op) {
@ -399,7 +476,7 @@ func WithIgnoreValue() OpOption {
} }
// WithIgnoreLease updates the key using its current lease. // WithIgnoreLease updates the key using its current lease.
// Empty lease should be passed when ignore_lease is set. // This option can not be combined with WithLease.
// Returns an error if the key does not exist. // Returns an error if the key does not exist.
func WithIgnoreLease() OpOption { func WithIgnoreLease() OpOption {
return func(op *Op) { return func(op *Op) {
@ -424,8 +501,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) {
} }
} }
// WithAttachedKeys requests lease timetolive API to return // WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
// attached keys of given lease ID.
func WithAttachedKeys() LeaseOption { func WithAttachedKeys() LeaseOption {
return func(op *LeaseOp) { op.attachedKeys = true } return func(op *LeaseOp) { op.attachedKeys = true }
} }
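The new tTxn op type and OpTxn constructor allow a transaction to be nested inside another transaction. A hedged sketch of composing one; cli is assumed to be a *clientv3.Client and the keys are illustrative.

// Inner transaction embedded in the Then branch of the outer one.
inner := clientv3.OpTxn(
	nil, // no comparisons
	[]clientv3.Op{clientv3.OpPut("app/child", "1")},
	nil,
)

resp, err := cli.Txn(context.Background()).
	If(clientv3.Compare(clientv3.Version("app/parent"), ">", 0)).
	Then(clientv3.OpPut("app/parent", "2"), inner).
	Commit()
if err != nil {
	log.Fatal(err)
}
log.Println("outer txn succeeded:", resp.Succeeded)

Because the inner txn contains an OpPut, the recursive isWrite check above marks the whole operation as a write.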

49
vendor/github.com/coreos/etcd/clientv3/options.go generated vendored Normal file

@ -0,0 +1,49 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"math"
"google.golang.org/grpc"
)
var (
// Disable gRPC internal retry logic
// TODO: enable when gRPC retry is stable (FailFast=false)
// Reference:
// - https://github.com/grpc/grpc-go/issues/1532
// - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
defaultFailFast = grpc.FailFast(true)
// client-side request send limit, gRPC default is math.MaxInt32
// Make sure that "client-side send limit < server-side default send/recv limit"
// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
// client-side response receive limit, gRPC default is 4MB
// Make sure that "client-side receive limit >= server-side default send/recv limit"
// because range response can easily exceed request send limits
// Default to math.MaxInt32; writes exceeding server-side send limit fail anyway
defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
)
// defaultCallOpts defines a list of default "gRPC.CallOption".
// Some options are exposed to "clientv3.Config".
// Defaults will be overridden by the settings in "clientv3.Config".
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
// MaxLeaseTTL is the maximum lease TTL value
const MaxLeaseTTL = 9000000000

30
vendor/github.com/coreos/etcd/clientv3/ready_wait.go generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import "context"
// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
select {
case <-ready:
return nil
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-clientCtx.Done():
return clientCtx.Err()
}
}


@ -15,279 +15,482 @@
package clientv3 package clientv3
import ( import (
"context"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type retryPolicy uint8
const (
repeatable retryPolicy = iota
nonRepeatable
) )
type rpcFunc func(ctx context.Context) error type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc) error type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
type retryStopErrFunc func(error) bool
func (c *Client) newRetryWrapper() retryRpcFunc { // immutable requests (e.g. Get) should be retried unless it's
return func(rpcCtx context.Context, f rpcFunc) error { // an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
//
// "isRepeatableStopError" returns "true" when an immutable request
// is interrupted by server-side or gRPC-side error and its status
// code is not transient (!= codes.Unavailable).
//
// Returning "true" means retry should stop, since client cannot
// handle itself even with retries.
func isRepeatableStopError(err error) bool {
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
return true
}
// only retry if unavailable
ev, _ := status.FromError(err)
return ev.Code() != codes.Unavailable
}
// mutable requests (e.g. Put, Delete, Txn) should only be retried
// when the status code is codes.Unavailable when initial connection
// has not been established (no pinned endpoint).
//
// "isNonRepeatableStopError" returns "true" when a mutable request
// is interrupted by non-transient error that client cannot handle itself,
// or transient error while the connection has already been established
// (pinned endpoint exists).
//
// Returning "true" means retry should stop, otherwise it violates
// write-at-most-once semantics.
func isNonRepeatableStopError(err error) bool {
ev, _ := status.FromError(err)
if ev.Code() != codes.Unavailable {
return true
}
desc := rpctypes.ErrorDesc(err)
return desc != "there is no address available" && desc != "there is no connection available"
}
func (c *Client) newRetryWrapper() retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
var isStop retryStopErrFunc
switch rp {
case repeatable:
isStop = isRepeatableStopError
case nonRepeatable:
isStop = isNonRepeatableStopError
}
for { for {
if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
return err
}
pinned := c.balancer.pinned()
err := f(rpcCtx) err := f(rpcCtx)
if err == nil { if err == nil {
return nil return nil
} }
logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
eErr := rpctypes.Error(err) if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
// always stop retry on etcd errors // mark this before endpoint switch is triggered
if _, ok := eErr.(rpctypes.EtcdError); ok { c.balancer.hostPortError(pinned, err)
return err c.balancer.next()
logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
} }
// only retry if unavailable if isStop(err) {
if grpc.Code(err) != codes.Unavailable {
return err return err
} }
select {
case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-c.ctx.Done():
return c.ctx.Err()
}
} }
} }
} }
func (c *Client) newAuthRetryWrapper() retryRpcFunc { func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc) error { return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
for { for {
err := f(rpcCtx) pinned := c.balancer.pinned()
err := retryf(rpcCtx, f, rp)
if err == nil { if err == nil {
return nil return nil
} }
logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
// always stop retry on etcd errors other than invalid auth token // always stop retry on etcd errors other than invalid auth token
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := c.getToken(rpcCtx) gterr := c.getToken(rpcCtx)
if gterr != nil { if gterr != nil {
logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
return err // return the original error for simplicity return err // return the original error for simplicity
} }
continue continue
} }
return err return err
} }
} }
} }
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
}
type retryKVClient struct { type retryKVClient struct {
*retryWriteKVClient kc pb.KVClient
retryf retryRPCFunc
} }
// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
return &retryKVClient{
kc: pb.NewKVClient(c.conn),
retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
}
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error { err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) resp, err = rkv.kc.Range(rctx, in, opts...)
return err return err
}) }, repeatable)
return resp, err return resp, err
} }
type retryWriteKVClient struct { func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
pb.KVClient
retryf retryRpcFunc
}
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error { err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Put(rctx, in, opts...) resp, err = rkv.kc.Put(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error { err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
// TODO: "repeatable" for read-only txn
err = rkv.retryf(ctx, func(rctx context.Context) error { err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Txn(rctx, in, opts...) resp, err = rkv.kc.Txn(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error { err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Compact(rctx, in, opts...) resp, err = rkv.kc.Compact(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
type retryLeaseClient struct { type retryLeaseClient struct {
pb.LeaseClient lc pb.LeaseClient
retryf retryRpcFunc retryf retryRPCFunc
} }
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. // RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient { func RetryLeaseClient(c *Client) pb.LeaseClient {
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} return &retryLeaseClient{
return &retryLeaseClient{retry, c.retryAuthWrapper} lc: pb.NewLeaseClient(c.conn),
retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
}
}
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
return err
}, repeatable)
return resp, err
} }
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error { err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
return err return err
}) }, repeatable)
return resp, err return resp, err
} }
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error { err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
return err return err
}) }, repeatable)
return resp, err return resp, err
} }
type retryClusterClient struct { func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
pb.ClusterClient err = rlc.retryf(ctx, func(rctx context.Context) error {
retryf retryRpcFunc stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
return err
}, repeatable)
return stream, err
} }
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. type retryClusterClient struct {
cc pb.ClusterClient
retryf retryRPCFunc
}
// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient { func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} return &retryClusterClient{
cc: pb.NewClusterClient(c.conn),
retryf: c.newRetryWrapper(),
}
}
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberList(rctx, in, opts...)
return err
}, repeatable)
return resp, err
} }
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error { err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error { err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error { err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err
}
type retryMaintenanceClient struct {
mc pb.MaintenanceClient
retryf retryRPCFunc
}
// RetryMaintenanceClient implements a Maintenance.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
return &retryMaintenanceClient{
mc: pb.NewMaintenanceClient(conn),
retryf: c.newRetryWrapper(),
}
}
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Alarm(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Status(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Hash(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.HashKV(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
stream, err = rmc.mc.Snapshot(rctx, in, opts...)
return err
}, repeatable)
return stream, err
}
func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Defragment(rctx, in, opts...)
return err
}, nonRepeatable)
return resp, err return resp, err
} }
type retryAuthClient struct { type retryAuthClient struct {
pb.AuthClient ac pb.AuthClient
retryf retryRpcFunc retryf retryRPCFunc
} }
// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. // RetryAuthClient implements a AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient { func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} return &retryAuthClient{
ac: pb.NewAuthClient(c.conn),
retryf: c.newRetryWrapper(),
}
}
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserList(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserGet(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleGet(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleList(rctx, in, opts...)
return err
}, repeatable)
return resp, err
} }
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) resp, err = rac.ac.AuthEnable(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) resp, err = rac.ac.AuthDisable(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) resp, err = rac.ac.UserAdd(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) resp, err = rac.ac.UserDelete(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) resp, err = rac.ac.RoleAdd(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) resp, err = rac.ac.RoleDelete(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err return resp, err
} }
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error { err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
return err return err
}) }, nonRepeatable)
return resp, err
}
func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.Authenticate(rctx, in, opts...)
return err
}, nonRepeatable)
return resp, err return resp, err
} }
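A sketch of what the repeatable/nonRepeatable split means for callers of the exported retry-wrapped clients; pb is the etcdserverpb package imported above, and applications would normally use the high-level KV API instead, so this is illustrative only.

func retrySketch(cli *clientv3.Client) {
	// RetryKVClient wraps the raw KV client with the auth-retry and
	// policy-aware retry wrappers defined above.
	kv := clientv3.RetryKVClient(cli)

	// Range uses the repeatable policy: retried across endpoint switches
	// on transient Unavailable errors.
	if _, err := kv.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		log.Println("range:", err)
	}

	// Put uses the nonRepeatable policy: retried only while no endpoint
	// is pinned, so a write is never silently duplicated.
	if _, err := kv.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		log.Println("put:", err)
	}
}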


@ -15,16 +15,17 @@
package clientv3 package clientv3
import ( import (
"context"
"sync" "sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
// Txn is the interface that wraps mini-transactions. // Txn is the interface that wraps mini-transactions.
// //
// Tx.If( // Txn(context.TODO()).If(
// Compare(Value(k1), ">", v1), // Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2) // Compare(Version(k1), "=", 2)
// ).Then( // ).Then(
@ -66,6 +67,8 @@ type txn struct {
sus []*pb.RequestOp sus []*pb.RequestOp
fas []*pb.RequestOp fas []*pb.RequestOp
callOpts []grpc.CallOption
} }
func (txn *txn) If(cs ...Cmp) Txn { func (txn *txn) If(cs ...Cmp) Txn {
@ -135,30 +138,14 @@ func (txn *txn) Else(ops ...Op) Txn {
func (txn *txn) Commit() (*TxnResponse, error) { func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock() txn.mu.Lock()
defer txn.mu.Unlock() defer txn.mu.Unlock()
for {
resp, err := txn.commit()
if err == nil {
return resp, err
}
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
return nil, toErr(txn.ctx, err)
}
}
}
func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
var opts []grpc.CallOption var resp *pb.TxnResponse
if !txn.isWrite { var err error
opts = []grpc.CallOption{grpc.FailFast(false)} resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
}
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
if err != nil { if err != nil {
return nil, err return nil, toErr(txn.ctx, err)
} }
return (*TxnResponse)(resp), nil return (*TxnResponse)(resp), nil
} }
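With the client-side retry loop removed, Commit returns the first error already translated by toErr and leaves retries to the caller or to the retry wrappers. A short compare-and-set sketch under the new code path; key names are illustrative.

resp, err := cli.Txn(context.TODO()).
	If(clientv3.Compare(clientv3.CreateRevision("locks/job"), "=", 0)).
	Then(clientv3.OpPut("locks/job", "worker-1")).
	Else(clientv3.OpGet("locks/job")).
	Commit()
if err != nil {
	// err has already been passed through toErr(txn.ctx, err).
	log.Fatal(err)
}
if resp.Succeeded {
	log.Println("lock acquired")
}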


@ -15,6 +15,7 @@
package clientv3 package clientv3
import ( import (
"context"
"fmt" "fmt"
"sync" "sync"
"time" "time"
@ -22,9 +23,11 @@ import (
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
) )
const ( const (
@ -40,10 +43,9 @@ type WatchChan <-chan WatchResponse
type Watcher interface { type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned // Watch watches on a key or prefix. The watched events will be returned
// through the returned channel. // through the returned channel. If revisions waiting to be sent over the
// If the watch is slow or the required rev is compacted, the watch request // watch are compacted, then the watch will be canceled by the server, the
// might be canceled from the server-side and the chan will be closed. // client will post a compacted error watch response, and the channel will close.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests. // Close closes the watcher and cancels all watch requests.
@ -90,7 +92,7 @@ func (wr *WatchResponse) Err() error {
return v3rpc.ErrCompacted return v3rpc.ErrCompacted
case wr.Canceled: case wr.Canceled:
if len(wr.cancelReason) != 0 { if len(wr.cancelReason) != 0 {
return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason)) return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
} }
return v3rpc.ErrFutureRev return v3rpc.ErrFutureRev
} }
@ -104,7 +106,8 @@ func (wr *WatchResponse) IsProgressNotify() bool {
// watcher implements the Watcher interface // watcher implements the Watcher interface
type watcher struct { type watcher struct {
remote pb.WatchClient remote pb.WatchClient
callOpts []grpc.CallOption
// mu protects the grpc streams map // mu protects the grpc streams map
mu sync.RWMutex mu sync.RWMutex
@ -115,8 +118,9 @@ type watcher struct {
// watchGrpcStream tracks all watch resources attached to a single grpc stream. // watchGrpcStream tracks all watch resources attached to a single grpc stream.
type watchGrpcStream struct { type watchGrpcStream struct {
owner *watcher owner *watcher
remote pb.WatchClient remote pb.WatchClient
callOpts []grpc.CallOption
// ctx controls internal remote.Watch requests // ctx controls internal remote.Watch requests
ctx context.Context ctx context.Context
@ -135,7 +139,7 @@ type watchGrpcStream struct {
respc chan *pb.WatchResponse respc chan *pb.WatchResponse
// donec closes to broadcast shutdown // donec closes to broadcast shutdown
donec chan struct{} donec chan struct{}
// errc transmits errors from grpc Recv to the watch stream reconn logic // errc transmits errors from grpc Recv to the watch stream reconnect logic
errc chan error errc chan error
// closingc gets the watcherStream of closing watchers // closingc gets the watcherStream of closing watchers
closingc chan *watcherStream closingc chan *watcherStream
@ -187,14 +191,18 @@ type watcherStream struct {
} }
func NewWatcher(c *Client) Watcher { func NewWatcher(c *Client) Watcher {
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
} }
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
return &watcher{ w := &watcher{
remote: wc, remote: wc,
streams: make(map[string]*watchGrpcStream), streams: make(map[string]*watchGrpcStream),
} }
if c != nil {
w.callOpts = c.callOpts
}
return w
} }
// never closes // never closes
@ -213,17 +221,17 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
wgs := &watchGrpcStream{ wgs := &watchGrpcStream{
owner: w, owner: w,
remote: w.remote, remote: w.remote,
callOpts: w.callOpts,
ctx: ctx, ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx), ctxKey: streamKeyFromCtx(inctx),
cancel: cancel, cancel: cancel,
substreams: make(map[int64]*watcherStream), substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
respc: make(chan *pb.WatchResponse), reqc: make(chan *watchRequest),
reqc: make(chan *watchRequest), donec: make(chan struct{}),
donec: make(chan struct{}), errc: make(chan error, 1),
errc: make(chan error, 1), closingc: make(chan *watcherStream),
closingc: make(chan *watcherStream), resumec: make(chan struct{}),
resumec: make(chan struct{}),
} }
go wgs.run() go wgs.run()
return wgs return wgs
@ -254,7 +262,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
} }
ok := false ok := false
ctxKey := fmt.Sprintf("%v", ctx) ctxKey := streamKeyFromCtx(ctx)
// find or allocate appropriate grpc watch stream // find or allocate appropriate grpc watch stream
w.mu.Lock() w.mu.Lock()
@ -317,14 +325,14 @@ func (w *watcher) Close() (err error) {
w.streams = nil w.streams = nil
w.mu.Unlock() w.mu.Unlock()
for _, wgs := range streams { for _, wgs := range streams {
if werr := wgs.Close(); werr != nil { if werr := wgs.close(); werr != nil {
err = werr err = werr
} }
} }
return err return err
} }
func (w *watchGrpcStream) Close() (err error) { func (w *watchGrpcStream) close() (err error) {
w.cancel() w.cancel()
<-w.donec <-w.donec
select { select {
@ -435,7 +443,7 @@ func (w *watchGrpcStream) run() {
initReq: *wreq, initReq: *wreq,
id: -1, id: -1,
outc: outc, outc: outc,
// unbufffered so resumes won't cause repeat events // unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse), recvc: make(chan *WatchResponse),
} }
@ -462,7 +470,7 @@ func (w *watchGrpcStream) run() {
if ws := w.nextResume(); ws != nil { if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB()) wc.Send(ws.initReq.toPB())
} }
case pbresp.Canceled: case pbresp.Canceled && pbresp.CompactRevision == 0:
delete(cancelSet, pbresp.WatchId) delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok { if ws, ok := w.substreams[pbresp.WatchId]; ok {
// signal to stream goroutine to update closingc // signal to stream goroutine to update closingc
@ -487,7 +495,7 @@ func (w *watchGrpcStream) run() {
req := &pb.WatchRequest{RequestUnion: cr} req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req) wc.Send(req)
} }
// watch client failed to recv; spawn another if possible // watch client failed on Recv; spawn another if possible
case err := <-w.errc: case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err closeErr = err
@ -749,7 +757,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
return donec return donec
} }
// joinSubstream waits for all substream goroutines to complete // joinSubstreams waits for all substream goroutines to complete.
func (w *watchGrpcStream) joinSubstreams() { func (w *watchGrpcStream) joinSubstreams() {
for _, ws := range w.substreams { for _, ws := range w.substreams {
<-ws.donec <-ws.donec
@ -761,7 +769,9 @@ func (w *watchGrpcStream) joinSubstreams() {
} }
} }
// openWatchClient retries opening a watchclient until retryConnection fails // openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for { for {
select { select {
@ -772,7 +782,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
return nil, err return nil, err
default: default:
} }
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
break break
} }
if isHaltErr(w.ctx, err) { if isHaltErr(w.ctx, err) {
@ -782,7 +792,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
return ws, nil return ws, nil
} }
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) // toPB converts an internal watch request structure to its protobuf WatchRequest structure.
func (wr *watchRequest) toPB() *pb.WatchRequest { func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{ req := &pb.WatchCreateRequest{
StartRevision: wr.rev, StartRevision: wr.rev,
@ -795,3 +805,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr} return &pb.WatchRequest{RequestUnion: cr}
} }
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
return fmt.Sprintf("%+v", md)
}
return ""
}
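streamKeyFromCtx keys gRPC watch streams by the context's outgoing metadata rather than by formatting the whole context value, so only metadata differences force separate streams. A sketch with illustrative metadata; cli is assumed to be a *clientv3.Client.

// Same metadata: both watches multiplex onto a single gRPC watch stream.
base := context.Background()
a := cli.Watch(base, "foo", clientv3.WithPrefix())
b := cli.Watch(base, "foo/bar")

// Different outgoing metadata: a separate watchGrpcStream is allocated.
md := metadata.Pairs("purpose", "audit")
c := cli.Watch(metadata.NewOutgoingContext(base, md), "foo", clientv3.WithPrefix())

_, _, _ = a, b, c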


@ -15,106 +15,114 @@
package rpctypes package rpctypes
import ( import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
// server-side error
var ( var (
// server-side error ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found") ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided") ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided") ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist") ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists") ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members") ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid") ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found") ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err()
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large") ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests") ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist") ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role") ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists") ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty") ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found") ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists") ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found") ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password") ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied") ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user") ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped") ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out") ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
ErrGRPCTimeoutDueToLeaderFail = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure") ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
ErrGRPCTimeoutDueToConnectionLost = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost") ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
errStringToError = map[string]error{ errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
	ErrorDesc(ErrGRPCCompacted):            ErrGRPCCompacted,
	ErrorDesc(ErrGRPCFutureRev):            ErrGRPCFutureRev,
	ErrorDesc(ErrGRPCNoSpace):              ErrGRPCNoSpace,
	ErrorDesc(ErrGRPCLeaseNotFound):        ErrGRPCLeaseNotFound,
	ErrorDesc(ErrGRPCLeaseExist):           ErrGRPCLeaseExist,
	ErrorDesc(ErrGRPCLeaseTTLTooLarge):     ErrGRPCLeaseTTLTooLarge,
	ErrorDesc(ErrGRPCMemberExist):          ErrGRPCMemberExist,
	ErrorDesc(ErrGRPCPeerURLExist):         ErrGRPCPeerURLExist,
	ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
	ErrorDesc(ErrGRPCMemberBadURLs):        ErrGRPCMemberBadURLs,
	ErrorDesc(ErrGRPCMemberNotFound):       ErrGRPCMemberNotFound,
	ErrorDesc(ErrGRPCRequestTooLarge):      ErrGRPCRequestTooLarge,
	ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
	ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
	ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
	ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
	ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
	ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
	ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
	ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
	ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
	ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
	ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
	ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
	ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
	ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
	ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
	ErrorDesc(ErrGRPCNoLeader):             ErrGRPCNoLeader,
	ErrorDesc(ErrGRPCNotLeader):            ErrGRPCNotLeader,
	ErrorDesc(ErrGRPCNotCapable):           ErrGRPCNotCapable,
	ErrorDesc(ErrGRPCStopped):              ErrGRPCStopped,
	ErrorDesc(ErrGRPCTimeout):              ErrGRPCTimeout,
	ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
	ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
	ErrorDesc(ErrGRPCUnhealthy):            ErrGRPCUnhealthy,
	ErrorDesc(ErrGRPCCorrupt):              ErrGRPCCorrupt,
	}
)

// client-side error
var (
	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
	ErrValueProvided = Error(ErrGRPCValueProvided)
@@ -125,8 +133,9 @@ var (
	ErrFutureRev        = Error(ErrGRPCFutureRev)
	ErrNoSpace          = Error(ErrGRPCNoSpace)
	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
	ErrMemberExist      = Error(ErrGRPCMemberExist)
	ErrPeerURLExist     = Error(ErrGRPCPeerURLExist)
@@ -153,12 +162,14 @@ var (
	ErrInvalidAuthMgmt            = Error(ErrGRPCInvalidAuthMgmt)
	ErrNoLeader                   = Error(ErrGRPCNoLeader)
	ErrNotLeader                  = Error(ErrGRPCNotLeader)
	ErrNotCapable                 = Error(ErrGRPCNotCapable)
	ErrStopped                    = Error(ErrGRPCStopped)
	ErrTimeout                    = Error(ErrGRPCTimeout)
	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
	ErrCorrupt                    = Error(ErrGRPCCorrupt)
)

// EtcdError defines gRPC server errors.
@@ -182,9 +193,23 @@ func Error(err error) error {
	if err == nil {
		return nil
	}
	verr, ok := errStringToError[ErrorDesc(err)]
	if !ok { // not gRPC error
		return err
	}
	ev, ok := status.FromError(verr)
	var desc string
	if ok {
		desc = ev.Message()
	} else {
		desc = verr.Error()
	}
	return EtcdError{code: ev.Code(), desc: desc}
}

func ErrorDesc(err error) string {
	if s, ok := status.FromError(err); ok {
		return s.Message()
	}
	return err.Error()
}
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: etcdserver.proto

/*
	Package etcdserverpb is a generated protocol buffer package.
@@ -32,6 +31,8 @@
	CompactionRequest
	CompactionResponse
	HashRequest
	HashKVRequest
	HashKVResponse
	HashResponse
	SnapshotRequest
	SnapshotResponse
@@ -47,6 +48,9 @@
	LeaseKeepAliveResponse
	LeaseTimeToLiveRequest
	LeaseTimeToLiveResponse
	LeaseLeasesRequest
	LeaseStatus
	LeaseLeasesResponse
	Member
	MemberAddRequest
	MemberAddResponse
@@ -58,6 +62,8 @@
	MemberListResponse
	DefragmentRequest
	DefragmentResponse
	MoveLeaderRequest
	MoveLeaderResponse
	AlarmRequest
	AlarmMember
	AlarmResponse
@@ -105,6 +111,8 @@ import (
	math "math"
	_ "github.com/gogo/protobuf/gogoproto"
	io "io"
)
@@ -311,24 +319,6 @@ func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}
func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: raft_internal.proto

package etcdserverpb

@@ -11,6 +10,8 @@ import (
	math "math"
	_ "github.com/gogo/protobuf/gogoproto"
	io "io"
)
@@ -505,24 +506,6 @@ func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}
func encodeFixed64RaftInternal(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32RaftInternal(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
File diff suppressed because it is too large.
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kv.proto

/*
	Package mvccpb is a generated protocol buffer package.
@@ -21,6 +20,8 @@ import (
	math "math"
	_ "github.com/gogo/protobuf/gogoproto"
	io "io"
)
@@ -198,24 +199,6 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}
func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -71,9 +71,10 @@ func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error)
		// SRV records have a trailing dot but URL shouldn't.
		shortHost := strings.TrimSuffix(srv.Target, ".")
		urlHost := net.JoinHostPort(shortHost, port)
		if ok && url.Scheme != scheme {
			err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
		} else {
			stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
		}
	}
	if len(stringParts) == 0 {
@@ -61,7 +61,7 @@ func (us *unsafeSet) Remove(value string) {
// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
	_, exists = us.d[value]
	return exists
}

// ContainsAll returns whether the set contains all given values
@@ -94,7 +94,7 @@ func (us *unsafeSet) Values() (values []string) {
	for val := range us.d {
		values = append(values, val)
	}
	return values
}

// Copy creates a new Set containing the values of the first
@@ -26,7 +26,7 @@ import (
var (
	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
	MinClusterVersion = "3.0.0"
	Version           = "3.3.5"
	APIVersion        = "unknown"

	// Git SHA Value will be set during build
vendor/github.com/gogo/protobuf/gogoproto/doc.go (new vendored file)

@@ -0,0 +1,168 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package gogoproto provides extensions for protocol buffers to achieve:
- fast marshalling and unmarshalling.
- peace of mind by optionally generating test and benchmark code.
- more canonical Go structures.
- less typing by optionally generating extra helper code.
- goprotobuf compatibility
More Canonical Go Structures
A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs.
You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
- nullable, if false, a field is generated without a pointer (see warning below).
- embed, if true, the field is generated as an embedded field.
- customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128
- customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
- casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums.
- castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
- castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps.
Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
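As a rough illustrative sketch of that trade-off (the struct and field names below are made up, not taken from gogoproto's test files), the practical difference in the generated Go code is:

	// nullable=true (the default): the field is a pointer, so presence is recoverable.
	type WithPointer struct {
		Description *string
	}

	// nullable=false: the field is a plain value, so "" may mean either unset or empty.
	type WithoutPointer struct {
		Description string
	}

	// isSet only works for the pointer form; the value form has lost presence information.
	func isSet(m WithPointer) bool { return m.Description != nil }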
Let us look at:
github.com/gogo/protobuf/test/example/example.proto
for a quicker overview.
The following message:
package test;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
message A {
optional string Description = 1 [(gogoproto.nullable) = false];
optional int64 Number = 2 [(gogoproto.nullable) = false];
optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
}
Will generate a go struct which looks a lot like this:
type A struct {
Description string
Number int64
Id github_com_gogo_protobuf_test_custom.Uuid
}
You will see there are no pointers, since all fields are non-nullable.
You will also see a custom type which marshals to a string.
Be warned it is your responsibility to test your custom types thoroughly.
You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
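As a rough, illustrative sketch (not taken from the gogoproto sources) of the method set the generated marshalling, unmarshalling and size code is usually expected to call on such a custom type:

	// Uuid is an example bytes-backed custom type for use with (gogoproto.customtype).
	type Uuid [16]byte

	// Size reports how many bytes MarshalTo will write.
	func (u Uuid) Size() int { return len(u) }

	// MarshalTo writes the value into data and returns the number of bytes written.
	func (u Uuid) MarshalTo(data []byte) (int, error) {
		return copy(data, u[:]), nil
	}

	// Marshal returns the wire representation of the value.
	func (u Uuid) Marshal() ([]byte, error) {
		data := make([]byte, u.Size())
		n, err := u.MarshalTo(data)
		return data[:n], err
	}

	// Unmarshal restores the value, handling the empty case explicitly
	// ("errors" here is the standard library package).
	func (u *Uuid) Unmarshal(data []byte) error {
		if len(data) == 0 {
			*u = Uuid{}
			return nil
		}
		if len(data) != len(u) {
			return errors.New("uuid: invalid length")
		}
		copy(u[:], data)
		return nil
	}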
Next we will embed the message A in message B.
message B {
optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
}
See below that A is embedded in B.
type B struct {
A
G []github_com_gogo_protobuf_test_custom.Uint128
}
Also see the repeated custom type.
type Uint128 [2]uint64
Next we will create a custom name for one of our fields.
message C {
optional int64 size = 1 [(gogoproto.customname) = "MySize"];
}
See below that the field's name is MySize and not Size.
type C struct {
MySize *int64
}
This is useful when you have a protocol buffer message with a field name which conflicts with a generated method.
As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
Using customname you can fix this error without changing the field name.
This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
Gogoprotobuf also has some more subtle changes, these could be changed back:
- the generated package name for imports do not have the extra /filename.pb,
but are actually the imports specified in the .proto file.
Gogoprotobuf also has lost some features which should be brought back with time:
- Marshalling and unmarshalling with reflect and without the unsafe package,
this requires work in pointer_reflect.go
Why does nullable break protocol buffer specifications:
The protocol buffer specification states, somewhere, that you should be able to tell whether a
field is set or unset. With the option nullable=false this feature is lost,
since your non-nullable fields will always be set. It can be seen as a layer on top of
protocol buffers, where before and after marshalling all non-nullable fields are set
and they cannot be unset.
Goprotobuf Compatibility:
Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
- gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
- goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
- goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method.
- goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face
- goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method.
- goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
- goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
Less Typing and Peace of Mind is explained in their specific plugin folders godoc:
- github.com/gogo/protobuf/plugin/<extension_name>
If you do not use any of these extension the code that is generated
will be the same as if goprotobuf has generated it.
The most complete way to see examples is to look at
github.com/gogo/protobuf/test/thetest.proto
Gogoprototest is a separate project,
because we want to keep gogoprotobuf independent of goprotobuf,
but we still want to test it thoroughly.
*/
package gogoproto
vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go (new vendored file)

@@ -0,0 +1,665 @@
// Code generated by protoc-gen-gogo.
// source: gogo.proto
// DO NOT EDIT!
/*
Package gogoproto is a generated protocol buffer package.
It is generated from these files:
gogo.proto
It has these top-level messages:
*/
package gogoproto
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 62001,
Name: "gogoproto.goproto_enum_prefix",
Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix",
}
var E_GoprotoEnumStringer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 62021,
Name: "gogoproto.goproto_enum_stringer",
Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer",
}
var E_EnumStringer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 62022,
Name: "gogoproto.enum_stringer",
Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer",
}
var E_EnumCustomname = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumOptions)(nil),
ExtensionType: (*string)(nil),
Field: 62023,
Name: "gogoproto.enum_customname",
Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname",
}
var E_EnumvalueCustomname = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.EnumValueOptions)(nil),
ExtensionType: (*string)(nil),
Field: 66001,
Name: "gogoproto.enumvalue_customname",
Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname",
}
var E_GoprotoGettersAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63001,
Name: "gogoproto.goproto_getters_all",
Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll",
}
var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63002,
Name: "gogoproto.goproto_enum_prefix_all",
Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll",
}
var E_GoprotoStringerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63003,
Name: "gogoproto.goproto_stringer_all",
Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll",
}
var E_VerboseEqualAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63004,
Name: "gogoproto.verbose_equal_all",
Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll",
}
var E_FaceAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63005,
Name: "gogoproto.face_all",
Tag: "varint,63005,opt,name=face_all,json=faceAll",
}
var E_GostringAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63006,
Name: "gogoproto.gostring_all",
Tag: "varint,63006,opt,name=gostring_all,json=gostringAll",
}
var E_PopulateAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63007,
Name: "gogoproto.populate_all",
Tag: "varint,63007,opt,name=populate_all,json=populateAll",
}
var E_StringerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63008,
Name: "gogoproto.stringer_all",
Tag: "varint,63008,opt,name=stringer_all,json=stringerAll",
}
var E_OnlyoneAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63009,
Name: "gogoproto.onlyone_all",
Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll",
}
var E_EqualAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63013,
Name: "gogoproto.equal_all",
Tag: "varint,63013,opt,name=equal_all,json=equalAll",
}
var E_DescriptionAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63014,
Name: "gogoproto.description_all",
Tag: "varint,63014,opt,name=description_all,json=descriptionAll",
}
var E_TestgenAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63015,
Name: "gogoproto.testgen_all",
Tag: "varint,63015,opt,name=testgen_all,json=testgenAll",
}
var E_BenchgenAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63016,
Name: "gogoproto.benchgen_all",
Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll",
}
var E_MarshalerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63017,
Name: "gogoproto.marshaler_all",
Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll",
}
var E_UnmarshalerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63018,
Name: "gogoproto.unmarshaler_all",
Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll",
}
var E_StableMarshalerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63019,
Name: "gogoproto.stable_marshaler_all",
Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll",
}
var E_SizerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63020,
Name: "gogoproto.sizer_all",
Tag: "varint,63020,opt,name=sizer_all,json=sizerAll",
}
var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63021,
Name: "gogoproto.goproto_enum_stringer_all",
Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll",
}
var E_EnumStringerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63022,
Name: "gogoproto.enum_stringer_all",
Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll",
}
var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63023,
Name: "gogoproto.unsafe_marshaler_all",
Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll",
}
var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63024,
Name: "gogoproto.unsafe_unmarshaler_all",
Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll",
}
var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63025,
Name: "gogoproto.goproto_extensions_map_all",
Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll",
}
var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63026,
Name: "gogoproto.goproto_unrecognized_all",
Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll",
}
var E_GogoprotoImport = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63027,
Name: "gogoproto.gogoproto_import",
Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport",
}
var E_ProtosizerAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63028,
Name: "gogoproto.protosizer_all",
Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll",
}
var E_CompareAll = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FileOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 63029,
Name: "gogoproto.compare_all",
Tag: "varint,63029,opt,name=compare_all,json=compareAll",
}
var E_GoprotoGetters = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64001,
Name: "gogoproto.goproto_getters",
Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters",
}
var E_GoprotoStringer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64003,
Name: "gogoproto.goproto_stringer",
Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer",
}
var E_VerboseEqual = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64004,
Name: "gogoproto.verbose_equal",
Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual",
}
var E_Face = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64005,
Name: "gogoproto.face",
Tag: "varint,64005,opt,name=face",
}
var E_Gostring = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64006,
Name: "gogoproto.gostring",
Tag: "varint,64006,opt,name=gostring",
}
var E_Populate = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64007,
Name: "gogoproto.populate",
Tag: "varint,64007,opt,name=populate",
}
var E_Stringer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 67008,
Name: "gogoproto.stringer",
Tag: "varint,67008,opt,name=stringer",
}
var E_Onlyone = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64009,
Name: "gogoproto.onlyone",
Tag: "varint,64009,opt,name=onlyone",
}
var E_Equal = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64013,
Name: "gogoproto.equal",
Tag: "varint,64013,opt,name=equal",
}
var E_Description = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64014,
Name: "gogoproto.description",
Tag: "varint,64014,opt,name=description",
}
var E_Testgen = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64015,
Name: "gogoproto.testgen",
Tag: "varint,64015,opt,name=testgen",
}
var E_Benchgen = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64016,
Name: "gogoproto.benchgen",
Tag: "varint,64016,opt,name=benchgen",
}
var E_Marshaler = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64017,
Name: "gogoproto.marshaler",
Tag: "varint,64017,opt,name=marshaler",
}
var E_Unmarshaler = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64018,
Name: "gogoproto.unmarshaler",
Tag: "varint,64018,opt,name=unmarshaler",
}
var E_StableMarshaler = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64019,
Name: "gogoproto.stable_marshaler",
Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler",
}
var E_Sizer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64020,
Name: "gogoproto.sizer",
Tag: "varint,64020,opt,name=sizer",
}
var E_UnsafeMarshaler = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64023,
Name: "gogoproto.unsafe_marshaler",
Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler",
}
var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64024,
Name: "gogoproto.unsafe_unmarshaler",
Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler",
}
var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64025,
Name: "gogoproto.goproto_extensions_map",
Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap",
}
var E_GoprotoUnrecognized = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64026,
Name: "gogoproto.goproto_unrecognized",
Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized",
}
var E_Protosizer = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64028,
Name: "gogoproto.protosizer",
Tag: "varint,64028,opt,name=protosizer",
}
var E_Compare = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MessageOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 64029,
Name: "gogoproto.compare",
Tag: "varint,64029,opt,name=compare",
}
var E_Nullable = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 65001,
Name: "gogoproto.nullable",
Tag: "varint,65001,opt,name=nullable",
}
var E_Embed = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*bool)(nil),
Field: 65002,
Name: "gogoproto.embed",
Tag: "varint,65002,opt,name=embed",
}
var E_Customtype = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65003,
Name: "gogoproto.customtype",
Tag: "bytes,65003,opt,name=customtype",
}
var E_Customname = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65004,
Name: "gogoproto.customname",
Tag: "bytes,65004,opt,name=customname",
}
var E_Jsontag = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65005,
Name: "gogoproto.jsontag",
Tag: "bytes,65005,opt,name=jsontag",
}
var E_Moretags = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65006,
Name: "gogoproto.moretags",
Tag: "bytes,65006,opt,name=moretags",
}
var E_Casttype = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65007,
Name: "gogoproto.casttype",
Tag: "bytes,65007,opt,name=casttype",
}
var E_Castkey = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65008,
Name: "gogoproto.castkey",
Tag: "bytes,65008,opt,name=castkey",
}
var E_Castvalue = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.FieldOptions)(nil),
ExtensionType: (*string)(nil),
Field: 65009,
Name: "gogoproto.castvalue",
Tag: "bytes,65009,opt,name=castvalue",
}
func init() {
proto.RegisterExtension(E_GoprotoEnumPrefix)
proto.RegisterExtension(E_GoprotoEnumStringer)
proto.RegisterExtension(E_EnumStringer)
proto.RegisterExtension(E_EnumCustomname)
proto.RegisterExtension(E_EnumvalueCustomname)
proto.RegisterExtension(E_GoprotoGettersAll)
proto.RegisterExtension(E_GoprotoEnumPrefixAll)
proto.RegisterExtension(E_GoprotoStringerAll)
proto.RegisterExtension(E_VerboseEqualAll)
proto.RegisterExtension(E_FaceAll)
proto.RegisterExtension(E_GostringAll)
proto.RegisterExtension(E_PopulateAll)
proto.RegisterExtension(E_StringerAll)
proto.RegisterExtension(E_OnlyoneAll)
proto.RegisterExtension(E_EqualAll)
proto.RegisterExtension(E_DescriptionAll)
proto.RegisterExtension(E_TestgenAll)
proto.RegisterExtension(E_BenchgenAll)
proto.RegisterExtension(E_MarshalerAll)
proto.RegisterExtension(E_UnmarshalerAll)
proto.RegisterExtension(E_StableMarshalerAll)
proto.RegisterExtension(E_SizerAll)
proto.RegisterExtension(E_GoprotoEnumStringerAll)
proto.RegisterExtension(E_EnumStringerAll)
proto.RegisterExtension(E_UnsafeMarshalerAll)
proto.RegisterExtension(E_UnsafeUnmarshalerAll)
proto.RegisterExtension(E_GoprotoExtensionsMapAll)
proto.RegisterExtension(E_GoprotoUnrecognizedAll)
proto.RegisterExtension(E_GogoprotoImport)
proto.RegisterExtension(E_ProtosizerAll)
proto.RegisterExtension(E_CompareAll)
proto.RegisterExtension(E_GoprotoGetters)
proto.RegisterExtension(E_GoprotoStringer)
proto.RegisterExtension(E_VerboseEqual)
proto.RegisterExtension(E_Face)
proto.RegisterExtension(E_Gostring)
proto.RegisterExtension(E_Populate)
proto.RegisterExtension(E_Stringer)
proto.RegisterExtension(E_Onlyone)
proto.RegisterExtension(E_Equal)
proto.RegisterExtension(E_Description)
proto.RegisterExtension(E_Testgen)
proto.RegisterExtension(E_Benchgen)
proto.RegisterExtension(E_Marshaler)
proto.RegisterExtension(E_Unmarshaler)
proto.RegisterExtension(E_StableMarshaler)
proto.RegisterExtension(E_Sizer)
proto.RegisterExtension(E_UnsafeMarshaler)
proto.RegisterExtension(E_UnsafeUnmarshaler)
proto.RegisterExtension(E_GoprotoExtensionsMap)
proto.RegisterExtension(E_GoprotoUnrecognized)
proto.RegisterExtension(E_Protosizer)
proto.RegisterExtension(E_Compare)
proto.RegisterExtension(E_Nullable)
proto.RegisterExtension(E_Embed)
proto.RegisterExtension(E_Customtype)
proto.RegisterExtension(E_Customname)
proto.RegisterExtension(E_Jsontag)
proto.RegisterExtension(E_Moretags)
proto.RegisterExtension(E_Casttype)
proto.RegisterExtension(E_Castkey)
proto.RegisterExtension(E_Castvalue)
}
func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) }
var fileDescriptorGogo = []byte{
// 1098 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45,
0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9,
0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87,
0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21,
0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0,
0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7,
0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e,
0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff,
0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20,
0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62,
0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3,
0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06,
0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18,
0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66,
0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31,
0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63,
0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35,
0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69,
0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb,
0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce,
0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb,
0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e,
0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2,
0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97,
0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 0x3e, 0x2b, 0xb4, 0x51, 0xcf,
0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b,
0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a,
0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9,
0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc,
0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2,
0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7,
0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7,
0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83,
0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e,
0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52,
0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7,
0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d,
0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28,
0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c,
0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8,
0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68,
0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42,
0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4,
0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b,
0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35,
0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14,
0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6,
0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94,
0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a,
0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae,
0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c,
0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2,
0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1,
0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06,
0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7,
0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd,
0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d,
0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c,
0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16,
0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18,
0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 0xab,
0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35,
0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f,
0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53,
0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87,
0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38,
0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae,
0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 0x00, 0x00,
0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00,
}
vendor/github.com/gogo/protobuf/gogoproto/helper.go (new vendored file)

@@ -0,0 +1,310 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gogoproto
import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
import proto "github.com/gogo/protobuf/proto"
func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
return proto.GetBoolExtension(field.Options, E_Embed, false)
}
func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
return proto.GetBoolExtension(field.Options, E_Nullable, true)
}
func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
nullable := IsNullable(field)
if field.IsMessage() || IsCustomType(field) {
return nullable
}
if proto3 {
return false
}
return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
}
func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
typ := GetCustomType(field)
if len(typ) > 0 {
return true
}
return false
}
func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
typ := GetCastType(field)
if len(typ) > 0 {
return true
}
return false
}
func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
typ := GetCastKey(field)
if len(typ) > 0 {
return true
}
return false
}
func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
typ := GetCastValue(field)
if len(typ) > 0 {
return true
}
return false
}
func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Customtype)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Casttype)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Castkey)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Castvalue)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
name := GetCustomName(field)
if len(name) > 0 {
return true
}
return false
}
func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
name := GetEnumCustomName(field)
if len(name) > 0 {
return true
}
return false
}
func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
name := GetEnumValueCustomName(field)
if len(name) > 0 {
return true
}
return false
}
func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Customname)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_EnumCustomname)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
if err == nil && v.(*string) != nil {
return *(v.(*string))
}
}
return ""
}
func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Jsontag)
if err == nil && v.(*string) != nil {
return (v.(*string))
}
}
return nil
}
func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
if field.Options != nil {
v, err := proto.GetExtension(field.Options, E_Moretags)
if err == nil && v.(*string) != nil {
return (v.(*string))
}
}
return nil
}
type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
}
func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
}
func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
}
func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
}
func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
}
func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
}
func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
}
func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
}
func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
}
func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
}
func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
}
func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
}
func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
}
func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
}
func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
}
func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
}
func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
}
func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
}
func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
}
func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
}
func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
}
func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
}
func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
}
func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
if IsProto3(file) {
return false
}
return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
}
func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
return file.GetSyntax() == "proto3"
}
func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
}
func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
}
File diff suppressed because it is too large.
@@ -0,0 +1,658 @@
// Code generated by protoc-gen-gogo.
// source: descriptor.proto
// DO NOT EDIT!
/*
Package descriptor is a generated protocol buffer package.
It is generated from these files:
descriptor.proto
It has these top-level messages:
FileDescriptorSet
FileDescriptorProto
DescriptorProto
FieldDescriptorProto
OneofDescriptorProto
EnumDescriptorProto
EnumValueDescriptorProto
ServiceDescriptorProto
MethodDescriptorProto
FileOptions
MessageOptions
FieldOptions
EnumOptions
EnumValueOptions
ServiceOptions
MethodOptions
UninterpretedOption
SourceCodeInfo
*/
package descriptor
import fmt "fmt"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import proto "github.com/gogo/protobuf/proto"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
func (this *FileDescriptorSet) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&descriptor.FileDescriptorSet{")
if this.File != nil {
s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *FileDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 16)
s = append(s, "&descriptor.FileDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.Package != nil {
s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
}
if this.Dependency != nil {
s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
}
if this.PublicDependency != nil {
s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
}
if this.WeakDependency != nil {
s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
}
if this.MessageType != nil {
s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
}
if this.EnumType != nil {
s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
}
if this.Service != nil {
s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
}
if this.Extension != nil {
s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.SourceCodeInfo != nil {
s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
}
if this.Syntax != nil {
s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *DescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 14)
s = append(s, "&descriptor.DescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.Field != nil {
s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
}
if this.Extension != nil {
s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
}
if this.NestedType != nil {
s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
}
if this.EnumType != nil {
s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
}
if this.ExtensionRange != nil {
s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
}
if this.OneofDecl != nil {
s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.ReservedRange != nil {
s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
}
if this.ReservedName != nil {
s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *DescriptorProto_ExtensionRange) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
if this.Start != nil {
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
}
if this.End != nil {
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *DescriptorProto_ReservedRange) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
if this.Start != nil {
s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
}
if this.End != nil {
s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *FieldDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 14)
s = append(s, "&descriptor.FieldDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.Number != nil {
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
}
if this.Label != nil {
s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n")
}
if this.Type != nil {
s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n")
}
if this.TypeName != nil {
s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
}
if this.Extendee != nil {
s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
}
if this.DefaultValue != nil {
s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
}
if this.OneofIndex != nil {
s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
}
if this.JsonName != nil {
s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *OneofDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&descriptor.OneofDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *EnumDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&descriptor.EnumDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.Value != nil {
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *EnumValueDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&descriptor.EnumValueDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.Number != nil {
s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ServiceDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&descriptor.ServiceDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.Method != nil {
s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *MethodDescriptorProto) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 10)
s = append(s, "&descriptor.MethodDescriptorProto{")
if this.Name != nil {
s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
}
if this.InputType != nil {
s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
}
if this.OutputType != nil {
s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
}
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
if this.ClientStreaming != nil {
s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
}
if this.ServerStreaming != nil {
s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *FileOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 20)
s = append(s, "&descriptor.FileOptions{")
if this.JavaPackage != nil {
s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
}
if this.JavaOuterClassname != nil {
s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
}
if this.JavaMultipleFiles != nil {
s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
}
if this.JavaGenerateEqualsAndHash != nil {
s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
}
if this.JavaStringCheckUtf8 != nil {
s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
}
if this.OptimizeFor != nil {
s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n")
}
if this.GoPackage != nil {
s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
}
if this.CcGenericServices != nil {
s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
}
if this.JavaGenericServices != nil {
s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
}
if this.PyGenericServices != nil {
s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
}
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.CcEnableArenas != nil {
s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
}
if this.ObjcClassPrefix != nil {
s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
}
if this.CsharpNamespace != nil {
s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
}
if this.JavananoUseDeprecatedPackage != nil {
s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *MessageOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&descriptor.MessageOptions{")
if this.MessageSetWireFormat != nil {
s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
}
if this.NoStandardDescriptorAccessor != nil {
s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
}
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.MapEntry != nil {
s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *FieldOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 11)
s = append(s, "&descriptor.FieldOptions{")
if this.Ctype != nil {
s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n")
}
if this.Packed != nil {
s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
}
if this.Jstype != nil {
s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n")
}
if this.Lazy != nil {
s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
}
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.Weak != nil {
s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *EnumOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 7)
s = append(s, "&descriptor.EnumOptions{")
if this.AllowAlias != nil {
s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
}
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *EnumValueOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&descriptor.EnumValueOptions{")
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *ServiceOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&descriptor.ServiceOptions{")
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *MethodOptions) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&descriptor.MethodOptions{")
if this.Deprecated != nil {
s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
}
if this.UninterpretedOption != nil {
s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
}
s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *UninterpretedOption) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 11)
s = append(s, "&descriptor.UninterpretedOption{")
if this.Name != nil {
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
}
if this.IdentifierValue != nil {
s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
}
if this.PositiveIntValue != nil {
s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
}
if this.NegativeIntValue != nil {
s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
}
if this.DoubleValue != nil {
s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
}
if this.StringValue != nil {
s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
}
if this.AggregateValue != nil {
s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *UninterpretedOption_NamePart) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&descriptor.UninterpretedOption_NamePart{")
if this.NamePart != nil {
s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
}
if this.IsExtension != nil {
s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *SourceCodeInfo) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&descriptor.SourceCodeInfo{")
if this.Location != nil {
s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func (this *SourceCodeInfo_Location) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 9)
s = append(s, "&descriptor.SourceCodeInfo_Location{")
if this.Path != nil {
s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
}
if this.Span != nil {
s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
}
if this.LeadingComments != nil {
s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
}
if this.TrailingComments != nil {
s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
}
if this.LeadingDetachedComments != nil {
s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
}
if this.XXX_unrecognized != nil {
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringDescriptor(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
if e == nil {
return "nil"
}
s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "})"
return s
}
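// Illustration (not part of the vendored file): these generated GoString
// methods implement fmt.GoStringer, so printing a descriptor with the %#v
// verb, e.g. fmt.Printf("%#v\n", fileDesc), yields a
// &descriptor.FileDescriptorProto{...} literal that is itself valid Go source.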


@@ -0,0 +1,357 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package descriptor
import (
"strings"
)
func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
if !msg.GetOptions().GetMapEntry() {
return nil, nil
}
return msg.GetField()[0], msg.GetField()[1]
}
func dotToUnderscore(r rune) rune {
if r == '.' {
return '_'
}
return r
}
func (field *FieldDescriptorProto) WireType() (wire int) {
switch *field.Type {
case FieldDescriptorProto_TYPE_DOUBLE:
return 1
case FieldDescriptorProto_TYPE_FLOAT:
return 5
case FieldDescriptorProto_TYPE_INT64:
return 0
case FieldDescriptorProto_TYPE_UINT64:
return 0
case FieldDescriptorProto_TYPE_INT32:
return 0
case FieldDescriptorProto_TYPE_UINT32:
return 0
case FieldDescriptorProto_TYPE_FIXED64:
return 1
case FieldDescriptorProto_TYPE_FIXED32:
return 5
case FieldDescriptorProto_TYPE_BOOL:
return 0
case FieldDescriptorProto_TYPE_STRING:
return 2
case FieldDescriptorProto_TYPE_GROUP:
return 2
case FieldDescriptorProto_TYPE_MESSAGE:
return 2
case FieldDescriptorProto_TYPE_BYTES:
return 2
case FieldDescriptorProto_TYPE_ENUM:
return 0
case FieldDescriptorProto_TYPE_SFIXED32:
return 5
case FieldDescriptorProto_TYPE_SFIXED64:
return 1
case FieldDescriptorProto_TYPE_SINT32:
return 0
case FieldDescriptorProto_TYPE_SINT64:
return 0
}
panic("unreachable")
}
func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
packed := field.IsPacked()
wireType := field.WireType()
fieldNumber := field.GetNumber()
if packed {
wireType = 2
}
x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
return x
}
func (field *FieldDescriptorProto) GetKey() []byte {
x := field.GetKeyUint64()
i := 0
keybuf := make([]byte, 0)
for i = 0; x > 127; i++ {
keybuf = append(keybuf, 0x80|uint8(x&0x7F))
x >>= 7
}
keybuf = append(keybuf, uint8(x))
return keybuf
}
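// Worked example (not part of the vendored file): GetKeyUint64 packs the key
// as fieldNumber<<3 | wireType, and GetKey varint-encodes it. For field
// number 2 with wire type 2 (length-delimited) the key is 2<<3 | 2 = 0x12,
// a single byte; for field number 16 with wire type 0 the key is 16<<3 = 128,
// which varint-encodes to the two bytes 0x80 0x01.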
func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
msg := desc.GetMessage(packageName, messageName)
if msg == nil {
return nil
}
for _, field := range msg.GetField() {
if field.GetName() == fieldName {
return field
}
}
return nil
}
func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
for _, msg := range file.GetMessageType() {
if msg.GetName() == typeName {
return msg
}
nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
if nes != nil {
return nes
}
}
return nil
}
func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
for _, nes := range msg.GetNestedType() {
if nes.GetName() == typeName {
return nes
}
res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
if res != nil {
return res
}
}
return nil
}
func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
for _, file := range desc.GetFile() {
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
continue
}
for _, msg := range file.GetMessageType() {
if msg.GetName() == typeName {
return msg
}
}
for _, msg := range file.GetMessageType() {
for _, nes := range msg.GetNestedType() {
if nes.GetName() == typeName {
return nes
}
if msg.GetName()+"."+nes.GetName() == typeName {
return nes
}
}
}
}
return nil
}
func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
for _, file := range desc.GetFile() {
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
continue
}
for _, msg := range file.GetMessageType() {
if msg.GetName() == typeName {
return file.GetSyntax() == "proto3"
}
}
for _, msg := range file.GetMessageType() {
for _, nes := range msg.GetNestedType() {
if nes.GetName() == typeName {
return file.GetSyntax() == "proto3"
}
if msg.GetName()+"."+nes.GetName() == typeName {
return file.GetSyntax() == "proto3"
}
}
}
}
return false
}
func (msg *DescriptorProto) IsExtendable() bool {
return len(msg.GetExtensionRange()) > 0
}
func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
parent := desc.GetMessage(packageName, typeName)
if parent == nil {
return "", nil
}
if !parent.IsExtendable() {
return "", nil
}
extendee := "." + packageName + "." + typeName
for _, file := range desc.GetFile() {
for _, ext := range file.GetExtension() {
if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
continue
}
} else {
if ext.GetExtendee() != extendee {
continue
}
}
if ext.GetName() == fieldName {
return file.GetPackage(), ext
}
}
}
return "", nil
}
func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
parent := desc.GetMessage(packageName, typeName)
if parent == nil {
return "", nil
}
if !parent.IsExtendable() {
return "", nil
}
extendee := "." + packageName + "." + typeName
for _, file := range desc.GetFile() {
for _, ext := range file.GetExtension() {
if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
continue
}
} else {
if ext.GetExtendee() != extendee {
continue
}
}
if ext.GetNumber() == fieldNum {
return file.GetPackage(), ext
}
}
}
return "", nil
}
func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
parent := desc.GetMessage(packageName, typeName)
if parent == nil {
return "", ""
}
field := parent.GetFieldDescriptor(fieldName)
if field == nil {
var extPackageName string
extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
if field == nil {
return "", ""
}
packageName = extPackageName
}
typeNames := strings.Split(field.GetTypeName(), ".")
if len(typeNames) == 1 {
msg := desc.GetMessage(packageName, typeName)
if msg == nil {
return "", ""
}
return packageName, msg.GetName()
}
if len(typeNames) > 2 {
for i := 1; i < len(typeNames)-1; i++ {
packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
msg := desc.GetMessage(packageName, typeName)
if msg != nil {
typeNames := strings.Split(msg.GetName(), ".")
if len(typeNames) == 1 {
return packageName, msg.GetName()
}
return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
}
}
}
return "", ""
}
func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
for _, field := range msg.GetField() {
if field.GetName() == fieldName {
return field
}
}
return nil
}
func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
for _, file := range desc.GetFile() {
if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
continue
}
for _, enum := range file.GetEnumType() {
if enum.GetName() == typeName {
return enum
}
}
}
return nil
}
func (f *FieldDescriptorProto) IsEnum() bool {
return *f.Type == FieldDescriptorProto_TYPE_ENUM
}
func (f *FieldDescriptorProto) IsMessage() bool {
return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
}
func (f *FieldDescriptorProto) IsBytes() bool {
return *f.Type == FieldDescriptorProto_TYPE_BYTES
}
func (f *FieldDescriptorProto) IsRepeated() bool {
return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
}
func (f *FieldDescriptorProto) IsString() bool {
return *f.Type == FieldDescriptorProto_TYPE_STRING
}
func (f *FieldDescriptorProto) IsBool() bool {
return *f.Type == FieldDescriptorProto_TYPE_BOOL
}
func (f *FieldDescriptorProto) IsRequired() bool {
return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
}
func (f *FieldDescriptorProto) IsPacked() bool {
return f.Options != nil && f.GetOptions().GetPacked()
}
func (m *DescriptorProto) HasExtension() bool {
return len(m.ExtensionRange) > 0
}
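The lookup helpers above operate on a FileDescriptorSet, typically one written by protoc --descriptor_set_out. A minimal usage sketch follows (illustrative only, not part of the vendored file; the desc.pb path and the looked-up names are assumptions):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Assumed input: protoc --descriptor_set_out=desc.pb --include_imports ...
	raw, err := ioutil.ReadFile("desc.pb")
	if err != nil {
		log.Fatal(err)
	}
	set := &descriptor.FileDescriptorSet{}
	if err := proto.Unmarshal(raw, set); err != nil {
		log.Fatal(err)
	}
	// Look up a message and inspect one of its fields with the helpers above.
	if msg := set.GetMessage("google.protobuf", "FieldDescriptorProto"); msg != nil {
		if f := msg.GetFieldDescriptor("number"); f != nil {
			fmt.Println(f.GetName(), f.WireType(), f.IsRepeated())
		}
	}
}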


@@ -35,22 +35,39 @@
package proto
import (
+"fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+in := reflect.ValueOf(src)
if in.IsNil() {
-return pb
+return src
}
out := reflect.New(in.Type().Elem())
-// out is empty so a merge is a deep copy.
-mergeStruct(out.Elem(), in.Elem())
-return out.Interface().(Message)
+dst := out.Interface().(Message)
+Merge(dst, src)
+return dst
+}
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+// Merge merges src into this message.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+//
+// Merge may panic if called with a different argument type than the receiver.
+Merge(src Message)
+}
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generate Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+XXX_Merge(src Message)
}
// Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
+if m, ok := dst.(Merger); ok {
+m.Merge(src)
+return
+}
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
-// Explicit test prior to mergeStruct so that mistyped nils will fail
-panic("proto: type mismatch")
+panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
-// Merging nil into non-nil is a quiet no-op
+return // Merge from nil src is a noop
+}
+if m, ok := dst.(generatedMerger); ok {
+m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
@@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
-if emIn, ok := extendable(in.Addr().Interface()); ok {
+if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
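The hunks above only add fast paths for generated messages; the exported Clone and Merge semantics are unchanged (Clone deep-copies, Merge appends repeated fields). A minimal sketch, illustrative only and not part of the commit; FileDescriptorProto is used purely as a convenient concrete message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	src := &descpb.FileDescriptorProto{
		Name:       proto.String("a.proto"),
		Dependency: []string{"b.proto"},
	}
	dst := proto.Clone(src).(*descpb.FileDescriptorProto) // deep copy, now routed through Merge
	proto.Merge(dst, src)                                  // repeated fields append
	fmt.Println(dst.GetName(), dst.GetDependency())        // a.proto [b.proto b.proto]
}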


@@ -39,8 +39,6 @@ import (
"errors"
"fmt"
"io"
-"os"
-"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
@@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
return
}
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
+// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+XXX_Unmarshal([]byte) error
+}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
@@ -395,7 +334,13 @@ type Unmarshaler interface {
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
-return UnmarshalMerge(buf, pb)
+if u, ok := pb.(newUnmarshaler); ok {
+return u.XXX_Unmarshal(buf)
+}
+if u, ok := pb.(Unmarshaler); ok {
+return u.Unmarshal(buf)
+}
+return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
@@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
-// If the object can unmarshal itself, let it.
+if u, ok := pb.(newUnmarshaler); ok {
+return u.XXX_Unmarshal(buf)
+}
if u, ok := pb.(Unmarshaler); ok {
+// NOTE: The history of proto have unfortunately been inconsistent
+// whether Unmarshaler should or should not implicitly clear itself.
+// Some implementations do, most do not.
+// Thus, calling this here may or may not do what people want.
+//
+// See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
@@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error {
}
// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
-typ, base, err := getbase(pb)
-if err != nil {
-return err
+b := p.buf[p.index:]
+x, y := findEndGroup(b)
+if x < 0 {
+return io.ErrUnexpectedEOF
}
-return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+err := Unmarshal(b[:x], pb)
+p.index += y
+return err
}
// Unmarshal parses the protocol buffer representation in the
@@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
+if u, ok := pb.(newUnmarshaler); ok {
+err := u.XXX_Unmarshal(p.buf[p.index:])
+p.index = len(p.buf)
+return err
+}
if u, ok := pb.(Unmarshaler); ok {
+// NOTE: The history of proto have unfortunately been inconsistent
+// whether Unmarshaler should or should not implicitly clear itself.
+// Some implementations do, most do not.
+// Thus, calling this here may or may not do what people want.
+//
+// See https://github.com/golang/protobuf/issues/424
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
-typ, base, err := getbase(pb)
-if err != nil {
-return err
-}
-err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-if collectStats {
-stats.Decode++
-}
+// Slow workaround for messages that aren't Unmarshalers.
+// This includes some hand-coded .pb.go files and
+// bootstrap protos.
+// TODO: fix all of those and then add Unmarshal to
+// the Message interface. Then:
+// The cast above and code below can be deleted.
+// The old unmarshaler can be deleted.
+// Clients can call Unmarshal directly (can already do that, actually).
+var info InternalMessageInfo
+err := info.Unmarshal(pb, p.buf[p.index:])
p.index = len(p.buf)
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
extmap := e.extensionsWrite()
ext := extmap[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
extmap[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
}
v.SetMapIndex(keyelem, valelem)
return nil
}
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}

vendor/github.com/golang/protobuf/proto/discard.go (new file, generated, vendored, 350 lines)

@@ -0,0 +1,350 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
type generatedDiscarder interface {
XXX_DiscardUnknown()
}
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
if m, ok := m.(generatedDiscarder); ok {
m.XXX_DiscardUnknown()
return
}
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
// but the master branch has no implementation for InternalMessageInfo,
// so it would be more work to replicate that approach.
discardLegacy(m)
}
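// Illustration (not part of the vendored file): a typical call site discards
// the unknown fields that Unmarshal preserved before re-marshaling, e.g.
//
//	_ = proto.Unmarshal(data, msg)
//	proto.DiscardUnknown(msg)
//	out, err := proto.Marshal(msg)
//
// where msg is any generated proto.Message value.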
// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
di := atomicLoadDiscardInfo(&a.discard)
if di == nil {
di = getDiscardInfo(reflect.TypeOf(m).Elem())
atomicStoreDiscardInfo(&a.discard, di)
}
di.discard(toPointer(&m))
}
type discardInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []discardFieldInfo
unrecognized field
}
type discardFieldInfo struct {
field field // Offset of field, guaranteed to be valid
discard func(src pointer)
}
var (
discardInfoMap = map[reflect.Type]*discardInfo{}
discardInfoLock sync.Mutex
)
func getDiscardInfo(t reflect.Type) *discardInfo {
discardInfoLock.Lock()
defer discardInfoLock.Unlock()
di := discardInfoMap[t]
if di == nil {
di = &discardInfo{typ: t}
discardInfoMap[t] = di
}
return di
}
func (di *discardInfo) discard(src pointer) {
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&di.initialized) == 0 {
di.computeDiscardInfo()
}
for _, fi := range di.fields {
sfp := src.offset(fi.field)
fi.discard(sfp)
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
// Ignore lock since DiscardUnknown is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
DiscardUnknown(m)
}
}
}
if di.unrecognized.IsValid() {
*src.offset(di.unrecognized).toBytes() = nil
}
}
func (di *discardInfo) computeDiscardInfo() {
di.lock.Lock()
defer di.lock.Unlock()
if di.initialized != 0 {
return
}
t := di.typ
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
dfi := discardFieldInfo{field: toField(&f)}
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
case isSlice: // E.g., []*pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sps := src.getPointerSlice()
for _, sp := range sps {
if !sp.isNil() {
di.discard(sp)
}
}
}
default: // E.g., *pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sp := src.getPointer()
if !sp.isNil() {
di.discard(sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
default: // E.g., map[K]V
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
dfi.discard = func(src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
DiscardUnknown(val.Interface().(Message))
}
}
} else {
dfi.discard = func(pointer) {} // Noop
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
default: // E.g., interface{}
// TODO: Make this faster?
dfi.discard = func(src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
DiscardUnknown(sv.Interface().(Message))
}
}
}
}
default:
continue
}
di.fields = append(di.fields, dfi)
}
di.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
di.unrecognized = toField(&f)
}
atomic.StoreInt32(&di.initialized, 1)
}
func discardLegacy(m Message) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Ptr || v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
vf := v.Field(i)
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
case isSlice: // E.g., []*pb.T
for j := 0; j < vf.Len(); j++ {
discardLegacy(vf.Index(j).Interface().(Message))
}
default: // E.g., *pb.T
discardLegacy(vf.Interface().(Message))
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
default: // E.g., map[K]V
tv := vf.Type().Elem()
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
for _, key := range vf.MapKeys() {
val := vf.MapIndex(key)
discardLegacy(val.Interface().(Message))
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
default: // E.g., test_proto.isCommunique_Union interface
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
if !vf.IsNil() {
vf = vf.Elem() // E.g., test_proto.Communique_Msg
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
if vf.Kind() == reflect.Ptr {
discardLegacy(vf.Interface().(Message))
}
}
}
}
}
}
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
if vf.Type() != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
vf.Set(reflect.ValueOf([]byte(nil)))
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(m); err == nil {
// Ignore lock since discardLegacy is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
discardLegacy(m)
}
}
}
}
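The discard.go file above is new in this upgrade. As a hedged illustration of how it is meant to be consumed, the sketch below round-trips a wire-format message and drops its unknown fields; the import path github.com/golang/protobuf/proto and the generated package mypb with its MyMessage type are assumptions for illustration, not part of this commit.

package example

import (
	"github.com/golang/protobuf/proto"

	mypb "example.com/myapp/mypb" // hypothetical generated package
)

// reserialize decodes wire-format bytes and re-encodes them without any
// fields the current schema does not know about.
func reserialize(in []byte) ([]byte, error) {
	msg := &mypb.MyMessage{}
	if err := proto.Unmarshal(in, msg); err != nil {
		return nil, err
	}
	// Unknown fields preserved by Unmarshal would otherwise be re-emitted by
	// Marshal; DiscardUnknown clears them from msg and all nested messages.
	proto.DiscardUnknown(msg)
	return proto.Marshal(msg)
}

The gogo fork vendored alongside this package mirrors the same DiscardUnknown entry point, so the call pattern carries over.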

File diff suppressed because it is too large.


@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
// set/unset mismatch // set/unset mismatch
return false return false
} }
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem() f1, f2 = f1.Elem(), f2.Elem()
} }
if !equalAny(f1, f2, sprop.Prop[i]) { if !equalAny(f1, f2, sprop.Prop[i]) {
@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
u1 := uf.Bytes() u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) { return bytes.Equal(u1, u2)
return false
}
return true
} }
// v1 and v2 are known to have the same type. // v1 and v2 are known to have the same type.
@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
m1, m2 := e1.value, e2.value m1, m2 := e1.value, e2.value
if m1 == nil && m2 == nil {
// Both have only encoded form.
if bytes.Equal(e1.enc, e2.enc) {
continue
}
// The bytes are different, but the extensions might still be
// equal. We need to decode them to compare.
}
if m1 != nil && m2 != nil { if m1 != nil && m2 != nil {
// Both are unencoded. // Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
desc = m[extNum] desc = m[extNum]
} }
if desc == nil { if desc == nil {
// If both have only encoded form and the bytes are the same,
// it is handled above. We get here when the bytes are different.
// We don't know how to decode it, so just compare them as byte
// slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue return false
} }
var err error var err error
if m1 == nil { if m1 == nil {
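These equal.go changes adjust how proto.Equal treats unknown fields and extensions that cannot be decoded. A minimal sketch of the public entry point, assuming a hypothetical generated message mypb.MyMessage with a Name field:

package example

import (
	"github.com/golang/protobuf/proto"

	mypb "example.com/myapp/mypb" // hypothetical generated package
)

// equalDemo shows that proto.Equal compares decoded field values (including
// extensions and unknown fields) rather than raw encoded bytes.
func equalDemo() bool {
	a := &mypb.MyMessage{Name: proto.String("x")}
	b := &mypb.MyMessage{Name: proto.String("x")}
	return proto.Equal(a, b) // true: distinct pointers, equal contents
}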


@ -38,6 +38,7 @@ package proto
import ( import (
"errors" "errors"
"fmt" "fmt"
"io"
"reflect" "reflect"
"strconv" "strconv"
"sync" "sync"
@ -91,14 +92,29 @@ func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message. // extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements // If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface. // the extendableProto interface.
func extendable(p interface{}) (extendableProto, bool) { func extendable(p interface{}) (extendableProto, error) {
if ep, ok := p.(extendableProto); ok { switch p := p.(type) {
return ep, ok case extendableProto:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return p, nil
case extendableProtoV1:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return extensionAdapter{p}, nil
} }
if ep, ok := p.(extendableProtoV1); ok { // Don't allocate a specific error containing %T:
return extensionAdapter{ep}, ok // this is the hot path for Clone and MarshalText.
} return nil, errNotExtendable
return nil, false }
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
func isNilPtr(x interface{}) bool {
v := reflect.ValueOf(x)
return v.Kind() == reflect.Ptr && v.IsNil()
} }
// XXX_InternalExtensions is an internal representation of proto extensions. // XXX_InternalExtensions is an internal representation of proto extensions.
@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc
return e.p.extensionMap, &e.p.mu return e.p.extensionMap, &e.p.mu
} }
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
// ExtensionDesc represents an extension specification. // ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler. // Used in generated code from the protocol compiler.
type ExtensionDesc struct { type ExtensionDesc struct {
@ -154,6 +167,7 @@ type ExtensionDesc struct {
Field int32 // field number Field int32 // field number
Name string // fully-qualified name of extension, for text formatting Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
} }
func (ed *ExtensionDesc) repeated() bool { func (ed *ExtensionDesc) repeated() bool {
@ -178,8 +192,8 @@ type Extension struct {
// SetRawExtension is for testing only. // SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) { func SetRawExtension(base Message, id int32, b []byte) {
epb, ok := extendable(base) epb, err := extendable(base)
if !ok { if err != nil {
return return
} }
extmap := epb.extensionsWrite() extmap := epb.extensionsWrite()
@ -204,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
pbi = ea.extendableProtoV1 pbi = ea.extendableProtoV1
} }
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
} }
// Check the range. // Check the range.
if !isExtensionField(pb, extension.Field) { if !isExtensionField(pb, extension.Field) {
@ -249,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop return prop
} }
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensions(e *XXX_InternalExtensions) error {
m, mu := e.extensionsRead()
if m == nil {
return nil // fast path
}
mu.Lock()
defer mu.Unlock()
return encodeExtensionsMap(m)
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionsMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func extensionsSize(e *XXX_InternalExtensions) (n int) {
m, mu := e.extensionsRead()
if m == nil {
return 0
}
mu.Lock()
defer mu.Unlock()
return extensionsMapSize(m)
}
func extensionsMapSize(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb. // HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool { func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.? // TODO: Check types, field numbers, etc.?
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return false return false
} }
extmap, mu := epb.extensionsRead() extmap, mu := epb.extensionsRead()
@ -335,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
return false return false
} }
mu.Lock() mu.Lock()
_, ok = extmap[extension.Field] _, ok := extmap[extension.Field]
mu.Unlock() mu.Unlock()
return ok return ok
} }
// ClearExtension removes the given extension from pb. // ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) { func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return return
} }
// TODO: Check types, field numbers, etc.? // TODO: Check types, field numbers, etc.?
@ -351,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) {
delete(extmap, extension.Field) delete(extmap, extension.Field)
} }
// GetExtension parses and returns the given extension of pb. // GetExtension retrieves a proto2 extended field from pb.
// If the extension is not present and has no default value it returns ErrMissingExtension. //
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return nil, errors.New("proto: not an extendable proto") return nil, err
} }
if err := checkExtensionTypes(epb, extension); err != nil { if extension.ExtendedType != nil {
return nil, err // can only check type if this is a complete descriptor
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
}
} }
emap, mu := epb.extensionsRead() emap, mu := epb.extensionsRead()
@ -387,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
return e.value, nil return e.value, nil
} }
if extension.ExtensionType == nil {
// incomplete descriptor
return e.enc, nil
}
v, err := decodeExtension(e.enc, extension) v, err := decodeExtension(e.enc, extension)
if err != nil { if err != nil {
return nil, err return nil, err
@ -404,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// defaultExtensionValue returns the default value for extension. // defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned. // If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
if extension.ExtensionType == nil {
// incomplete descriptor, so no default
return nil, ErrMissingExtension
}
t := reflect.TypeOf(extension.ExtensionType) t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension) props := extensionProperties(extension)
@ -438,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
// decodeExtension decodes an extension encoded in b. // decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType) t := reflect.TypeOf(extension.ExtensionType)
unmarshal := typeUnmarshaler(t, extension.Tag)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice. // t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the // Allocate space to store the pointer/slice.
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem() value := reflect.New(t).Elem()
var err error
for { for {
// Discard wire type and field number varint. It isn't needed. x, n := decodeVarint(b)
if _, err := o.DecodeVarint(); err != nil { if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
wire := int(x) & 7
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
if err != nil {
return nil, err return nil, err
} }
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { if len(b) == 0 {
return nil, err
}
if o.index >= len(o.buf) {
break break
} }
} }
@ -472,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es. // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements. // The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return nil, errors.New("proto: not an extendable proto") return nil, err
} }
extensions = make([]interface{}, len(es)) extensions = make([]interface{}, len(es))
for i, e := range es { for i, e := range es {
@ -493,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number. // just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) return nil, err
} }
registeredExtensions := RegisteredExtensions(pb) registeredExtensions := RegisteredExtensions(pb)
@ -522,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value. // SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return errors.New("proto: not an extendable proto") return err
} }
if err := checkExtensionTypes(epb, extension); err != nil { if err := checkExtensionTypes(epb, extension); err != nil {
return err return err
@ -549,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
// ClearAllExtensions clears all extensions from pb. // ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) { func ClearAllExtensions(pb Message) {
epb, ok := extendable(pb) epb, err := extendable(pb)
if !ok { if err != nil {
return return
} }
m := epb.extensionsWrite() m := epb.extensionsWrite()
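The extension accessors above now propagate errors from extendable instead of a bare bool. A usage sketch under assumed names (mypb.BaseMessage and the descriptor mypb.E_MyExtension would come from a generated package; neither is part of this commit):

package example

import (
	"github.com/golang/protobuf/proto"

	mypb "example.com/myapp/mypb" // hypothetical: provides BaseMessage and E_MyExtension
)

// annotate sets a proto2 extension on base and reads it back.
func annotate(base *mypb.BaseMessage) (*string, error) {
	// SetExtension checks the descriptor against the message type before storing.
	if err := proto.SetExtension(base, mypb.E_MyExtension, proto.String("hello")); err != nil {
		return nil, err
	}
	if !proto.HasExtension(base, mypb.E_MyExtension) {
		return nil, nil
	}
	// With a type-complete descriptor GetExtension returns a decoded Go value
	// (*string here); with ExtensionType == nil it returns the raw encoded bytes.
	v, err := proto.GetExtension(base, mypb.E_MyExtension)
	if err != nil {
		return nil, err
	}
	return v.(*string), nil
}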


@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences: When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers. - Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method. - Enum types do not get an Enum method.
The simplest way to describe this is to see an example. The simplest way to describe this is to see an example.
@ -266,6 +265,7 @@ package proto
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"log" "log"
"reflect" "reflect"
@ -274,6 +274,8 @@ import (
"sync" "sync"
) )
var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
// Message is implemented by generated protocol buffer messages. // Message is implemented by generated protocol buffer messages.
type Message interface { type Message interface {
Reset() Reset()
@ -310,16 +312,7 @@ type Buffer struct {
buf []byte // encode/decode byte stream buf []byte // encode/decode byte stream
index int // read point index int // read point
// pools of basic types to amortize allocation. deterministic bool
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
} }
// NewBuffer allocates a new Buffer and initializes its internal data to // NewBuffer allocates a new Buffer and initializes its internal data to
@ -344,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer. // Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf } func (p *Buffer) Bytes() []byte { return p.buf }
// SetDeterministic sets whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographic order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
p.deterministic = deterministic
}
/* /*
* Helper routines for simplifying the creation of optional fields of basic type. * Helper routines for simplifying the creation of optional fields of basic type.
*/ */
@ -832,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
return sf, false, nil return sf, false, nil
} }
// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums. // Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface { func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{ s := mapKeySorter{vs: vs}
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
// numeric keys are sorted numerically.
if len(vs) == 0 { if len(vs) == 0 {
return s return s
} }
@ -856,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64: case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
case reflect.Bool:
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
case reflect.String:
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
default:
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
} }
return s return s
@ -896,3 +909,13 @@ const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package. // to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true const ProtoPackageIsVersion1 = true
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
// This type is not subject to any compatibility guarantee.
type InternalMessageInfo struct {
marshal *marshalInfo
unmarshal *unmarshalInfo
merge *mergeInfo
discard *discardInfo
}
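Buffer.SetDeterministic and InternalMessageInfo are the main additions to lib.go here. A sketch of deterministic marshaling, again with a hypothetical generated message mypb.MyMessage:

package example

import (
	"github.com/golang/protobuf/proto"

	mypb "example.com/myapp/mypb" // hypothetical generated package
)

// marshalDeterministic encodes msg so that equal messages produced by the
// same binary serialize to identical bytes (map entries sorted by key).
func marshalDeterministic(msg *mypb.MyMessage) ([]byte, error) {
	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true)
	if err := buf.Marshal(msg); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

As the doc comment above notes, this output is stable for a given binary but is not canonical across languages or schema changes.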


@ -42,6 +42,7 @@ import (
"fmt" "fmt"
"reflect" "reflect"
"sort" "sort"
"sync"
) )
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
} }
func (ms *messageSet) Has(pb Message) bool { func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil { return ms.find(pb) != nil
return true
}
return false
} }
func (ms *messageSet) Unmarshal(pb Message) error { func (ms *messageSet) Unmarshal(pb Message) error {
@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
// MarshalMessageSet encodes the extension map represented by m in the message set wire format. // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) { func MarshalMessageSet(exts interface{}) ([]byte, error) {
var m map[int32]Extension return marshalMessageSet(exts, false)
}
// marshalMessageSet implements the above function, with an option to toggle deterministic marshaling.
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
switch exts := exts.(type) { switch exts := exts.(type) {
case *XXX_InternalExtensions: case *XXX_InternalExtensions:
if err := encodeExtensions(exts); err != nil { var u marshalInfo
return nil, err siz := u.sizeMessageSet(exts)
} b := make([]byte, 0, siz)
m, _ = exts.extensionsRead() return u.appendMessageSet(b, exts, deterministic)
case map[int32]Extension: case map[int32]Extension:
if err := encodeExtensionsMap(exts); err != nil { // This is an old-style extension map.
return nil, err // Wrap it in a new-style XXX_InternalExtensions.
ie := XXX_InternalExtensions{
p: &struct {
mu sync.Mutex
extensionMap map[int32]Extension
}{
extensionMap: exts,
},
} }
m = exts
var u marshalInfo
siz := u.sizeMessageSet(&ie)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, &ie, deterministic)
default: default:
return nil, errors.New("proto: not an extension map") return nil, errors.New("proto: not an extension map")
} }
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
} }
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error { func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension var m map[int32]Extension
switch exts := exts.(type) { switch exts := exts.(type) {
@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension var m map[int32]Extension
switch exts := exts.(type) { switch exts := exts.(type) {
case *XXX_InternalExtensions: case *XXX_InternalExtensions:
m, _ = exts.extensionsRead() var mu sync.Locker
m, mu = exts.extensionsRead()
if m != nil {
// Keep the extensions map locked until we're done marshaling to prevent
// races between marshaling and unmarshaling the lazily-{en,de}coded
// values.
mu.Lock()
defer mu.Unlock()
}
case map[int32]Extension: case map[int32]Extension:
m = exts m = exts
default: default:
@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
for i, id := range ids { for i, id := range ids {
ext := m[id] ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id] msd, ok := messageSetMap[id]
if !ok { if !ok {
// Unknown type; we can't render it, so skip it. // Unknown type; we can't render it, so skip it.
continue continue
} }
if i > 0 && b.Len() > 1 {
b.WriteByte(',')
}
fmt.Fprintf(&b, `"[%s]":`, msd.name) fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value x := ext.value


@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js // +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect. // This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@ -38,32 +38,13 @@
package proto package proto
import ( import (
"math"
"reflect" "reflect"
"sync"
) )
// A structPointer is a pointer to a struct. const unsafeAllowed = false
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value. // A field identifies a field in a struct, accessible from a pointer.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices // In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex. // passed to reflect's FieldByIndex.
type field []int type field []int
@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier. // invalidField is an invalid field identifier.
var invalidField = field(nil) var invalidField = field(nil)
// zeroField is a noop when calling pointer.offset.
var zeroField = field([]int{})
// IsValid reports whether the field identifier is valid. // IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil } func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value. // The pointer type is for the table-driven decoder.
func structPointer_field(p structPointer, f field) reflect.Value { // The implementation here uses a reflect.Value of pointer type to
// Special case: an extension map entry with a value of type T // create a generic pointer. In pointer_unsafe.go we use unsafe
// passes a *T to the struct-handling code with a zero field, // instead of reflect to implement the same (but faster) interface.
// expecting that it will be treated as equivalent to *struct{ X T }, type pointer struct {
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value v reflect.Value
} }
func (p structPointerSlice) Len() int { return p.v.Len() } // toPointer converts an interface of pointer type to a pointer
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } // that points to the same target.
func (p structPointerSlice) Append(q structPointer) { func toPointer(i *Message) pointer {
p.v.Set(reflect.Append(p.v, q.v)) return pointer{v: reflect.ValueOf(*i)}
} }
var ( // toAddrPointer converts an interface to a pointer that points to
int32Type = reflect.TypeOf(int32(0)) // the interface data.
uint32Type = reflect.TypeOf(uint32(0)) func toAddrPointer(i *interface{}, isptr bool) pointer {
float32Type = reflect.TypeOf(float32(0)) v := reflect.ValueOf(*i)
int64Type = reflect.TypeOf(int64(0)) u := reflect.New(v.Type())
uint64Type = reflect.TypeOf(uint64(0)) u.Elem().Set(v)
float64Type = reflect.TypeOf(float64(0)) return pointer{v: u}
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
} }
// IsNil reports whether p is nil. // valToPointer converts v to a pointer. v must be of pointer type.
func word32_IsNil(p word32) bool { func valToPointer(v reflect.Value) pointer {
return pointer{v: v}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
func (p pointer) isNil() bool {
return p.v.IsNil() return p.v.IsNil()
} }
// Set sets p to point at a newly allocated word with bits set to x. // grow updates the slice s in place to make it one element longer.
func word32_Set(p word32, o *Buffer, x uint32) { // s must be addressable.
t := p.v.Type().Elem() // Returns the (addressable) new element.
switch t { func grow(s reflect.Value) reflect.Value {
case int32Type: n, m := s.Len(), s.Cap()
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m { if n < m {
p.v.SetLen(n + 1) s.SetLen(n + 1)
} else { } else {
t := p.v.Type().Elem() s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
} }
elem := p.v.Index(n) return s.Index(n)
switch elem.Kind() { }
case reflect.Int32:
elem.SetInt(int64(int32(x))) func (p pointer) toInt64() *int64 {
case reflect.Uint32: return p.v.Interface().(*int64)
elem.SetUint(uint64(x)) }
case reflect.Float32: func (p pointer) toInt64Ptr() **int64 {
elem.SetFloat(float64(math.Float32frombits(x))) return p.v.Interface().(**int64)
}
func (p pointer) toInt64Slice() *[]int64 {
return p.v.Interface().(*[]int64)
}
var int32ptr = reflect.TypeOf((*int32)(nil))
func (p pointer) toInt32() *int32 {
return p.v.Convert(int32ptr).Interface().(*int32)
}
// The toInt32Ptr/Slice methods don't work because of enums.
// Instead, we must use set/get methods for the int32ptr/slice case.
/*
func (p pointer) toInt32Ptr() **int32 {
return p.v.Interface().(**int32)
}
func (p pointer) toInt32Slice() *[]int32 {
return p.v.Interface().(*[]int32)
}
*/
func (p pointer) getInt32Ptr() *int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().(*int32)
} }
// an enum
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
}
func (p pointer) setInt32Ptr(v int32) {
// Allocate value in a *int32. Possibly convert that to a *enum.
// Then assign it to a **int32 or **enum.
// Note: we can convert *int32 to *enum, but we can't convert
// **int32 to **enum!
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
} }
func (p word32Slice) Len() int { // getInt32Slice copies []int32 from p as a new slice.
return p.v.Len() // This behavior differs from the implementation in pointer_unsafe.go.
} func (p pointer) getInt32Slice() []int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
func (p word32Slice) Index(i int) uint32 { // raw int32 type
elem := p.v.Index(i) return p.v.Elem().Interface().([]int32)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
} }
panic("unreachable") // an enum
// Allocate a []int32, then assign []enum's values into it.
// Note: we can't convert []enum to []int32.
slice := p.v.Elem()
s := make([]int32, slice.Len())
for i := 0; i < slice.Len(); i++ {
s[i] = int32(slice.Index(i).Int())
}
return s
} }
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. // setInt32Slice copies []int32 into p as a new slice.
func structPointer_Word32Slice(p structPointer, f field) word32Slice { // This behavior differs from the implementation in pointer_unsafe.go.
return word32Slice{structPointer_field(p, f)} func (p pointer) setInt32Slice(v []int32) {
} if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
// word64 is like word32 but for 64-bit values. p.v.Elem().Set(reflect.ValueOf(v))
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return return
} }
panic("unreachable") // an enum
} // Allocate a []enum, then assign []int32's values into it.
// Note: we can't convert []enum to []int32.
func word64_IsNil(p word64) bool { slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
return p.v.IsNil() for i, x := range v {
} slice.Index(i).SetInt(int64(x))
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
} }
panic("unreachable") p.v.Elem().Set(slice)
}
func (p pointer) appendInt32Slice(v int32) {
grow(p.v.Elem()).SetInt(int64(v))
} }
func structPointer_Word64(p structPointer, f field) word64 { func (p pointer) toUint64() *uint64 {
return word64{structPointer_field(p, f)} return p.v.Interface().(*uint64)
}
func (p pointer) toUint64Ptr() **uint64 {
return p.v.Interface().(**uint64)
}
func (p pointer) toUint64Slice() *[]uint64 {
return p.v.Interface().(*[]uint64)
}
func (p pointer) toUint32() *uint32 {
return p.v.Interface().(*uint32)
}
func (p pointer) toUint32Ptr() **uint32 {
return p.v.Interface().(**uint32)
}
func (p pointer) toUint32Slice() *[]uint32 {
return p.v.Interface().(*[]uint32)
}
func (p pointer) toBool() *bool {
return p.v.Interface().(*bool)
}
func (p pointer) toBoolPtr() **bool {
return p.v.Interface().(**bool)
}
func (p pointer) toBoolSlice() *[]bool {
return p.v.Interface().(*[]bool)
}
func (p pointer) toFloat64() *float64 {
return p.v.Interface().(*float64)
}
func (p pointer) toFloat64Ptr() **float64 {
return p.v.Interface().(**float64)
}
func (p pointer) toFloat64Slice() *[]float64 {
return p.v.Interface().(*[]float64)
}
func (p pointer) toFloat32() *float32 {
return p.v.Interface().(*float32)
}
func (p pointer) toFloat32Ptr() **float32 {
return p.v.Interface().(**float32)
}
func (p pointer) toFloat32Slice() *[]float32 {
return p.v.Interface().(*[]float32)
}
func (p pointer) toString() *string {
return p.v.Interface().(*string)
}
func (p pointer) toStringPtr() **string {
return p.v.Interface().(**string)
}
func (p pointer) toStringSlice() *[]string {
return p.v.Interface().(*[]string)
}
func (p pointer) toBytes() *[]byte {
return p.v.Interface().(*[]byte)
}
func (p pointer) toBytesSlice() *[][]byte {
return p.v.Interface().(*[][]byte)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return p.v.Interface().(*XXX_InternalExtensions)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return p.v.Interface().(*map[int32]Extension)
}
func (p pointer) getPointer() pointer {
return pointer{v: p.v.Elem()}
}
func (p pointer) setPointer(q pointer) {
p.v.Elem().Set(q.v)
}
func (p pointer) appendPointer(q pointer) {
grow(p.v.Elem()).Set(q.v)
} }
// word64Val is like word32Val but for 64-bit values. // getPointerSlice copies []*T from p as a new []pointer.
type word64Val struct { // This behavior differs from the implementation in pointer_unsafe.go.
v reflect.Value func (p pointer) getPointerSlice() []pointer {
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
} }
func word64Val_Set(p word64Val, o *Buffer, x uint64) { // setPointerSlice copies []pointer into p as a new []*T.
switch p.v.Type() { // This behavior differs from the implementation in pointer_unsafe.go.
case int64Type: func (p pointer) setPointerSlice(v []pointer) {
p.v.SetInt(int64(x)) if v == nil {
return p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return return
} }
panic("unreachable") s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
} for _, p := range v {
s = reflect.Append(s, p.v)
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
} }
panic("unreachable") p.v.Elem().Set(s)
} }
func structPointer_Word64Val(p structPointer, f field) word64Val { // getInterfacePointer returns a pointer that points to the
return word64Val{structPointer_field(p, f)} // interface data of the interface pointed by p.
} func (p pointer) getInterfacePointer() pointer {
if p.v.Elem().IsNil() {
type word64Slice struct { return pointer{v: p.v.Elem()}
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(int64(x)))
case reflect.Uint64:
elem.SetUint(uint64(x))
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
} }
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
} }
func (p word64Slice) Len() int { func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
return p.v.Len() // TODO: check that p.v.Type().Elem() == t?
return p.v
} }
func (p word64Slice) Index(i int) uint64 { func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
elem := p.v.Index(i) atomicLock.Lock()
switch elem.Kind() { defer atomicLock.Unlock()
case reflect.Int64: return *p
return uint64(elem.Int()) }
case reflect.Uint64: func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
return uint64(elem.Uint()) atomicLock.Lock()
case reflect.Float64: defer atomicLock.Unlock()
return math.Float64bits(float64(elem.Float())) *p = v
} }
panic("unreachable") func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
} }
func structPointer_Word64Slice(p structPointer, f field) word64Slice { var atomicLock sync.Mutex
return word64Slice{structPointer_field(p, f)}
}


@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine,!js // +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe. // This file contains the implementation of the proto field accesses using package unsafe.
@ -37,38 +37,13 @@ package proto
import ( import (
"reflect" "reflect"
"sync/atomic"
"unsafe" "unsafe"
) )
// NOTE: These type_Foo functions would more idiomatically be methods, const unsafeAllowed = true
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct. // A field identifies a field in a struct, accessible from a pointer.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct. // In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr type field uintptr
@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier. // invalidField is an invalid field identifier.
const invalidField = ^field(0) const invalidField = ^field(0)
// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)
// IsValid reports whether the field identifier is valid. // IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { func (f field) IsValid() bool {
return f != ^field(0) return f != invalidField
} }
// Bytes returns the address of a []byte field in the struct. // The pointer type below is for the new table-driven encoder/decoder.
func structPointer_Bytes(p structPointer, f field) *[]byte { // The implementation here uses unsafe.Pointer to create a generic pointer.
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) // In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
p unsafe.Pointer
} }
// BytesSlice returns the address of a [][]byte field in the struct. // size of pointer
func structPointer_BytesSlice(p structPointer, f field) *[][]byte { var ptrSize = unsafe.Sizeof(uintptr(0))
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
// Super-tricky - read pointer out of data word of interface value.
// Saves ~25ns over the equivalent:
// return valToPointer(reflect.ValueOf(*i))
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
} }
// Bool returns the address of a *bool field in the struct. // toAddrPointer converts an interface to a pointer that points to
func structPointer_Bool(p structPointer, f field) **bool { // the interface data.
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) func toAddrPointer(i *interface{}, isptr bool) pointer {
} // Super-tricky - read or get the address of data word of interface value.
if isptr {
// BoolVal returns the address of a bool field in the struct. // The interface is of pointer type, thus it is a direct interface.
func structPointer_BoolVal(p structPointer, f field) *bool { // The data word is the pointer data itself. We take its address.
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *v to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
} }
o.uint32s[0] = x // The interface is not of pointer type. The data word is the pointer
*p = &o.uint32s[0] // to the data.
o.uint32s = o.uint32s[1:] return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
} }
// Get gets the value pointed at by *v. // valToPointer converts v to a pointer. v must be of pointer type.
func word32_Get(p word32) uint32 { func valToPointer(v reflect.Value) pointer {
return **p return pointer{p: unsafe.Pointer(v.Pointer())}
} }
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. // offset converts from a pointer to a structure to a pointer to
func structPointer_Word32(p structPointer, f field) word32 { // one of its fields.
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) func (p pointer) offset(f field) pointer {
// For safety, we should panic if !f.IsValid, however calling panic causes
// this to no longer be inlineable, which is a serious performance cost.
/*
if !f.IsValid() {
panic("invalid field")
}
*/
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
} }
// A word32Val is the address of a 32-bit value field. func (p pointer) isNil() bool {
type word32Val *uint32 return p.p == nil
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
} }
// Get gets the value pointed at by p. func (p pointer) toInt64() *int64 {
func word32Val_Get(p word32Val) uint32 { return (*int64)(p.p)
return *p }
func (p pointer) toInt64Ptr() **int64 {
return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
return (*int32)(p.p)
} }
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. // See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
func structPointer_Word32Val(p structPointer, f field) word32Val { /*
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) func (p pointer) toInt32Ptr() **int32 {
} return (**int32)(p.p)
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
} }
o.uint64s[0] = x func (p pointer) toInt32Slice() *[]int32 {
*p = &o.uint64s[0] return (*[]int32)(p.p)
o.uint64s = o.uint64s[1:] }
*/
func (p pointer) getInt32Ptr() *int32 {
return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
*(**int32)(p.p) = &v
} }
func word64_IsNil(p word64) bool { // getInt32Slice loads a []int32 from p.
return *p == nil // The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
return *(*[]int32)(p.p)
} }
func word64_Get(p word64) uint64 { // setInt32Slice stores a []int32 to p.
return **p // The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
*(*[]int32)(p.p) = v
} }
func structPointer_Word64(p structPointer, f field) word64 { // TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) func (p pointer) appendInt32Slice(v int32) {
s := (*[]int32)(p.p)
*s = append(*s, v)
} }
// word64Val is like word32Val but for 64-bit values. func (p pointer) toUint64() *uint64 {
type word64Val *uint64 return (*uint64)(p.p)
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) { func (p pointer) toUint64Ptr() **uint64 {
*p = x return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return (*map[int32]Extension)(p.p)
} }
func word64Val_Get(p word64Val) uint64 { // getPointerSlice loads []*T from p as a []pointer.
return *p // The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
// Super-tricky - p should point to a []*T where T is a
// message type. We load it as []pointer.
return *(*[]pointer)(p.p)
} }
func structPointer_Word64Val(p structPointer, f field) word64Val { // setPointerSlice stores []pointer into p as a []*T.
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) // The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
// Super-tricky - p should point to a []*T where T is a
// message type. We store it as []pointer.
*(*[]pointer)(p.p) = v
} }
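getPointerSlice and setPointerSlice rely on []*T and []unsafe.Pointer sharing the same slice-header layout, so one can be reinterpreted as the other without copying. A standalone sketch of that layout assumption (the type T is made up for illustration):

package main

import (
	"fmt"
	"unsafe"
)

type T struct{ v int }

func main() {
	s := []*T{{v: 1}, {v: 2}}
	// Reinterpret the []*T as []unsafe.Pointer in place; no elements are copied.
	ps := *(*[]unsafe.Pointer)(unsafe.Pointer(&s))
	fmt.Println(len(ps), (*T)(ps[0]).v, (*T)(ps[1]).v) // 2 1 2
}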
// word64Slice is like word32Slice but for 64-bit values. // getPointer loads the pointer at p and returns it.
type word64Slice []uint64 func (p pointer) getPointer() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } // setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
func structPointer_Word64Slice(p structPointer, f field) *word64Slice { *(*unsafe.Pointer)(p.p) = q.p
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
s := (*[]unsafe.Pointer)(p.p)
*s = append(*s, q.p)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
// Super-tricky - read pointer out of data word of interface value.
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}
// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
return reflect.NewAt(t, p.p)
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
} }
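These load/store pairs publish lazily built per-type tables through atomic pointer operations so that readers never take a lock. A minimal standalone sketch of that publish-once pattern (the info type and function names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type info struct{ name string }

var cached *info // published atomically, read without locks

func loadInfo() *info {
	return (*info)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&cached))))
}

func storeInfo(v *info) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&cached)), unsafe.Pointer(v))
}

func getInfo() *info {
	if i := loadInfo(); i != nil {
		return i // fast path: the table was already built and published
	}
	i := &info{name: "built once"} // concurrent rebuilds are harmless, the last store wins
	storeInfo(i)
	return i
}

func main() { fmt.Println(getInfo().name) }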


@ -58,42 +58,6 @@ const (
WireFixed32 = 5 WireFixed32 = 5
) )
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
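The tagMap fields themselves fall outside this hunk; the shape such a structure takes is roughly a small dense array for low tag numbers with a map fallback, as in this illustrative sketch (the names are invented, not the library's):

package main

import "fmt"

// tagLookup mimics a dense-array-plus-map index keyed by protobuf tag number.
type tagLookup struct {
	fast [64]int     // one slot per small tag number; 0 means unset
	slow map[int]int // fallback for large or sparse tag numbers
}

func (t *tagLookup) put(tag, fieldIndex int) {
	if tag > 0 && tag < len(t.fast) {
		t.fast[tag] = fieldIndex + 1 // store +1 so the zero value means "unset"
		return
	}
	if t.slow == nil {
		t.slow = make(map[int]int)
	}
	t.slow[tag] = fieldIndex
}

func (t *tagLookup) get(tag int) (int, bool) {
	if tag > 0 && tag < len(t.fast) {
		if v := t.fast[tag]; v != 0 {
			return v - 1, true
		}
		return 0, false
	}
	v, ok := t.slow[tag]
	return v, ok
}

func main() {
	var tl tagLookup
	tl.put(1, 0)
	tl.put(1000, 7)
	fmt.Println(tl.get(1))    // 0 true
	fmt.Println(tl.get(1000)) // 7 true
}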
@ -140,13 +104,6 @@ type StructProperties struct {
decoderTags tagMap // map from proto tag to struct field number decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message. // OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field. // It is keyed by the original name of a field.
@ -187,36 +144,19 @@ type Properties struct {
Default string // default value Default string // default value
HasDefault bool // whether an explicit default was provided HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder stype reflect.Type // set for struct types only
valEnc valueEncoder // set for bool and numeric types only sprop *StructProperties // set for struct types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
} }
// String formats the properties in the protobuf struct field tag style. // String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string { func (p *Properties) String() string {
s := p.Wire s := p.Wire
s = "," s += ","
s += strconv.Itoa(p.Tag) s += strconv.Itoa(p.Tag)
if p.Required { if p.Required {
s += ",req" s += ",req"
@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) {
switch p.Wire { switch p.Wire {
case "varint": case "varint":
p.WireType = WireVarint p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32": case "fixed32":
p.WireType = WireFixed32 p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64": case "fixed64":
p.WireType = WireFixed64 p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32": case "zigzag32":
p.WireType = WireVarint p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64": case "zigzag64":
p.WireType = WireVarint p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group": case "bytes", "group":
p.WireType = WireBytes p.WireType = WireBytes
// no numeric converter for non-numeric types // no numeric converter for non-numeric types
@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) {
return return
} }
outer:
for i := 2; i < len(fields); i++ { for i := 2; i < len(fields); i++ {
f := fields[i] f := fields[i]
switch { switch {
@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) {
if i+1 < len(fields) { if i+1 < len(fields) {
// Commas aren't escaped, and def is always last. // Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",") p.Default += "," + strings.Join(fields[i+1:], ",")
break break outer
} }
} }
} }
} }
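Parse consumes the comma-separated tag text that protoc-gen-go writes into struct field tags, such as "bytes,49,opt,name=greeting,def=hello!". A small illustrative example (the message and field names are invented) of reading such a tag back off a struct field:

package main

import (
	"fmt"
	"reflect"
)

// Example mirrors the shape of a generated message; only the tags matter here.
type Example struct {
	Greeting *string `protobuf:"bytes,49,opt,name=greeting,def=hello!"`
	Count    *int32  `protobuf:"varint,2,opt,name=count"`
}

func main() {
	t := reflect.TypeOf(Example{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s -> %q\n", f.Name, f.Tag.Get("protobuf"))
	}
	// Greeting -> "bytes,49,opt,name=greeting,def=hello!"
	// Count -> "varint,2,opt,name=count"
}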
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding. // setFieldProps initializes the field properties for submessages and maps.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() { switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr: case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() { if t1.Elem().Kind() == reflect.Struct {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem() p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
} }
case reflect.Slice: case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() { if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
default: p.stype = t2.Elem()
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.dec = (*Buffer).dec_slice_byte
if p.proto3 {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
} else {
p.enc = (*Buffer).enc_slice_byte
p.size = size_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
} }
case reflect.Map: case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1 p.mtype = t1
p.mkeyprop = &Properties{} p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
} }
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil { if p.stype != nil {
if lockGetProp { if lockGetProp {
p.sprop = GetProperties(p.stype) p.sprop = GetProperties(p.stype)
@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
} }
var ( var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
) )
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag. // Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true) p.init(typ, name, tag, f, true)
@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
// "bytes,49,opt,def=hello!" // "bytes,49,opt,def=hello!"
p.Name = name p.Name = name
p.OrigName = name p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" { if tag == "" {
return return
} }
p.Parse(tag) p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp) p.setFieldProps(typ, f, lockGetProp)
} }
var ( var (
@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
propertiesMap[t] = prop propertiesMap[t] = prop
// build properties // build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
reflect.PtrTo(t).Implements(extendableProtoV1Type)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField()) prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField()) prop.order = make([]int, t.NumField())
@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_InternalExtensions" { // special case
p.enc = (*Buffer).enc_exts
p.dec = nil // not needed
p.size = size_exts
} else if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
} else if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") // special case oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" { if oneof != "" {
// Oneof fields don't use the traditional protobuf tag. // Oneof fields don't use the traditional protobuf tag.
@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
} }
print("\n") print("\n")
} }
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
} }
// Re-order prop.order. // Re-order prop.order.
@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
} }
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{} var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() _, _, _, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata. // Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties) prop.OneofTypes = make(map[string]*OneofProperties)
@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
return prop return prop
} }
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types. // A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum. // The generated code will register the generated maps by calling RegisterEnum.
@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 {
// A registry of all linked message types. // A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message"). // The string is a fully-qualified proto name ("pkg.Message").
var ( var (
protoTypes = make(map[string]reflect.Type) protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
revProtoTypes = make(map[reflect.Type]string) protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
revProtoTypes = make(map[reflect.Type]string)
) )
// RegisterType is called from generated code and maps from the fully qualified // RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer. // proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) { func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok { if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic. // TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name) log.Printf("proto: duplicate proto type registered: %s", name)
return return
} }
t := reflect.TypeOf(x) t := reflect.TypeOf(x)
protoTypes[name] = t if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
// Generated code always calls RegisterType with nil x.
// This check is just for extra safety.
protoTypedNils[name] = x
} else {
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
}
revProtoTypes[t] = name
}
// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
if reflect.TypeOf(x).Kind() != reflect.Map {
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
}
if _, ok := protoMapTypes[name]; ok {
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoMapTypes[name] = t
revProtoTypes[t] = name revProtoTypes[t] = name
} }
@ -855,7 +520,14 @@ func MessageName(x Message) string {
} }
// MessageType returns the message type (pointer to struct) for a named message. // MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] } // The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
if t, ok := protoTypedNils[name]; ok {
return reflect.TypeOf(t)
}
return protoMapTypes[name]
}
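RegisterType and MessageType are the two halves of the name registry: generated code registers each message under its fully qualified proto name, and callers can later recover the Go type by that name. A minimal usage sketch with a hand-written stand-in type (generated code normally issues the RegisterType call from an init function):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// MyMessage is a made-up stand-in for a generated message type.
type MyMessage struct{}

func (*MyMessage) Reset()         {}
func (*MyMessage) String() string { return "" }
func (*MyMessage) ProtoMessage()  {}

func main() {
	proto.RegisterType((*MyMessage)(nil), "example.MyMessage")
	fmt.Println(proto.MessageType("example.MyMessage")) // *main.MyMessage
}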
// A registry of all linked proto files. // A registry of all linked proto files.
var ( var (

2681
vendor/github.com/golang/protobuf/proto/table_marshal.go generated vendored Normal file

File diff suppressed because it is too large

654
vendor/github.com/golang/protobuf/proto/table_merge.go generated vendored Normal file

@ -0,0 +1,654 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
// Merge merges the src message into dst.
// This assumes that dst and src are of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
mi := atomicLoadMergeInfo(&a.merge)
if mi == nil {
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
atomicStoreMergeInfo(&a.merge, mi)
}
mi.merge(toPointer(&dst), toPointer(&src))
}
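The merge rules built below amount to: source fields that are set (or non-zero) overwrite the destination, repeated fields append, and unrecognized bytes are copied over. A small usage sketch through the exported proto.Merge, using a hand-rolled stand-in type only to show the semantics (real callers pass generated message types):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hand-rolled stand-in used only to illustrate merge semantics.
type Example struct {
	Name string
	Tags []string
}

func (*Example) Reset()         {}
func (*Example) String() string { return "" }
func (*Example) ProtoMessage()  {}

func main() {
	dst := &Example{Name: "keep", Tags: []string{"a"}}
	src := &Example{Tags: []string{"b"}}
	proto.Merge(dst, src)
	fmt.Println(dst.Name, dst.Tags) // keep [a b]: zero scalars do not overwrite, slices append
}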
type mergeInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []mergeFieldInfo
unrecognized field // Offset of XXX_unrecognized
}
type mergeFieldInfo struct {
field field // Offset of field, guaranteed to be valid
// isPointer reports whether the value in the field is a pointer.
// This is true for the following situations:
// * Pointer to struct
// * Pointer to basic type (proto2 only)
// * Slice (first value in slice header is a pointer)
// * String (first value in string header is a pointer)
isPointer bool
// basicWidth reports the width of the field assuming that it is directly
// embedded in the struct (as is the case for basic types in proto3).
// The possible values are:
// 0: invalid
// 1: bool
// 4: int32, uint32, float32
// 8: int64, uint64, float64
basicWidth int
// Where dst and src are pointers to the types being merged.
merge func(dst, src pointer)
}
var (
mergeInfoMap = map[reflect.Type]*mergeInfo{}
mergeInfoLock sync.Mutex
)
func getMergeInfo(t reflect.Type) *mergeInfo {
mergeInfoLock.Lock()
defer mergeInfoLock.Unlock()
mi := mergeInfoMap[t]
if mi == nil {
mi = &mergeInfo{typ: t}
mergeInfoMap[t] = mi
}
return mi
}
// merge merges src into dst assuming they are both of type *mi.typ.
func (mi *mergeInfo) merge(dst, src pointer) {
if dst.isNil() {
panic("proto: nil destination")
}
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&mi.initialized) == 0 {
mi.computeMergeInfo()
}
for _, fi := range mi.fields {
sfp := src.offset(fi.field)
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
continue
}
if fi.basicWidth > 0 {
switch {
case fi.basicWidth == 1 && !*sfp.toBool():
continue
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
continue
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
continue
}
}
}
dfp := dst.offset(fi.field)
fi.merge(dfp, sfp)
}
// TODO: Make this faster?
out := dst.asPointerTo(mi.typ).Elem()
in := src.asPointerTo(mi.typ).Elem()
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
if mi.unrecognized.IsValid() {
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
}
}
}
func (mi *mergeInfo) computeMergeInfo() {
mi.lock.Lock()
defer mi.lock.Unlock()
if mi.initialized != 0 {
return
}
t := mi.typ
n := t.NumField()
props := GetProperties(t)
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mfi := mergeFieldInfo{field: toField(&f)}
tf := f.Type
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
switch tf.Kind() {
case reflect.Ptr, reflect.Slice, reflect.String:
// As a special case, we assume slices and strings are pointers
// since we know that the first field in the SliceHeader or
// StringHeader is a data pointer.
mfi.isPointer = true
case reflect.Bool:
mfi.basicWidth = 1
case reflect.Int32, reflect.Uint32, reflect.Float32:
mfi.basicWidth = 4
case reflect.Int64, reflect.Uint64, reflect.Float64:
mfi.basicWidth = 8
}
}
// Unwrap tf to get at its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic("both pointer and slice for basic type in " + tf.Name())
}
switch tf.Kind() {
case reflect.Int32:
switch {
case isSlice: // E.g., []int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
/*
sfsp := src.toInt32Slice()
if *sfsp != nil {
dfsp := dst.toInt32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
*/
sfs := src.getInt32Slice()
if sfs != nil {
dfs := dst.getInt32Slice()
dfs = append(dfs, sfs...)
if dfs == nil {
dfs = []int32{}
}
dst.setInt32Slice(dfs)
}
}
case isPointer: // E.g., *int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
/*
sfpp := src.toInt32Ptr()
if *sfpp != nil {
dfpp := dst.toInt32Ptr()
if *dfpp == nil {
*dfpp = Int32(**sfpp)
} else {
**dfpp = **sfpp
}
}
*/
sfp := src.getInt32Ptr()
if sfp != nil {
dfp := dst.getInt32Ptr()
if dfp == nil {
dst.setInt32Ptr(*sfp)
} else {
*dfp = *sfp
}
}
}
default: // E.g., int32
mfi.merge = func(dst, src pointer) {
if v := *src.toInt32(); v != 0 {
*dst.toInt32() = v
}
}
}
case reflect.Int64:
switch {
case isSlice: // E.g., []int64
mfi.merge = func(dst, src pointer) {
sfsp := src.toInt64Slice()
if *sfsp != nil {
dfsp := dst.toInt64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
}
case isPointer: // E.g., *int64
mfi.merge = func(dst, src pointer) {
sfpp := src.toInt64Ptr()
if *sfpp != nil {
dfpp := dst.toInt64Ptr()
if *dfpp == nil {
*dfpp = Int64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., int64
mfi.merge = func(dst, src pointer) {
if v := *src.toInt64(); v != 0 {
*dst.toInt64() = v
}
}
}
case reflect.Uint32:
switch {
case isSlice: // E.g., []uint32
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint32Slice()
if *sfsp != nil {
dfsp := dst.toUint32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint32{}
}
}
}
case isPointer: // E.g., *uint32
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint32Ptr()
if *sfpp != nil {
dfpp := dst.toUint32Ptr()
if *dfpp == nil {
*dfpp = Uint32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint32
mfi.merge = func(dst, src pointer) {
if v := *src.toUint32(); v != 0 {
*dst.toUint32() = v
}
}
}
case reflect.Uint64:
switch {
case isSlice: // E.g., []uint64
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint64Slice()
if *sfsp != nil {
dfsp := dst.toUint64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint64{}
}
}
}
case isPointer: // E.g., *uint64
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint64Ptr()
if *sfpp != nil {
dfpp := dst.toUint64Ptr()
if *dfpp == nil {
*dfpp = Uint64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint64
mfi.merge = func(dst, src pointer) {
if v := *src.toUint64(); v != 0 {
*dst.toUint64() = v
}
}
}
case reflect.Float32:
switch {
case isSlice: // E.g., []float32
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat32Slice()
if *sfsp != nil {
dfsp := dst.toFloat32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float32{}
}
}
}
case isPointer: // E.g., *float32
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat32Ptr()
if *sfpp != nil {
dfpp := dst.toFloat32Ptr()
if *dfpp == nil {
*dfpp = Float32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float32
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat32(); v != 0 {
*dst.toFloat32() = v
}
}
}
case reflect.Float64:
switch {
case isSlice: // E.g., []float64
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat64Slice()
if *sfsp != nil {
dfsp := dst.toFloat64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float64{}
}
}
}
case isPointer: // E.g., *float64
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat64Ptr()
if *sfpp != nil {
dfpp := dst.toFloat64Ptr()
if *dfpp == nil {
*dfpp = Float64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float64
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat64(); v != 0 {
*dst.toFloat64() = v
}
}
}
case reflect.Bool:
switch {
case isSlice: // E.g., []bool
mfi.merge = func(dst, src pointer) {
sfsp := src.toBoolSlice()
if *sfsp != nil {
dfsp := dst.toBoolSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []bool{}
}
}
}
case isPointer: // E.g., *bool
mfi.merge = func(dst, src pointer) {
sfpp := src.toBoolPtr()
if *sfpp != nil {
dfpp := dst.toBoolPtr()
if *dfpp == nil {
*dfpp = Bool(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., bool
mfi.merge = func(dst, src pointer) {
if v := *src.toBool(); v {
*dst.toBool() = v
}
}
}
case reflect.String:
switch {
case isSlice: // E.g., []string
mfi.merge = func(dst, src pointer) {
sfsp := src.toStringSlice()
if *sfsp != nil {
dfsp := dst.toStringSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []string{}
}
}
}
case isPointer: // E.g., *string
mfi.merge = func(dst, src pointer) {
sfpp := src.toStringPtr()
if *sfpp != nil {
dfpp := dst.toStringPtr()
if *dfpp == nil {
*dfpp = String(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., string
mfi.merge = func(dst, src pointer) {
if v := *src.toString(); v != "" {
*dst.toString() = v
}
}
}
case reflect.Slice:
isProto3 := props.Prop[i].proto3
switch {
case isPointer:
panic("bad pointer in byte slice case in " + tf.Name())
case tf.Elem().Kind() != reflect.Uint8:
panic("bad element kind in byte slice case in " + tf.Name())
case isSlice: // E.g., [][]byte
mfi.merge = func(dst, src pointer) {
sbsp := src.toBytesSlice()
if *sbsp != nil {
dbsp := dst.toBytesSlice()
for _, sb := range *sbsp {
if sb == nil {
*dbsp = append(*dbsp, nil)
} else {
*dbsp = append(*dbsp, append([]byte{}, sb...))
}
}
if *dbsp == nil {
*dbsp = [][]byte{}
}
}
}
default: // E.g., []byte
mfi.merge = func(dst, src pointer) {
sbp := src.toBytes()
if *sbp != nil {
dbp := dst.toBytes()
if !isProto3 || len(*sbp) > 0 {
*dbp = append([]byte{}, *sbp...)
}
}
}
}
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("message field %s without pointer", tf))
case isSlice: // E.g., []*pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sps := src.getPointerSlice()
if sps != nil {
dps := dst.getPointerSlice()
for _, sp := range sps {
var dp pointer
if !sp.isNil() {
dp = valToPointer(reflect.New(tf))
mi.merge(dp, sp)
}
dps = append(dps, dp)
}
if dps == nil {
dps = []pointer{}
}
dst.setPointerSlice(dps)
}
}
default: // E.g., *pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sp := src.getPointer()
if !sp.isNil() {
dp := dst.getPointer()
if dp.isNil() {
dp = valToPointer(reflect.New(tf))
dst.setPointer(dp)
}
mi.merge(dp, sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic("bad pointer or slice in map case in " + tf.Name())
default: // E.g., map[K]V
mfi.merge = func(dst, src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
dm := dst.asPointerTo(tf).Elem()
if dm.IsNil() {
dm.Set(reflect.MakeMap(tf))
}
switch tf.Elem().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(Clone(val.Interface().(Message)))
dm.SetMapIndex(key, val)
}
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
dm.SetMapIndex(key, val)
}
default: // Basic type (e.g., string)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
dm.SetMapIndex(key, val)
}
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic("bad pointer or slice in interface case in " + tf.Name())
default: // E.g., interface{}
// TODO: Make this faster?
mfi.merge = func(dst, src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
du := dst.asPointerTo(tf).Elem()
typ := su.Elem().Type()
if du.IsNil() || du.Elem().Type() != typ {
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
}
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
dv := du.Elem().Elem().Field(0)
if dv.Kind() == reflect.Ptr && dv.IsNil() {
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
Merge(dv.Interface().(Message), sv.Interface().(Message))
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
default: // Basic type (e.g., string)
dv.Set(sv)
}
}
}
}
default:
panic(fmt.Sprintf("merger not found for type:%s", tf))
}
mi.fields = append(mi.fields, mfi)
}
mi.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
mi.unrecognized = toField(&f)
}
atomic.StoreInt32(&mi.initialized, 1)
}

File diff suppressed because it is too large


@ -50,7 +50,6 @@ import (
var ( var (
newline = []byte("\n") newline = []byte("\n")
spaces = []byte(" ") spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n") endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'} backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'} backslashR = []byte{'\\', 'r'}
@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error {
return nil return nil
} }
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func requiresQuotes(u string) bool { func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u { for _, ch := range u {
@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
props := sprops.Prop[i] props := sprops.Prop[i]
name := st.Field(i).Name name := st.Field(i).Name
if name == "XXX_NoUnkeyedLiteral" {
continue
}
if strings.HasPrefix(name, "XXX_") { if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields: // There are two XXX_ fields:
// XXX_unrecognized []byte // XXX_unrecognized []byte
@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err return err
} }
} }
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine. // Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil { if err := tm.writeAny(w, fv, props); err != nil {
@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
// Extensions (the XXX_extensions field). // Extensions (the XXX_extensions field).
pv := sv.Addr() pv := sv.Addr()
if _, ok := extendable(pv.Interface()); ok { if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil { if err := tm.writeExtensions(w, pv); err != nil {
return err return err
} }
@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil return nil
} }
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field. // writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v) v = reflect.Indirect(v)
@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
} }
} }
w.indent() w.indent()
if v.CanAddr() {
// Calling v.Interface on a struct causes the reflect package to
// copy the entire struct. This is racy with the new Marshaler
// since we atomically update the XXX_sizecache.
//
// Thus, we retrieve a pointer to the struct if possible to avoid
// a race since v.Interface on the pointer doesn't copy the struct.
//
// If v is not addressable, then we are not worried about a race
// since it implies that the binary Marshaler cannot possibly be
// mutating this value.
v = v.Addr()
}
if etm, ok := v.Interface().(encoding.TextMarshaler); ok { if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText() text, err := etm.MarshalText()
if err != nil { if err != nil {
@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if _, err = w.Write(text); err != nil { if _, err = w.Write(text); err != nil {
return err return err
} }
} else if err := tm.writeStruct(w, v); err != nil { } else {
return err if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if err := tm.writeStruct(w, v); err != nil {
return err
}
} }
w.unindent() w.unindent()
if err := w.WriteByte(ket); err != nil { if err := w.WriteByte(ket); err != nil {

View file

@ -206,7 +206,6 @@ func (p *textParser) advance() {
var ( var (
errBadUTF8 = errors.New("proto: bad UTF-8") errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
) )
func unquoteC(s string, quote rune) (string, error) { func unquoteC(s string, quote rune) (string, error) {
@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
return "?", s, nil // trigraph workaround return "?", s, nil // trigraph workaround
case '\'', '"', '\\': case '\'', '"', '\\':
return string(r), s, nil return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 { if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
} }
base := 8 ss := string(r) + s[:2]
ss := s[:2]
s = s[2:] s = s[2:]
if r == 'x' || r == 'X' { i, err := strconv.ParseUint(ss, 8, 8)
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil { if err != nil {
return "", "", err return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
} }
return string([]byte{byte(i)}), s, nil return string([]byte{byte(i)}), s, nil
case 'u', 'U': case 'x', 'X', 'u', 'U':
n := 4 var n int
if r == 'U' { switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8 n = 8
} }
if len(s) < n { if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
} }
ss := s[:n]
s = s[n:] s = s[n:]
return string(bs), s, nil i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
} }
return "", "", fmt.Errorf(`unknown escape \%c`, r) return "", "", fmt.Errorf(`unknown escape \%c`, r)
} }
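For reference, the escape forms handled above decode as follows; this standalone demo uses the same strconv.ParseUint technique on illustrative inputs:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	o, _ := strconv.ParseUint("101", 8, 8)    // octal escape \101
	x, _ := strconv.ParseUint("41", 16, 64)   // hex escape \x41
	u, _ := strconv.ParseUint("00e9", 16, 64) // unicode escape \u00e9
	fmt.Println(string([]byte{byte(o)}), string([]byte{byte(x)}), string(rune(u))) // A A é
}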
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next(). // Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op. // It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true } func (p *textParser) back() { p.backed = true }
@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) {
if tok.err != nil { if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
} }
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
} }
return strings.Join(parts, ""), nil return strings.Join(parts, ""), nil
} }
@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
// UnmarshalText returns *RequiredNotSetError. // UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error { func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok { if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s)) return um.UnmarshalText([]byte(s))
return err
} }
pb.Reset() pb.Reset()
v := reflect.ValueOf(pb) v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { return newTextParser(s).readStruct(v.Elem(), "")
return pe
}
return nil
} }


@ -51,6 +51,9 @@ const googleApis = "type.googleapis.com/"
// function. AnyMessageName is provided for less common use cases like filtering a // function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names. // sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) { func AnyMessageName(any *any.Any) (string, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
slash := strings.LastIndex(any.TypeUrl, "/") slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 { if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)


@ -1,17 +1,7 @@
// Code generated by protoc-gen-go. // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto // source: google/protobuf/any.proto
// DO NOT EDIT!
/* package any // import "github.com/golang/protobuf/ptypes/any"
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto" import proto "github.com/golang/protobuf/proto"
import fmt "fmt" import fmt "fmt"
@ -63,6 +53,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo) // any.Unpack(foo)
// ... // ...
// //
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use // The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack // 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/' // methods only use the fully qualified type name after the last '/'
@ -123,33 +123,69 @@ type Any struct {
// //
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type. // Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *Any) Reset() { *m = Any{} } func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) } func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {} func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Any) Descriptor() ([]byte, []int) {
func (*Any) XXX_WellKnownType() string { return "Any" } return fileDescriptor_any_744b9ca530f228db, []int{0}
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
func (dst *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(dst, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
}
func (m *Any) XXX_DiscardUnknown() {
xxx_messageInfo_Any.DiscardUnknown(m)
}
var xxx_messageInfo_Any proto.InternalMessageInfo
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() { func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any") proto.RegisterType((*Any)(nil), "google.protobuf.Any")
} }
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
var fileDescriptor0 = []byte{ var fileDescriptor_any_744b9ca530f228db = []byte{
// 187 bytes of a gzipped FileDescriptorProto // 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
} }

View file

@ -1,17 +1,7 @@
// Code generated by protoc-gen-go. // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto // source: google/protobuf/duration.proto
// DO NOT EDIT!
/* package duration // import "github.com/golang/protobuf/ptypes/duration"
Package duration is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/duration/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto" import proto "github.com/golang/protobuf/proto"
import fmt "fmt" import fmt "fmt"
@ -35,6 +25,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// two Timestamp values is a Duration and it can be added or subtracted // two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years. // from a Timestamp. Range is approximately +-10,000 years.
// //
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code. // Example 1: Compute Duration from two Timestamps in pseudo code.
// //
// Timestamp start = ...; // Timestamp start = ...;
@ -69,10 +61,27 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// end.nanos -= 1000000000; // end.nanos -= 1000000000;
// } // }
// //
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
// //
type Duration struct { type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000 // Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. // to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span // Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0 // of time. Durations less than one second are represented with a 0
@ -80,35 +89,71 @@ type Duration struct {
// of one second or more, a non-zero value for the `nanos` field must be // of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999 // of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive. // to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) } func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {} func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Duration) Descriptor() ([]byte, []int) {
func (*Duration) XXX_WellKnownType() string { return "Duration" } return fileDescriptor_duration_e7d612259e3f0613, []int{0}
}
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
func (dst *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(dst, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
}
func (m *Duration) XXX_DiscardUnknown() {
xxx_messageInfo_Duration.DiscardUnknown(m)
}
var xxx_messageInfo_Duration proto.InternalMessageInfo
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
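The JSON-mapping comment above says a Duration is rendered as a decimal-seconds string ending in "s". A hedged sketch of observing that from Go, assuming the time, github.com/golang/protobuf/ptypes, and github.com/golang/protobuf/jsonpb packages are imported and jsonpb treats Duration as a well-known type as documented:

    d := ptypes.DurationProto(3*time.Second + time.Nanosecond) // Seconds: 3, Nanos: 1
    out, err := (&jsonpb.Marshaler{}).MarshalToString(d)       // error handling elided
    _ = err
    // per the mapping above, out should be the JSON string "3.000000001s"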
func init() { func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
} }
func init() { func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
} }
var fileDescriptor0 = []byte{ var fileDescriptor_duration_e7d612259e3f0613 = []byte{
// 189 bytes of a gzipped FileDescriptorProto // 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
} }

View file

@ -99,6 +99,15 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
return t, validateTimestamp(ts) return t, validateTimestamp(ts)
} }
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
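From a caller's point of view, the new TimestampNow helper pairs naturally with the existing conversions in this package; a brief round-trip sketch:

    ts := ptypes.TimestampNow()     // *tspb.Timestamp for the current time
    t, err := ptypes.Timestamp(ts)  // back to time.Time; err is nil for a valid ts
    if err == nil {
        same, _ := ptypes.TimestampProto(t) // round-trips to an equivalent Timestamp
        _ = same
    }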
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid. // It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) { func TimestampProto(t time.Time) (*tspb.Timestamp, error) {

View file

@ -1,17 +1,7 @@
// Code generated by protoc-gen-go. // Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto // source: google/protobuf/timestamp.proto
// DO NOT EDIT!
/* package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
Package timestamp is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
It has these top-level messages:
Timestamp
*/
package timestamp
import proto "github.com/golang/protobuf/proto" import proto "github.com/golang/protobuf/proto"
import fmt "fmt" import fmt "fmt"
@ -40,6 +30,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// and from RFC 3339 date strings. // and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
// //
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`. // Example 1: Compute Timestamp from POSIX `time()`.
// //
// Timestamp timestamp; // Timestamp timestamp;
@ -77,51 +69,107 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// //
// Example 5: Compute Timestamp from current time in Python. // Example 5: Compute Timestamp from current time in Python.
// //
// now = time.time() // timestamp = Timestamp()
// seconds = int(now) // timestamp.GetCurrentTime()
// nanos = int((now - seconds) * 10**9) //
// timestamp = Timestamp(seconds=seconds, nanos=nanos) // # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
// //
// //
type Timestamp struct { type Timestamp struct {
// Represents seconds of UTC time since Unix epoch // Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive. // 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative // Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values // second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999 // that count forward in time. Must be from 0 to 999,999,999
// inclusive. // inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {} func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Timestamp) Descriptor() ([]byte, []int) {
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
}
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
func (dst *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(dst, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
}
func (m *Timestamp) XXX_DiscardUnknown() {
xxx_messageInfo_Timestamp.DiscardUnknown(m)
}
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
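The JSON-mapping comment above gives "2017-01-15T01:30:15.01Z" as the encoding of 15.01 seconds past 01:30 UTC on January 15, 2017. A small sketch tying that example back to the seconds/nanos fields, assuming time and github.com/golang/protobuf/ptypes are imported:

    t, _ := time.Parse(time.RFC3339Nano, "2017-01-15T01:30:15.01Z")
    ts, err := ptypes.TimestampProto(t)
    // err is nil; ts.Nanos is 10000000, i.e. the ".01" fractional second from the
    // example string above, and ts.Seconds is the matching Unix time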
func init() { func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
} }
func init() { func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
} }
var fileDescriptor0 = []byte{ var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
// 194 bytes of a gzipped FileDescriptorProto // 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
0x00, 0x00,
} }

View file

@ -1,27 +0,0 @@
Copyright 2016, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,149 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"math/rand"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// CallOption is an option used by Invoke to control behaviors of RPC calls.
// CallOption works by modifying relevant fields of CallSettings.
type CallOption interface {
// Resolve applies the option by modifying cs.
Resolve(cs *CallSettings)
}
// Retryer is used by Invoke to determine retry behavior.
type Retryer interface {
// Retry reports whether a request should be retried and how long to pause before retrying
// if the previous attempt returned with err. Invoke never calls Retry with nil error.
Retry(err error) (pause time.Duration, shouldRetry bool)
}
type retryerOption func() Retryer
func (o retryerOption) Resolve(s *CallSettings) {
s.Retry = o
}
// WithRetry sets CallSettings.Retry to fn.
func WithRetry(fn func() Retryer) CallOption {
return retryerOption(fn)
}
// OnCodes returns a Retryer that retries if and only if
// the previous attempt returns a GRPC error whose error code is stored in cc.
// Pause times between retries are specified by bo.
//
// bo is only used for its parameters; each Retryer has its own copy.
func OnCodes(cc []codes.Code, bo Backoff) Retryer {
return &boRetryer{
backoff: bo,
codes: append([]codes.Code(nil), cc...),
}
}
type boRetryer struct {
backoff Backoff
codes []codes.Code
}
func (r *boRetryer) Retry(err error) (time.Duration, bool) {
c := grpc.Code(err)
for _, rc := range r.codes {
if c == rc {
return r.backoff.Pause(), true
}
}
return 0, false
}
// Backoff implements exponential backoff.
// The wait time between retries is a random value between 0 and the "retry envelope".
// The envelope starts at Initial and increases by the factor of Multiplier every retry,
// but is capped at Max.
type Backoff struct {
// Initial is the initial value of the retry envelope, defaults to 1 second.
Initial time.Duration
// Max is the maximum value of the retry envelope, defaults to 30 seconds.
Max time.Duration
// Multiplier is the factor by which the retry envelope increases.
// It should be greater than 1 and defaults to 2.
Multiplier float64
// cur is the current retry envelope
cur time.Duration
}
func (bo *Backoff) Pause() time.Duration {
if bo.Initial == 0 {
bo.Initial = time.Second
}
if bo.cur == 0 {
bo.cur = bo.Initial
}
if bo.Max == 0 {
bo.Max = 30 * time.Second
}
if bo.Multiplier < 1 {
bo.Multiplier = 2
}
d := time.Duration(rand.Int63n(int64(bo.cur)))
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
if bo.cur > bo.Max {
bo.cur = bo.Max
}
return d
}
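The Backoff comment above describes the retry envelope: each pause is a random value between 0 and the envelope, and the envelope grows by Multiplier up to Max. Although this gax copy is being dropped from the vendor tree, a typical configuration using the OnCodes, WithRetry, and Invoke helpers defined in this package would look roughly like the sketch below; ctx and call are a context.Context and gax.APICall supplied by the caller:

    retry := gax.WithRetry(func() gax.Retryer {
        return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
            Initial:    100 * time.Millisecond, // first envelope
            Max:        5 * time.Second,        // cap on the envelope
            Multiplier: 2,                      // envelope doubles each retry
        })
    })
    err := gax.Invoke(ctx, call, retry)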
type grpcOpt []grpc.CallOption
func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
}
type CallSettings struct {
// Retry returns a Retryer to be used to control retry logic of a method call.
// If Retry is nil or the returned Retryer is nil, the call will not be retried.
Retry func() Retryer
// CallOptions to be forwarded to GRPC.
GRPC []grpc.CallOption
}

View file

@ -1,40 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package gax contains a set of modules which aid the development of APIs
// for clients and servers based on gRPC and Google API conventions.
//
// Application code will rarely need to use this library directly.
// However, code generated automatically from API definition files can use it
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
//
// This project is currently experimental and not supported.
package gax
const Version = "0.1.0"

View file

@ -1,24 +0,0 @@
package gax
import "bytes"
// XGoogHeader is for use by the Google Cloud Libraries only.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for x-goog-api-client header.
func XGoogHeader(keyval ...string) string {
if len(keyval) == 0 {
return ""
}
if len(keyval)%2 != 0 {
panic("gax.Header: odd argument count")
}
var buf bytes.Buffer
for i := 0; i < len(keyval); i += 2 {
buf.WriteByte(' ')
buf.WriteString(keyval[i])
buf.WriteByte('/')
buf.WriteString(keyval[i+1])
}
return buf.String()[1:]
}

View file

@ -1,90 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"time"
"golang.org/x/net/context"
)
// A user defined call stub.
type APICall func(context.Context, CallSettings) error
// Invoke calls the given APICall,
// performing retries as specified by opts, if any.
func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
var settings CallSettings
for _, opt := range opts {
opt.Resolve(&settings)
}
return invoke(ctx, call, settings, Sleep)
}
// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
// If interrupted, Sleep returns ctx.Err().
func Sleep(ctx context.Context, d time.Duration) error {
t := time.NewTimer(d)
select {
case <-ctx.Done():
t.Stop()
return ctx.Err()
case <-t.C:
return nil
}
}
type sleeper func(ctx context.Context, d time.Duration) error
// invoke implements Invoke, taking an additional sleeper argument for testing.
func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
var retryer Retryer
for {
err := call(ctx, settings)
if err == nil {
return nil
}
if settings.Retry == nil {
return err
}
if retryer == nil {
if r := settings.Retry(); r != nil {
retryer = r
} else {
return err
}
}
if d, ok := retryer.Retry(err); !ok {
return err
} else if err = sp(ctx, d); err != nil {
return err
}
}
}

View file

@ -1,176 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"errors"
"fmt"
"strings"
)
type matcher interface {
match([]string) (int, error)
String() string
}
type segment struct {
matcher
name string
}
type labelMatcher string
func (ls labelMatcher) match(segments []string) (int, error) {
if len(segments) == 0 {
return 0, fmt.Errorf("expected %s but no more segments found", ls)
}
if segments[0] != string(ls) {
return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
}
return 1, nil
}
func (ls labelMatcher) String() string {
return string(ls)
}
type wildcardMatcher int
func (wm wildcardMatcher) match(segments []string) (int, error) {
if len(segments) == 0 {
return 0, errors.New("no more segments found")
}
return 1, nil
}
func (wm wildcardMatcher) String() string {
return "*"
}
type pathWildcardMatcher int
func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
length := len(segments) - int(pwm)
if length <= 0 {
return 0, errors.New("not sufficient segments are supplied for path wildcard")
}
return length, nil
}
func (pwm pathWildcardMatcher) String() string {
return "**"
}
type ParseError struct {
Pos int
Template string
Message string
}
func (pe ParseError) Error() string {
return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
}
// PathTemplate manages the template to build and match with paths used
// by API services. It holds a template and variable names in it, and
// it can extract matched patterns from a path string or build a path
// string from a binding.
//
// See http.proto in github.com/googleapis/googleapis/ for the details of
// the template syntax.
type PathTemplate struct {
segments []segment
}
// NewPathTemplate parses a path template, and returns a PathTemplate
// instance if successful.
func NewPathTemplate(template string) (*PathTemplate, error) {
return parsePathTemplate(template)
}
// MustCompilePathTemplate is like NewPathTemplate but panics if the
// expression cannot be parsed. It simplifies safe initialization of
// global variables holding compiled regular expressions.
func MustCompilePathTemplate(template string) *PathTemplate {
pt, err := NewPathTemplate(template)
if err != nil {
panic(err)
}
return pt
}
// Match attempts to match the given path with the template, and returns
// the mapping of the variable name to the matched pattern string.
func (pt *PathTemplate) Match(path string) (map[string]string, error) {
paths := strings.Split(path, "/")
values := map[string]string{}
for _, segment := range pt.segments {
length, err := segment.match(paths)
if err != nil {
return nil, err
}
if segment.name != "" {
value := strings.Join(paths[:length], "/")
if oldValue, ok := values[segment.name]; ok {
values[segment.name] = oldValue + "/" + value
} else {
values[segment.name] = value
}
}
paths = paths[length:]
}
if len(paths) != 0 {
return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
}
return values, nil
}
// Render creates a path string from its template and the binding from
// the variable name to the value.
func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
result := make([]string, 0, len(pt.segments))
var lastVariableName string
for _, segment := range pt.segments {
name := segment.name
if lastVariableName != "" && name == lastVariableName {
continue
}
lastVariableName = name
if name == "" {
result = append(result, segment.String())
} else if value, ok := binding[name]; ok {
result = append(result, value)
} else {
return "", fmt.Errorf("%s is not found", name)
}
}
built := strings.Join(result, "/")
return built, nil
}
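To make the Match/Render pair above concrete, a small sketch based only on the behavior shown in this file; the template and path strings are invented for illustration:

    pt := gax.MustCompilePathTemplate("projects/*/topics/{topic}")

    vals, err := pt.Match("projects/my-proj/topics/news")
    // vals == map[string]string{"$0": "my-proj", "topic": "news"}, err == nil

    path, err := pt.Render(map[string]string{"$0": "my-proj", "topic": "news"})
    // path == "projects/my-proj/topics/news"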

View file

@ -1,227 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"fmt"
"io"
"strings"
)
// This parser follows the syntax of path templates, from
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
// The differences are that there is no custom verb, we allow the initial slash
// to be absent, and that we are not strict as
// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
// literals.
type pathTemplateParser struct {
r *strings.Reader
runeCount int // the number of the current rune in the original string
nextVar int // the number to use for the next unnamed variable
seenName map[string]bool // names we've seen already
seenPathWildcard bool // have we seen "**" already?
}
func parsePathTemplate(template string) (pt *PathTemplate, err error) {
p := &pathTemplateParser{
r: strings.NewReader(template),
seenName: map[string]bool{},
}
// Handle panics with strings like errors.
// See pathTemplateParser.error, below.
defer func() {
if x := recover(); x != nil {
errmsg, ok := x.(errString)
if !ok {
panic(x)
}
pt = nil
err = ParseError{p.runeCount, template, string(errmsg)}
}
}()
segs := p.template()
// If there is a path wildcard, set its length. We can't do this
// until we know how many segments we've got all together.
for i, seg := range segs {
if _, ok := seg.matcher.(pathWildcardMatcher); ok {
segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
break
}
}
return &PathTemplate{segments: segs}, nil
}
// Used to indicate errors "thrown" by this parser. We don't use string because
// many parts of the standard library panic with strings.
type errString string
// Terminates parsing immediately with an error.
func (p *pathTemplateParser) error(msg string) {
panic(errString(msg))
}
// Template = [ "/" ] Segments
func (p *pathTemplateParser) template() []segment {
var segs []segment
if p.consume('/') {
// Initial '/' needs an initial empty matcher.
segs = append(segs, segment{matcher: labelMatcher("")})
}
return append(segs, p.segments("")...)
}
// Segments = Segment { "/" Segment }
func (p *pathTemplateParser) segments(name string) []segment {
var segs []segment
for {
subsegs := p.segment(name)
segs = append(segs, subsegs...)
if !p.consume('/') {
break
}
}
return segs
}
// Segment = "*" | "**" | LITERAL | Variable
func (p *pathTemplateParser) segment(name string) []segment {
if p.consume('*') {
if name == "" {
name = fmt.Sprintf("$%d", p.nextVar)
p.nextVar++
}
if p.consume('*') {
if p.seenPathWildcard {
p.error("multiple '**' disallowed")
}
p.seenPathWildcard = true
// We'll change 0 to the right number at the end.
return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
}
return []segment{{name: name, matcher: wildcardMatcher(0)}}
}
if p.consume('{') {
if name != "" {
p.error("recursive named bindings are not allowed")
}
return p.variable()
}
return []segment{{name: name, matcher: labelMatcher(p.literal())}}
}
// Variable = "{" FieldPath [ "=" Segments ] "}"
// "{" is already consumed.
func (p *pathTemplateParser) variable() []segment {
// Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
name := p.literal()
if p.seenName[name] {
p.error(name + " appears multiple times")
}
p.seenName[name] = true
var segs []segment
if p.consume('=') {
segs = p.segments(name)
} else {
// "{var}" is equivalent to "{var=*}"
segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
}
if !p.consume('}') {
p.error("expected '}'")
}
return segs
}
// A literal is any sequence of characters other than a few special ones.
// The list of stop characters is not quite the same as in the template RFC.
func (p *pathTemplateParser) literal() string {
lit := p.consumeUntil("/*}{=")
if lit == "" {
p.error("empty literal")
}
return lit
}
// Read runes until EOF or one of the runes in stopRunes is encountered.
// If the latter, unread the stop rune. Return the accumulated runes as a string.
func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
var runes []rune
for {
r, ok := p.readRune()
if !ok {
break
}
if strings.IndexRune(stopRunes, r) >= 0 {
p.unreadRune()
break
}
runes = append(runes, r)
}
return string(runes)
}
// If the next rune is r, consume it and return true.
// Otherwise, leave the input unchanged and return false.
func (p *pathTemplateParser) consume(r rune) bool {
rr, ok := p.readRune()
if !ok {
return false
}
if r == rr {
return true
}
p.unreadRune()
return false
}
// Read the next rune from the input. Return it.
// The second return value is false at EOF.
func (p *pathTemplateParser) readRune() (rune, bool) {
r, _, err := p.r.ReadRune()
if err == io.EOF {
return r, false
}
if err != nil {
p.error(err.Error())
}
p.runeCount++
return r, true
}
// Put the last rune that was read back on the input.
func (p *pathTemplateParser) unreadRune() {
if err := p.r.UnreadRune(); err != nil {
p.error(err.Error())
}
p.runeCount--
}

View file

@ -1,9 +1,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
/* /*
High Performance, Feature-Rich Idiomatic Go codec/encoding library for Package codec provides a
binc, msgpack, cbor, json. High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library
for binc, msgpack, cbor, json.
Supported Serialization formats are: Supported Serialization formats are:
@ -11,21 +12,17 @@ Supported Serialization formats are:
- binc: http://github.com/ugorji/binc - binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159 - json: http://json.org http://tools.ietf.org/html/rfc7159
- simple: - simple:
To install: To install:
go get github.com/ugorji/go/codec go get github.com/ugorji/go/codec
This package understands the 'unsafe' tag, to allow using unsafe semantics: This package will carefully use 'unsafe' for performance reasons in specific places.
You can build without unsafe use by passing the safe or appengine tag
- When decoding into a struct, you need to read the field name as a string i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
so you can find the struct field it is mapped to. go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from
Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. go 1.7+ . This is because supporting unsafe requires knowledge of implementation details.
To install using unsafe, pass the 'unsafe' tag:
go get -tags=unsafe github.com/ugorji/go/codec
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
@ -35,12 +32,16 @@ the standard library (ie json, xml, gob, etc).
Rich Feature Set includes: Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API - Simple but extremely powerful and feature-rich API
- Support for go1.4 and above, while selectively using newer APIs for later releases
- Excellent code coverage ( > 90% )
- Very High Performance. - Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Multiple conversions: - Careful selected use of 'unsafe' for targeted performance gains.
Package coerces types where appropriate 100% mode exists where 'unsafe' is not used at all.
e.g. decode an int in the stream into a float, etc. - Lock-free (sans mutex) concurrency for scaling to 100's of cores
- Corner Cases: - Coerce types where appropriate
e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags - Standard field renaming via tags
- Support for omitting empty fields during an encoding - Support for omitting empty fields during an encoding
@ -48,15 +49,21 @@ Rich Feature Set includes:
(struct, slice, map, primitives, pointers, interface{}, etc) (struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types - Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Support IsZero() bool to determine if a value is a zero value.
Analogous to time.Time.IsZero() bool.
- Decoding without a schema (into a interface{}). - Decoding without a schema (into a interface{}).
Includes Options to configure what specific map or slice type to use Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{} when decoding an encoded list or map into a nil interface{}
- Mapping a non-interface type to an interface, so we can decode appropriately
into any interface type with a correctly configured non-interface value.
- Encode a struct as an array, and decode struct from an array in the data stream - Encode a struct as an array, and decode struct from an array in the data stream
- Option to encode struct keys as numbers (instead of strings)
(to support structured streams with fields encoded as numeric codes)
- Comprehensive support for anonymous fields - Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices - Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance. - Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming - Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor) (for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic. This mostly applies to maps, where iteration order is non-deterministic.
@ -68,12 +75,12 @@ Rich Feature Set includes:
- Encode/Decode from/to chan types (for iterative streaming support) - Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol. - Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosynchracies of codecs e.g. - Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved - For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support - For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at: msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
Extension Support Extension Support
Users can register a function to handle the encoding or decoding of Users can register a function to handle the encoding or decoding of
@ -92,6 +99,27 @@ encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can would be encoded as a string. However, with extension support, you can
encode any of these however you like. encode any of these however you like.
Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halves.
We determine how to encode or decode by walking this decision tree
- is type a codec.Selfer?
- is there an extension registered for the type?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce chances of issues happening because the
encoding and decoding sides are out of sync e.g. decoded via very specific
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
Consequently, if a type only defines one-half of the symmetry
(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
then that type doesn't satisfy the check and we will continue walking down the
decision tree.
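One consequence of the decision tree above: to stay on the text-marshaler branch, a type must implement both halves. A hedged sketch (the Temperature type and its format are invented here) of a type satisfying encoding.TextMarshaler and encoding.TextUnmarshaler, encoded with this package's json handle:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    // Temperature implements both halves of the text symmetry described above.
    type Temperature float64

    func (t Temperature) MarshalText() ([]byte, error) {
        return []byte(fmt.Sprintf("%.1fC", float64(t))), nil
    }

    func (t *Temperature) UnmarshalText(b []byte) error {
        var v float64
        if _, err := fmt.Sscanf(string(b), "%fC", &v); err != nil {
            return err
        }
        *t = Temperature(v)
        return nil
    }

    func main() {
        h := &codec.JsonHandle{}

        var buf []byte
        if err := codec.NewEncoderBytes(&buf, h).Encode(Temperature(21.5)); err != nil {
            panic(err)
        }
        fmt.Println(string(buf)) // "21.5C" — a JSON string, produced via MarshalText

        var out Temperature
        if err := codec.NewDecoderBytes(buf, h).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Println(out) // 21.5 — round-tripped via UnmarshalText
    }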
RPC RPC
RPC Client and Server Codecs are implemented, so the codecs can be used RPC Client and Server Codecs are implemented, so the codecs can be used
@ -160,40 +188,77 @@ Sample usage model:
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec) client := rpc.NewClientWithCodec(rpcCodec)
Running Tests
To run tests, use the following:
go test
To run the full suite of tests, use the following:
go test -tags alltests -run Suite
You can run the tag 'safe' to run tests or build in safe mode. e.g.
go test -tags safe -run Json
go test -tags "alltests safe" -run Suite
Running Benchmarks
Please see http://github.com/ugorji/go-codec-bench .
Caveats
Struct fields matching the following are ignored during encoding and decoding
- struct tag value set to -
- func, complex numbers, unsafe pointers
- unexported and not embedded
- unexported and embedded and not struct kind
- unexported and embedded pointers (from go1.10)
Every other field in a struct will be encoded/decoded.
Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.
*/ */
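A compact illustration of the encoding caveats listed just above; the struct and field names are invented for this sketch:

    type record struct {
        Name    string            // encoded: exported, no opt-out tag
        Skipped string `codec:"-"` // ignored: struct tag value set to -
        Handler func()            // ignored: func kind
        secret  string            // ignored: unexported and not embedded
    }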
package codec package codec
// Benefits of go-codec:
//
// - encoding/json always reads whole file into memory first.
// This makes it unsuitable for parsing very large files.
// - encoding/xml cannot parse into a map[string]interface{}
// I found this out on reading https://github.com/clbanning/mxj
// TODO: // TODO:
// - For Go 1.11, when mid-stack inlining is enabled,
// we should use committed functions for writeXXX and readXXX calls.
// This involves uncommenting the methods for decReaderSwitch and encWriterSwitch
// and using those (decReaderSwitch and encWriterSwitch) in all handles
// instead of encWriter and decReader.
// The benefit is that, for the (En|De)coder over []byte, the encWriter/decReader
// will be inlined, giving a performance bump for that typical case.
// However, it will only be inlined if mid-stack inlining is enabled,
// as we call panic to raise errors, and panic currently prevents inlining.
// //
// - optimization for codecgen: // PUNTED:
// if len of entity is <= 3 words, then support a value receiver for encode. // - To make Handle comparable, make extHandle in BasicHandle a non-embedded pointer,
// - (En|De)coder should store an error when it occurs. // and use overlay methods on *BasicHandle to call through to extHandle after initializing
// Until reset, subsequent calls return that error that was stored. // the "xh *extHandle" to point to a real slice.
// This means that free panics must go away. //
// All errors must be raised through errorf method. // BEFORE EACH RELEASE:
// - Decoding using a chan is good, but incurs concurrency costs. // - Look through and fix padding for each type, to eliminate false sharing
// This is because there's no fast way to use a channel without it // - critical shared objects that are read many times
// having to switch goroutines constantly. // TypeInfos
// Callback pattern is still the best. Maybe cnsider supporting something like: // - pooled objects:
// type X struct { // decNaked, decNakedContainers, codecFner, typeInfoLoadArray,
// Name string // - small objects allocated independently, that we read/use much across threads:
// Ys []Y // codecFn, typeInfo
// Ys chan <- Y // - Objects allocated independently and used a lot
// Ys func(interface{}) -> call this interface for each entry in there. // Decoder, Encoder,
// } // xxxHandle, xxxEncDriver, xxxDecDriver (xxx = json, msgpack, cbor, binc, simple)
// - Consider adding a isZeroer interface { isZero() bool } // - In all above, arrange values modified together to be close to each other.
// It is used within isEmpty, for omitEmpty support. //
// - Consider making Handle used AS-IS within the encoding/decoding session. // For all of these, either ensure that they occupy full cache lines,
// This means that we don't cache Handle information within the (En|De)coder, // or ensure that the things just past the cache line boundary are hardly read/written
// except we really need it at Reset(...) // e.g. JsonHandle.RawBytesExt - which is copied into json(En|De)cDriver at init
// - Consider adding math/big support //
// - Consider reducing the size of the generated functions: // Occupying full cache lines means they occupy 8*N words (where N is an integer).
// Maybe use one loop, and put the conditionals in the loop. // Check this out by running: ./run.sh -z
// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } // - look at those tagged ****, meaning they are not occupying full cache lines
// - look at those tagged <<<<, meaning they are larger than 32 words (something to watch)
// - Run "golint -min_confidence 0.81"

@ -1,4 +1,4 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -55,39 +55,77 @@ const (
// others not currently supported // others not currently supported
) )
func bincdesc(vd, vs byte) string {
switch vd {
case bincVdSpecial:
switch vs {
case bincSpNil:
return "nil"
case bincSpFalse:
return "false"
case bincSpTrue:
return "true"
case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat:
return "float"
case bincSpZero:
return "uint"
case bincSpNegOne:
return "int"
default:
return "unknown"
}
case bincVdSmallInt, bincVdPosInt:
return "uint"
case bincVdNegInt:
return "int"
case bincVdFloat:
return "float"
case bincVdSymbol:
return "string"
case bincVdString:
return "string"
case bincVdByteArray:
return "bytes"
case bincVdTimestamp:
return "time"
case bincVdCustomExt:
return "ext"
case bincVdArray:
return "array"
case bincVdMap:
return "map"
default:
return "unknown"
}
}
type bincEncDriver struct { type bincEncDriver struct {
e *Encoder e *Encoder
h *BincHandle
w encWriter w encWriter
m map[string]uint16 // symbols m map[string]uint16 // symbols
b [scratchByteArrayLen]byte b [16]byte // scratch, used for encoding numbers - bigendian style
s uint16 // symbols sequencer s uint16 // symbols sequencer
encNoSeparator // c containerState
} encDriverTrackContainerWriter
noBuiltInTypes
func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { // encNoSeparator
return rt == timeTypId
}
func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
if rt == timeTypId {
var bs []byte
switch x := v.(type) {
case time.Time:
bs = encodeTime(x)
case *time.Time:
bs = encodeTime(*x)
default:
e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
}
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
} }
func (e *bincEncDriver) EncodeNil() { func (e *bincEncDriver) EncodeNil() {
e.w.writen1(bincVdSpecial<<4 | bincSpNil) e.w.writen1(bincVdSpecial<<4 | bincSpNil)
} }
func (e *bincEncDriver) EncodeTime(t time.Time) {
if t.IsZero() {
e.EncodeNil()
} else {
bs := bincEncodeTime(t)
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
}
func (e *bincEncDriver) EncodeBool(b bool) { func (e *bincEncDriver) EncodeBool(b bool) {
if b { if b {
e.w.writen1(bincVdSpecial<<4 | bincSpTrue) e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
@ -195,15 +233,21 @@ func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
e.w.writen1(xtag) e.w.writen1(xtag)
} }
func (e *bincEncDriver) EncodeArrayStart(length int) { func (e *bincEncDriver) WriteArrayStart(length int) {
e.encLen(bincVdArray<<4, uint64(length)) e.encLen(bincVdArray<<4, uint64(length))
e.c = containerArrayStart
} }
func (e *bincEncDriver) EncodeMapStart(length int) { func (e *bincEncDriver) WriteMapStart(length int) {
e.encLen(bincVdMap<<4, uint64(length)) e.encLen(bincVdMap<<4, uint64(length))
e.c = containerMapStart
} }
func (e *bincEncDriver) EncodeString(c charEncoding, v string) { func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) {
e.EncodeSymbol(v)
return
}
l := uint64(len(v)) l := uint64(len(v))
e.encBytesLen(c, l) e.encBytesLen(c, l)
if l > 0 { if l > 0 {
@ -213,7 +257,7 @@ func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
func (e *bincEncDriver) EncodeSymbol(v string) { func (e *bincEncDriver) EncodeSymbol(v string) {
// if WriteSymbolsNoRefs { // if WriteSymbolsNoRefs {
// e.encodeString(c_UTF8, v) // e.encodeString(cUTF8, v)
// return // return
// } // }
@ -223,10 +267,10 @@ func (e *bincEncDriver) EncodeSymbol(v string) {
l := len(v) l := len(v)
if l == 0 { if l == 0 {
e.encBytesLen(c_UTF8, 0) e.encBytesLen(cUTF8, 0)
return return
} else if l == 1 { } else if l == 1 {
e.encBytesLen(c_UTF8, 1) e.encBytesLen(cUTF8, 1)
e.w.writen1(v[0]) e.w.writen1(v[0])
return return
} }
@ -276,6 +320,10 @@ func (e *bincEncDriver) EncodeSymbol(v string) {
} }
func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if v == nil {
e.EncodeNil()
return
}
l := uint64(len(v)) l := uint64(len(v))
e.encBytesLen(c, l) e.encBytesLen(c, l)
if l > 0 { if l > 0 {
@ -285,7 +333,7 @@ func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
//TODO: support bincUnicodeOther (for now, just use string or bytearray) //TODO: support bincUnicodeOther (for now, just use string or bytearray)
if c == c_RAW { if c == cRAW {
e.encLen(bincVdByteArray<<4, length) e.encLen(bincVdByteArray<<4, length)
} else { } else {
e.encLen(bincVdString<<4, length) e.encLen(bincVdString<<4, length)
@ -324,6 +372,9 @@ type bincDecSymbol struct {
} }
type bincDecDriver struct { type bincDecDriver struct {
decDriverNoopContainerReader
noBuiltInTypes
d *Decoder d *Decoder
h *BincHandle h *BincHandle
r decReader r decReader
@ -332,13 +383,15 @@ type bincDecDriver struct {
bd byte bd byte
vd byte vd byte
vs byte vs byte
noStreamingCodec _ [3]byte // padding
decNoSeparator
b [scratchByteArrayLen]byte
// linear searching on this slice is ok, // linear searching on this slice is ok,
// because we typically expect < 32 symbols in each stream. // because we typically expect < 32 symbols in each stream.
s []bincDecSymbol s []bincDecSymbol
// noStreamingCodec
// decNoSeparator
b [8 * 8]byte // scratch
} }
func (d *bincDecDriver) readNextBd() { func (d *bincDecDriver) readNextBd() {
@ -348,7 +401,17 @@ func (d *bincDecDriver) readNextBd() {
d.bdRead = true d.bdRead = true
} }
func (d *bincDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *bincDecDriver) ContainerType() (vt valueType) { func (d *bincDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
if d.vd == bincVdSpecial && d.vs == bincSpNil { if d.vd == bincVdSpecial && d.vs == bincSpNil {
return valueTypeNil return valueTypeNil
} else if d.vd == bincVdByteArray { } else if d.vd == bincVdByteArray {
@ -359,9 +422,10 @@ func (d *bincDecDriver) ContainerType() (vt valueType) {
return valueTypeArray return valueTypeArray
} else if d.vd == bincVdMap { } else if d.vd == bincVdMap {
return valueTypeMap return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
} }
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset return valueTypeUnset
} }
@ -376,27 +440,24 @@ func (d *bincDecDriver) TryDecodeAsNil() bool {
return false return false
} }
func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool { func (d *bincDecDriver) DecodeTime() (t time.Time) {
return rt == timeTypId
}
func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
if rt == timeTypId { if d.bd == bincVdSpecial<<4|bincSpNil {
if d.vd != bincVdTimestamp {
d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
return
}
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
var vt *time.Time = v.(*time.Time)
*vt = tt
d.bdRead = false d.bdRead = false
return
} }
if d.vd != bincVdTimestamp {
d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return
}
t, err := bincDecodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
d.bdRead = false
return
} }
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
@ -405,7 +466,7 @@ func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
} else { } else {
l := d.r.readn1() l := d.r.readn1()
if l > 8 { if l > 8 {
d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l) d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l)
return return
} }
for i := l; i < 8; i++ { for i := l; i < 8; i++ {
@ -424,7 +485,7 @@ func (d *bincDecDriver) decFloat() (f float64) {
d.decFloatPre(d.vs, 8) d.decFloatPre(d.vs, 8)
f = math.Float64frombits(bigen.Uint64(d.b[0:8])) f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
} else { } else {
d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
return return
@ -481,49 +542,38 @@ func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
neg = true neg = true
ui = 1 ui = 1
} else { } else {
d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs) d.d.errorf("integer decode fails - invalid special value from descriptor %x-%x/%s",
d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
} else { } else {
d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
return return
} }
return return
} }
func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { func (d *bincDecDriver) DecodeInt64() (i int64) {
ui, neg := d.decCheckInteger() ui, neg := d.decCheckInteger()
i, overflow := chkOvf.SignedInt(ui) i = chkOvf.SignedIntV(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg { if neg {
i = -i i = -i
} }
if chkOvf.Int(i, bitsize) {
d.d.errorf("binc: overflow integer: %v", i)
return
}
d.bdRead = false d.bdRead = false
return return
} }
func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) { func (d *bincDecDriver) DecodeUint64() (ui uint64) {
ui, neg := d.decCheckInteger() ui, neg := d.decCheckInteger()
if neg { if neg {
d.d.errorf("Assigning negative signed value to unsigned type") d.d.errorf("assigning negative signed value to unsigned integer type")
return
}
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("binc: overflow integer: %v", ui)
return return
} }
d.bdRead = false d.bdRead = false
return return
} }
func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { func (d *bincDecDriver) DecodeFloat64() (f float64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -539,17 +589,14 @@ func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
} else if vs == bincSpNegInf { } else if vs == bincSpNegInf {
return math.Inf(-1) return math.Inf(-1)
} else { } else {
d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) d.d.errorf("float - invalid special value from descriptor %x-%x/%s",
d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
} else if vd == bincVdFloat { } else if vd == bincVdFloat {
f = d.decFloat() f = d.decFloat()
} else { } else {
f = float64(d.DecodeInt(64)) f = float64(d.DecodeInt64())
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("binc: float32 overflow: %v", f)
return
} }
d.bdRead = false d.bdRead = false
return return
@ -565,7 +612,7 @@ func (d *bincDecDriver) DecodeBool() (b bool) {
} else if bd == (bincVdSpecial | bincSpTrue) { } else if bd == (bincVdSpecial | bincSpTrue) {
b = true b = true
} else { } else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
d.bdRead = false d.bdRead = false
@ -573,8 +620,11 @@ func (d *bincDecDriver) DecodeBool() (b bool) {
} }
func (d *bincDecDriver) ReadMapStart() (length int) { func (d *bincDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
if d.vd != bincVdMap { if d.vd != bincVdMap {
d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
length = d.decLen() length = d.decLen()
@ -583,8 +633,11 @@ func (d *bincDecDriver) ReadMapStart() (length int) {
} }
func (d *bincDecDriver) ReadArrayStart() (length int) { func (d *bincDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
if d.vd != bincVdArray { if d.vd != bincVdArray {
d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
length = d.decLen() length = d.decLen()
@ -615,7 +668,8 @@ func (d *bincDecDriver) decLenNumber() (v uint64) {
return return
} }
func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) { func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (
bs2 []byte, s string) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -623,7 +677,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
d.bdRead = false d.bdRead = false
return return
} }
var slen int = -1 var slen = -1
// var ok bool // var ok bool
switch d.vd { switch d.vd {
case bincVdString, bincVdByteArray: case bincVdString, bincVdByteArray:
@ -632,12 +686,12 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
if d.br { if d.br {
bs2 = d.r.readx(slen) bs2 = d.r.readx(slen)
} else if len(bs) == 0 { } else if len(bs) == 0 {
bs2 = decByteSlice(d.r, slen, d.b[:]) bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
} else { } else {
bs2 = decByteSlice(d.r, slen, bs) bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
} }
} else { } else {
bs2 = decByteSlice(d.r, slen, bs) bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
} }
if withString { if withString {
s = string(bs2) s = string(bs2)
@ -689,15 +743,14 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
// since using symbols, do not store any part of // since using symbols, do not store any part of
// the parameter bs in the map, as it might be a shared buffer. // the parameter bs in the map, as it might be a shared buffer.
// bs2 = decByteSlice(d.r, slen, bs) // bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, nil) bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
if withString { if withString {
s = string(bs2) s = string(bs2)
} }
d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
} }
default: default:
d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
return return
} }
d.bdRead = false d.bdRead = false
@ -705,18 +758,19 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
} }
func (d *bincDecDriver) DecodeString() (s string) { func (d *bincDecDriver) DecodeString() (s string) {
// DecodeBytes does not accomodate symbols, whose impl stores string version in map. // DecodeBytes does not accommodate symbols, whose impl stores string version in map.
// Use decStringAndBytes directly. // Use decStringAndBytes directly.
// return string(d.DecodeBytes(d.b[:], true, true)) // return string(d.DecodeBytes(d.b[:], true, true))
_, s = d.decStringAndBytes(d.b[:], true, true) _, s = d.decStringAndBytes(d.b[:], true, true)
return return
} }
func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) {
if isstring { s, _ = d.decStringAndBytes(d.b[:], false, true)
bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) return
return }
}
func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -724,12 +778,16 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
d.bdRead = false d.bdRead = false
return nil return nil
} }
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.vd == bincVdArray {
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
var clen int var clen int
if d.vd == bincVdString || d.vd == bincVdByteArray { if d.vd == bincVdString || d.vd == bincVdByteArray {
clen = d.decLen() clen = d.decLen()
} else { } else {
d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x", d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
bincVdString, bincVdByteArray, d.vd)
return return
} }
d.bdRead = false d.bdRead = false
@ -740,12 +798,12 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:] bs = d.b[:]
} }
} }
return decByteSlice(d.r, clen, bs) return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
} }
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff { if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag) d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
return return
} }
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
@ -768,14 +826,14 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b
l := d.decLen() l := d.decLen()
xtag = d.r.readn1() xtag = d.r.readn1()
if verifyTag && xtag != tag { if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag)
return return
} }
xbs = d.r.readx(l) xbs = d.r.readx(l)
} else if d.vd == bincVdByteArray { } else if d.vd == bincVdByteArray {
xbs = d.DecodeBytes(nil, false, true) xbs = d.DecodeBytes(nil, true)
} else { } else {
d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
return return
} }
d.bdRead = false d.bdRead = false
@ -787,7 +845,7 @@ func (d *bincDecDriver) DecodeNaked() {
d.readNextBd() d.readNextBd()
} }
n := &d.d.n n := d.d.n
var decodeFurther bool var decodeFurther bool
switch d.vd { switch d.vd {
@ -820,7 +878,7 @@ func (d *bincDecDriver) DecodeNaked() {
n.v = valueTypeInt n.v = valueTypeInt
n.i = int64(-1) // int8(-1) n.i = int64(-1) // int8(-1)
default: default:
d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs) d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
} }
case bincVdSmallInt: case bincVdSmallInt:
n.v = valueTypeUint n.v = valueTypeUint
@ -842,10 +900,10 @@ func (d *bincDecDriver) DecodeNaked() {
n.s = d.DecodeString() n.s = d.DecodeString()
case bincVdByteArray: case bincVdByteArray:
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false) n.l = d.DecodeBytes(nil, false)
case bincVdTimestamp: case bincVdTimestamp:
n.v = valueTypeTimestamp n.v = valueTypeTime
tt, err := decodeTime(d.r.readx(int(d.vs))) tt, err := bincDecodeTime(d.r.readx(int(d.vs)))
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -862,7 +920,7 @@ func (d *bincDecDriver) DecodeNaked() {
n.v = valueTypeMap n.v = valueTypeMap
decodeFurther = true decodeFurther = true
default: default:
d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
} }
if !decodeFurther { if !decodeFurther {
@ -892,31 +950,219 @@ func (d *bincDecDriver) DecodeNaked() {
type BincHandle struct { type BincHandle struct {
BasicHandle BasicHandle
binaryEncodingType binaryEncodingType
noElemSeparators
// AsSymbols defines what should be encoded as symbols.
//
// Encoding as symbols can reduce the encoded size significantly.
//
// However, during encoding, each string to be encoded as a symbol must
// be checked to see if it has been seen before. Consequently, encoding time
// will increase if using symbols, because string comparisons have a clear cost.
//
// Values:
// - 0: default: library uses best judgement
// - 1: use symbols
// - 2: do not use symbols
AsSymbols uint8
// AsSymbols: may later on introduce more options ...
// - m: map keys
// - s: struct fields
// - n: none
// - a: all: same as m, s, ...
// _ [1]uint64 // padding
} }
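A short usage sketch for this option, from a consumer of the package (illustrative; assumes the usual bytes import and this codec package):

func encodeWithSymbols() ([]byte, error) {
	var bh codec.BincHandle
	bh.AsSymbols = 1 // 1: use symbols; 2: do not; 0: library default

	var buf bytes.Buffer
	enc := codec.NewEncoder(&buf, &bh)
	err := enc.Encode(map[string]int{"repeated-key": 1, "other-key": 2})
	return buf.Bytes(), err
}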
// Name returns the name of the handle: binc
func (h *BincHandle) Name() string { return "binc" }
// SetBytesExt sets an extension
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{b: ext}) return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}})
} }
func (h *BincHandle) newEncDriver(e *Encoder) encDriver { func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
return &bincEncDriver{e: e, w: e.w} return &bincEncDriver{e: e, h: h, w: e.w}
} }
func (h *BincHandle) newDecDriver(d *Decoder) decDriver { func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes} return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
} }
func (e *bincEncDriver) reset() { func (e *bincEncDriver) reset() {
e.w = e.e.w e.w = e.e.w
e.s = 0 e.s = 0
e.c = 0
e.m = nil e.m = nil
} }
func (d *bincDecDriver) reset() { func (d *bincDecDriver) reset() {
d.r = d.d.r d.r, d.br = d.d.r, d.d.bytes
d.s = nil d.s = nil
d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
} }
// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// 2-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
//
func bincEncodeTime(t time.Time) []byte {
//t := rv.Interface().(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
btmp [8]byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
bigen.PutUint64(btmp[:], uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
bigen.PutUint32(btmp[:4], uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
//zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
bigen.PutUint16(btmp[:2], z)
// clear dst flags
bs[i] = btmp[0] & 0x3f
bs[i+1] = btmp[1]
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
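For reference, a small sketch (not part of the library) that unpacks the descriptor byte laid out in the comment above bincEncodeTime; the bit positions mirror what bincEncodeTime writes and bincDecodeTime reads:

// describeBincTimeDescriptor reports which components follow the descriptor
// byte and how many bytes each occupies (per the A/B/C/DDD/EE layout above).
func describeBincTimeDescriptor(bd byte) (hasSecs, hasNsecs, hasTZ bool, secsLen, nsecsLen int) {
	hasSecs = bd&0x80 != 0  // A
	hasNsecs = bd&0x40 != 0 // B
	hasTZ = bd&0x20 != 0    // C
	if hasSecs {
		secsLen = int((bd>>2)&0x7) + 1 // DDD+1 bytes
	}
	if hasNsecs {
		nsecsLen = int(bd&0x3) + 1 // EE+1 bytes
	}
	return
}

// Example: descriptor 0x84 (1000_0100) means secs only, encoded in 2 bytes, UTC.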
// bincDecodeTime decodes a []byte into a time.Time.
func bincDecodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
}
i = i2
tsec = int64(bigen.Uint64(btmp[:]))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp[:])
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
// However, we need name here, so it can be shown when time is printed.
// Zone name is in form: UTC-08:00.
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
i2 = i + 2
tz = bigen.Uint16(bs[i:i2])
// i = i2
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
var _ decDriver = (*bincDecDriver)(nil) var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil) var _ encDriver = (*bincEncDriver)(nil)

@ -1,4 +1,4 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
package codec package codec
@ -6,6 +6,7 @@ package codec
import ( import (
"math" "math"
"reflect" "reflect"
"time"
) )
const ( const (
@ -38,6 +39,8 @@ const (
cborBdBreak = 0xff cborBdBreak = 0xff
) )
// These define some in-stream descriptors for
// manual encoding e.g. when doing explicit indefinite-length
const ( const (
CborStreamBytes byte = 0x5f CborStreamBytes byte = 0x5f
CborStreamString = 0x7f CborStreamString = 0x7f
@ -57,15 +60,57 @@ const (
cborBaseSimple = 0xe0 cborBaseSimple = 0xe0
) )
func cbordesc(bd byte) string {
switch bd {
case cborBdNil:
return "nil"
case cborBdFalse:
return "false"
case cborBdTrue:
return "true"
case cborBdFloat16, cborBdFloat32, cborBdFloat64:
return "float"
case cborBdIndefiniteBytes:
return "bytes*"
case cborBdIndefiniteString:
return "string*"
case cborBdIndefiniteArray:
return "array*"
case cborBdIndefiniteMap:
return "map*"
default:
switch {
case bd >= cborBaseUint && bd < cborBaseNegInt:
return "(u)int"
case bd >= cborBaseNegInt && bd < cborBaseBytes:
return "int"
case bd >= cborBaseBytes && bd < cborBaseString:
return "bytes"
case bd >= cborBaseString && bd < cborBaseArray:
return "string"
case bd >= cborBaseArray && bd < cborBaseMap:
return "array"
case bd >= cborBaseMap && bd < cborBaseTag:
return "map"
case bd >= cborBaseTag && bd < cborBaseSimple:
return "ext"
default:
return "unknown"
}
}
}
// ------------------- // -------------------
type cborEncDriver struct { type cborEncDriver struct {
noBuiltInTypes noBuiltInTypes
encNoSeparator encDriverNoopContainerWriter
// encNoSeparator
e *Encoder e *Encoder
w encWriter w encWriter
h *CborHandle h *CborHandle
x [8]byte x [8]byte
_ [3]uint64 // padding
} }
func (e *cborEncDriver) EncodeNil() { func (e *cborEncDriver) EncodeNil() {
@ -123,6 +168,24 @@ func (e *cborEncDriver) encLen(bd byte, length int) {
e.encUint(uint64(length), bd) e.encUint(uint64(length), bd)
} }
func (e *cborEncDriver) EncodeTime(t time.Time) {
if t.IsZero() {
e.EncodeNil()
} else if e.h.TimeRFC3339 {
e.encUint(0, cborBaseTag)
e.EncodeString(cUTF8, t.Format(time.RFC3339Nano))
} else {
e.encUint(1, cborBaseTag)
t = t.UTC().Round(time.Microsecond)
sec, nsec := t.Unix(), uint64(t.Nanosecond())
if nsec == 0 {
e.EncodeInt(sec)
} else {
e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
}
}
}
func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
e.encUint(uint64(xtag), cborBaseTag) e.encUint(uint64(xtag), cborBaseTag)
if v := ext.ConvertExt(rv); v == nil { if v := ext.ConvertExt(rv); v == nil {
@ -134,53 +197,103 @@ func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Enco
func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
e.encUint(uint64(re.Tag), cborBaseTag) e.encUint(uint64(re.Tag), cborBaseTag)
if re.Data != nil { if false && re.Data != nil {
en.encode(re.Data) en.encode(re.Data)
} else if re.Value == nil { } else if re.Value != nil {
e.EncodeNil()
} else {
en.encode(re.Value) en.encode(re.Value)
} else {
e.EncodeNil()
} }
} }
func (e *cborEncDriver) EncodeArrayStart(length int) { func (e *cborEncDriver) WriteArrayStart(length int) {
e.encLen(cborBaseArray, length) if e.h.IndefiniteLength {
e.w.writen1(cborBdIndefiniteArray)
} else {
e.encLen(cborBaseArray, length)
}
} }
func (e *cborEncDriver) EncodeMapStart(length int) { func (e *cborEncDriver) WriteMapStart(length int) {
e.encLen(cborBaseMap, length) if e.h.IndefiniteLength {
e.w.writen1(cborBdIndefiniteMap)
} else {
e.encLen(cborBaseMap, length)
}
}
func (e *cborEncDriver) WriteMapEnd() {
if e.h.IndefiniteLength {
e.w.writen1(cborBdBreak)
}
}
func (e *cborEncDriver) WriteArrayEnd() {
if e.h.IndefiniteLength {
e.w.writen1(cborBdBreak)
}
} }
func (e *cborEncDriver) EncodeString(c charEncoding, v string) { func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encLen(cborBaseString, len(v)) e.encStringBytesS(cborBaseString, v)
e.w.writestr(v)
}
func (e *cborEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
} }
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if c == c_RAW { if v == nil {
e.encLen(cborBaseBytes, len(v)) e.EncodeNil()
} else if c == cRAW {
e.encStringBytesS(cborBaseBytes, stringView(v))
} else { } else {
e.encLen(cborBaseString, len(v)) e.encStringBytesS(cborBaseString, stringView(v))
}
}
func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
if e.h.IndefiniteLength {
if bb == cborBaseBytes {
e.w.writen1(cborBdIndefiniteBytes)
} else {
e.w.writen1(cborBdIndefiniteString)
}
blen := len(v) / 4
if blen == 0 {
blen = 64
} else if blen > 1024 {
blen = 1024
}
for i := 0; i < len(v); {
var v2 string
i2 := i + blen
if i2 < len(v) {
v2 = v[i:i2]
} else {
v2 = v[i:]
}
e.encLen(bb, len(v2))
e.w.writestr(v2)
i = i2
}
e.w.writen1(cborBdBreak)
} else {
e.encLen(bb, len(v))
e.w.writestr(v)
} }
e.w.writeb(v)
} }
// ---------------------- // ----------------------
type cborDecDriver struct { type cborDecDriver struct {
d *Decoder d *Decoder
h *CborHandle h *CborHandle
r decReader r decReader
b [scratchByteArrayLen]byte // b [scratchByteArrayLen]byte
br bool // bytes reader br bool // bytes reader
bdRead bool bdRead bool
bd byte bd byte
noBuiltInTypes noBuiltInTypes
decNoSeparator // decNoSeparator
decDriverNoopContainerReader
_ [3]uint64 // padding
} }
func (d *cborDecDriver) readNextBd() { func (d *cborDecDriver) readNextBd() {
@ -188,7 +301,17 @@ func (d *cborDecDriver) readNextBd() {
d.bdRead = true d.bdRead = true
} }
func (d *cborDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *cborDecDriver) ContainerType() (vt valueType) { func (d *cborDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil { if d.bd == cborBdNil {
return valueTypeNil return valueTypeNil
} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
@ -199,9 +322,10 @@ func (d *cborDecDriver) ContainerType() (vt valueType) {
return valueTypeArray return valueTypeArray
} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
return valueTypeMap return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
} }
// else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
// }
return valueTypeUnset return valueTypeUnset
} }
@ -242,7 +366,7 @@ func (d *cborDecDriver) decUint() (ui uint64) {
} else if v == 0x1b { } else if v == 0x1b {
ui = uint64(bigen.Uint64(d.r.readx(8))) ui = uint64(bigen.Uint64(d.r.readx(8)))
} else { } else {
d.d.errorf("decUint: Invalid descriptor: %v", d.bd) d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
return return
} }
} }
@ -258,52 +382,36 @@ func (d *cborDecDriver) decCheckInteger() (neg bool) {
} else if major == cborMajorNegInt { } else if major == cborMajorNegInt {
neg = true neg = true
} else { } else {
d.d.errorf("invalid major: %v (bd: %v)", major, d.bd) d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd))
return return
} }
return return
} }
func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) { func (d *cborDecDriver) DecodeInt64() (i int64) {
neg := d.decCheckInteger() neg := d.decCheckInteger()
ui := d.decUint() ui := d.decUint()
// check if this number can be converted to an int without overflow // check if this number can be converted to an int without overflow
var overflow bool
if neg { if neg {
if i, overflow = chkOvf.SignedInt(ui + 1); overflow { i = -(chkOvf.SignedIntV(ui + 1))
d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
return
}
i = -i
} else { } else {
if i, overflow = chkOvf.SignedInt(ui); overflow { i = chkOvf.SignedIntV(ui)
d.d.errorf("cbor: overflow converting %v to signed integer", ui)
return
}
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("cbor: overflow integer: %v", i)
return
} }
d.bdRead = false d.bdRead = false
return return
} }
func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) { func (d *cborDecDriver) DecodeUint64() (ui uint64) {
if d.decCheckInteger() { if d.decCheckInteger() {
d.d.errorf("Assigning negative signed value to unsigned type") d.d.errorf("assigning negative signed value to unsigned type")
return return
} }
ui = d.decUint() ui = d.decUint()
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("cbor: overflow integer: %v", ui)
return
}
d.bdRead = false d.bdRead = false
return return
} }
func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { func (d *cborDecDriver) DecodeFloat64() (f float64) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -314,13 +422,9 @@ func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
} else if bd == cborBdFloat64 { } else if bd == cborBdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else if bd >= cborBaseUint && bd < cborBaseBytes { } else if bd >= cborBaseUint && bd < cborBaseBytes {
f = float64(d.DecodeInt(64)) f = float64(d.DecodeInt64())
} else { } else {
d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd) d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd))
return
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("cbor: float32 overflow: %v", f)
return return
} }
d.bdRead = false d.bdRead = false
@ -336,7 +440,7 @@ func (d *cborDecDriver) DecodeBool() (b bool) {
b = true b = true
} else if bd == cborBdFalse { } else if bd == cborBdFalse {
} else { } else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
return return
} }
d.bdRead = false d.bdRead = false
@ -344,6 +448,9 @@ func (d *cborDecDriver) DecodeBool() (b bool) {
} }
func (d *cborDecDriver) ReadMapStart() (length int) { func (d *cborDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false d.bdRead = false
if d.bd == cborBdIndefiniteMap { if d.bd == cborBdIndefiniteMap {
return -1 return -1
@ -352,6 +459,9 @@ func (d *cborDecDriver) ReadMapStart() (length int) {
} }
func (d *cborDecDriver) ReadArrayStart() (length int) { func (d *cborDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false d.bdRead = false
if d.bd == cborBdIndefiniteArray { if d.bd == cborBdIndefiniteArray {
return -1 return -1
@ -370,7 +480,8 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
break break
} }
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd) d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+
" got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd))
return nil return nil
} }
n := d.decLen() n := d.decLen()
@ -391,7 +502,7 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
return bs return bs
} }
func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead { if !d.bdRead {
d.readNextBd() d.readNextBd()
} }
@ -400,25 +511,84 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
return nil return nil
} }
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
d.bdRead = false
if bs == nil { if bs == nil {
return d.decAppendIndefiniteBytes(nil) if zerocopy {
return d.decAppendIndefiniteBytes(d.d.b[:0])
}
return d.decAppendIndefiniteBytes(zeroByteSlice)
} }
return d.decAppendIndefiniteBytes(bs[:0]) return d.decAppendIndefiniteBytes(bs[:0])
} }
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
return
}
clen := d.decLen() clen := d.decLen()
d.bdRead = false d.bdRead = false
if zerocopy { if zerocopy {
if d.br { if d.br {
return d.r.readx(clen) return d.r.readx(clen)
} else if len(bs) == 0 { } else if len(bs) == 0 {
bs = d.b[:] bs = d.d.b[:]
} }
} }
return decByteSlice(d.r, clen, bs) return decByteSlice(d.r, clen, d.h.MaxInitLen, bs)
} }
func (d *cborDecDriver) DecodeString() (s string) { func (d *cborDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true)) return string(d.DecodeBytes(d.d.b[:], true))
}
func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.d.b[:], true)
}
func (d *cborDecDriver) DecodeTime() (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return
}
xtag := d.decUint()
d.bdRead = false
return d.decodeTime(xtag)
}
func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
if !d.bdRead {
d.readNextBd()
}
switch xtag {
case 0:
var err error
if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil {
d.d.errorv(err)
}
case 1:
// decode an int64 or a float, and infer time.Time from there.
// for floats, round to microseconds, as that is what is guaranteed to fit well.
switch {
case d.bd == cborBdFloat16, d.bd == cborBdFloat32:
f1, f2 := math.Modf(d.DecodeFloat64())
t = time.Unix(int64(f1), int64(f2*1e9))
case d.bd == cborBdFloat64:
f1, f2 := math.Modf(d.DecodeFloat64())
t = time.Unix(int64(f1), int64(f2*1e9))
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt,
d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
t = time.Unix(d.DecodeInt64(), 0)
default:
d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)")
}
default:
d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
}
t = t.UTC().Round(time.Microsecond)
return
} }
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@ -449,7 +619,7 @@ func (d *cborDecDriver) DecodeNaked() {
d.readNextBd() d.readNextBd()
} }
n := &d.d.n n := d.d.n
var decodeFurther bool var decodeFurther bool
switch d.bd { switch d.bd {
@ -461,15 +631,12 @@ func (d *cborDecDriver) DecodeNaked() {
case cborBdTrue: case cborBdTrue:
n.v = valueTypeBool n.v = valueTypeBool
n.b = true n.b = true
case cborBdFloat16, cborBdFloat32: case cborBdFloat16, cborBdFloat32, cborBdFloat64:
n.v = valueTypeFloat n.v = valueTypeFloat
n.f = d.DecodeFloat(true) n.f = d.DecodeFloat64()
case cborBdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat(false)
case cborBdIndefiniteBytes: case cborBdIndefiniteBytes:
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false) n.l = d.DecodeBytes(nil, false)
case cborBdIndefiniteString: case cborBdIndefiniteString:
n.v = valueTypeString n.v = valueTypeString
n.s = d.DecodeString() n.s = d.DecodeString()
@ -484,17 +651,17 @@ func (d *cborDecDriver) DecodeNaked() {
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
if d.h.SignedInteger { if d.h.SignedInteger {
n.v = valueTypeInt n.v = valueTypeInt
n.i = d.DecodeInt(64) n.i = d.DecodeInt64()
} else { } else {
n.v = valueTypeUint n.v = valueTypeUint
n.u = d.DecodeUint(64) n.u = d.DecodeUint64()
} }
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
n.v = valueTypeInt n.v = valueTypeInt
n.i = d.DecodeInt(64) n.i = d.DecodeInt64()
case d.bd >= cborBaseBytes && d.bd < cborBaseString: case d.bd >= cborBaseBytes && d.bd < cborBaseString:
n.v = valueTypeBytes n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false) n.l = d.DecodeBytes(nil, false)
case d.bd >= cborBaseString && d.bd < cborBaseArray: case d.bd >= cborBaseString && d.bd < cborBaseArray:
n.v = valueTypeString n.v = valueTypeString
n.s = d.DecodeString() n.s = d.DecodeString()
@ -508,7 +675,12 @@ func (d *cborDecDriver) DecodeNaked() {
n.v = valueTypeExt n.v = valueTypeExt
n.u = d.decUint() n.u = d.decUint()
n.l = nil n.l = nil
d.bdRead = false if n.u == 0 || n.u == 1 {
d.bdRead = false
n.v = valueTypeTime
n.t = d.decodeTime(n.u)
}
// d.bdRead = false
// d.d.decode(&re.Value) // handled by decode itself. // d.d.decode(&re.Value) // handled by decode itself.
// decodeFurther = true // decodeFurther = true
default: default:
@ -538,30 +710,29 @@ func (d *cborDecDriver) DecodeNaked() {
// //
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. // None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones: // Users can implement them as needed (using SetExt), including spec-documented ones:
// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. // - timestamp, BigNum, BigFloat, Decimals,
// // - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
// To encode with indefinite lengths (streaming), users will use
// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
//
// For example, to encode "one-byte" as an indefinite length string:
// var buf bytes.Buffer
// e := NewEncoder(&buf, new(CborHandle))
// buf.WriteByte(CborStreamString)
// e.MustEncode("one-")
// e.MustEncode("byte")
// buf.WriteByte(CborStreamBreak)
// encodedBytes := buf.Bytes()
// var vv interface{}
// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
// // Now, vv contains the same string "one-byte"
//
type CborHandle struct { type CborHandle struct {
binaryEncodingType binaryEncodingType
noElemSeparators
BasicHandle BasicHandle
// IndefiniteLength=true means that we encode using indefinite lengths
IndefiniteLength bool
// TimeRFC3339 says to encode time.Time using RFC3339 format.
// If unset, we encode time.Time using seconds past epoch.
TimeRFC3339 bool
// _ [1]uint64 // padding
} }
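A short usage sketch for these options (illustrative; assumes the usual bytes/time imports and this codec package):

func encodeCborStreaming(v interface{}) ([]byte, error) {
	var ch codec.CborHandle
	ch.IndefiniteLength = true // write containers/strings with indefinite-length framing
	ch.TimeRFC3339 = true      // encode time.Time as an RFC3339 string (tag 0) rather than epoch (tag 1)

	var buf bytes.Buffer
	err := codec.NewEncoder(&buf, &ch).Encode(v)
	return buf.Bytes(), err
}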
// Name returns the name of the handle: cbor
func (h *CborHandle) Name() string { return "cbor" }
// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{i: ext}) return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
} }
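As the comment above notes, spec-defined tags such as BigNum can be wired up via SetInterfaceExt. A sketch (the bigNumExt type and its conversion logic are illustrative, not part of the library; it assumes the InterfaceExt methods are ConvertExt/UpdateExt and that the tagged content decodes to a []byte, plus math/big and reflect imports):

// bigNumExt is an illustrative InterfaceExt mapping *big.Int to CBOR tag 2
// (positive bignum). It is not part of this library.
type bigNumExt struct{}

func (bigNumExt) ConvertExt(v interface{}) interface{} { return v.(*big.Int).Bytes() }

func (bigNumExt) UpdateExt(dst interface{}, src interface{}) {
	dst.(*big.Int).SetBytes(src.([]byte))
}

func registerBigNum(ch *codec.CborHandle) error {
	return ch.SetInterfaceExt(reflect.TypeOf(big.Int{}), 2, bigNumExt{})
}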
func (h *CborHandle) newEncDriver(e *Encoder) encDriver { func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
@ -569,7 +740,7 @@ func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
} }
func (h *CborHandle) newDecDriver(d *Decoder) decDriver { func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes} return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
} }
func (e *cborEncDriver) reset() { func (e *cborEncDriver) reset() {
@ -577,7 +748,7 @@ func (e *cborEncDriver) reset() {
} }
func (d *cborDecDriver) reset() { func (d *cborDecDriver) reset() {
d.r = d.d.r d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false d.bd, d.bdRead = 0, false
} }

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,9 +1,14 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build notfastpath // +build notfastpath
package codec package codec
import "reflect" import "reflect"
const fastpathEnabled = false
// The generated fast-path code is very large, and adds a few seconds to the build time. // The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc // This causes test execution, execution of small tools which use codec, etc
// to take a long time. // to take a long time.
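To opt out of the generated fast paths and compile this stub file instead, builds and tests can pass the tag from the +build line above, e.g.:

go build -tags notfastpath ./...
go test -tags notfastpath ./...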
@ -16,17 +21,27 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return fal
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
type fastpathT struct{} type fastpathT struct{}
type fastpathE struct { type fastpathE struct {
rtid uintptr rtid uintptr
rt reflect.Type rt reflect.Type
encfn func(*encFnInfo, reflect.Value) encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*decFnInfo, reflect.Value) decfn func(*Decoder, *codecFnInfo, reflect.Value)
} }
type fastpathA [0]fastpathE type fastpathA [0]fastpathE
func (x fastpathA) index(rtid uintptr) int { return -1 } func (x fastpathA) index(rtid uintptr) int { return -1 }
func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) {
fn := d.cfer().get(uint8SliceTyp, true, true)
d.kSlice(&fn.i, reflect.ValueOf(&v).Elem())
return v, true
}
var fastpathAV fastpathA var fastpathAV fastpathA
var fastpathTV fastpathT var fastpathTV fastpathT
// ----
type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode

@ -1,12 +1,9 @@
// //+build ignore /* // +build ignore */
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
// ************************************************************ // Code generated from gen-helper.go.tmpl - DO NOT EDIT.
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec package codec
@ -15,9 +12,12 @@ import (
"reflect" "reflect"
) )
// GenVersion is the current version of codecgen.
const GenVersion = 8
// This file is used to generate helper code for codecgen. // This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by // The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continously and without notice. // library users. They WILL change continuously and without notice.
// //
// To help enforce this, we create an unexported type with exported members. // To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat). // The only way to get the type is via the one exported type that we control (somewhat).
@ -26,25 +26,75 @@ import (
// to perform encoding or decoding of primitives or known slice or map types. // to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen. // GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
return genHelperEncoder{e: e}, e.e ge = genHelperEncoder{e: e}
ee = genHelperEncDriver{encDriver: e.e}
return
} }
// GenHelperDecoder is exported so that it can be used externally by codecgen. // GenHelperDecoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
return genHelperDecoder{d: d}, d.d gd = genHelperDecoder{d: d}
dd = genHelperDecDriver{decDriver: d.d}
return
}
type genHelperEncDriver struct {
encDriver
}
func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
encStructFieldKey(x.encDriver, keyType, s)
}
func (x genHelperEncDriver) EncodeSymbol(s string) {
x.encDriver.EncodeString(cUTF8, s)
}
type genHelperDecDriver struct {
decDriver
C checkOverflow
}
func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
return decStructFieldKey(x.decDriver, keyType, buf)
}
func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
}
func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
}
func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
f = x.DecodeFloat64()
if chkOverflow32 && chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
}
func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
f = x.DecodeFloat64()
if chkOvf.Float32(f) {
panicv.errorf("float32 overflow: %v", f)
}
return
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct { type genHelperEncoder struct {
M must
e *Encoder e *Encoder
F fastpathT F fastpathT
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct { type genHelperDecoder struct {
C checkOverflow
d *Decoder d *Decoder
F fastpathT F fastpathT
} }
@ -59,69 +109,86 @@ func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding() return f.e.be // f.e.hh.isBinaryEncoding()
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, c_UTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, c_UTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, c_RAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId
}
return 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool { func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js return f.e.js
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
// f.e.encodeI(iv, false, false)
f.e.encodeValue(reflect.ValueOf(iv), nil, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, cUTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, cUTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, cRAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: builtin no longer supported - so we make this method a no-op,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return }
// func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
// if _, ok := f.e.hh.(*BincHandle); ok {
// return timeTypId
// }
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
return f.e.h.getExt(rtid)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) HasExtensions() bool { func (f genHelperEncoder) HasExtensions() bool {
return len(f.e.h.extHandle) != 0 return len(f.e.h.extHandle) != 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperEncoder) EncExt(v interface{}) (r bool) { func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v) if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil {
if rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
rtid := reflect.ValueOf(rt).Pointer()
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true return true
} }
return false return false
} }
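Editor's note: EncExt (and DecExt further down) now delegate the runtime-type-id lookup to an internal i2rtid helper instead of computing it inline with reflect. As an illustration only, a reflect-based equivalent of the id the old inline code derived could look like the sketch below; the actual i2rtid is internal to the package and presumably cheaper.

package sketch

import "reflect"

// rtidOf returns a pointer-sized identifier for the (dereferenced) type of v,
// in the same spirit as the inline lookup the old EncExt body performed.
// Illustrative sketch only; not the package's i2rtid.
func rtidOf(v interface{}) uintptr {
	rt := reflect.TypeOf(v)
	if rt.Kind() == reflect.Ptr {
		rt = rt.Elem()
	}
	return reflect.ValueOf(rt).Pointer()
}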
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncSendContainerState(c containerState) {
if f.e.cr != nil {
f.e.cr.sendContainerState(c)
}
}
// ---------------- DECODER FOLLOWS ----------------- // ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -135,19 +202,27 @@ func (f genHelperDecoder) DecBinary() bool {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte { func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:] return f.d.b[:]
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
return &f.d.b
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback") // println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false) rv := reflect.ValueOf(iv)
if chkPtr {
rv = f.d.ensureDecodeable(rv)
}
f.d.decodeValue(rv, nil, false)
// f.d.decodeValueFallback(rv)
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -167,7 +242,7 @@ func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
if fnerr != nil { if fnerr != nil {
panic(fnerr) panic(fnerr)
} }
@ -175,7 +250,7 @@ func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeBytes(f.d.b[:], true, true) // bs := f.dd.DecodeStringAsBytes()
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil { if fnerr != nil {
@ -185,19 +260,28 @@ func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
if fnerr != nil { if fnerr != nil {
panic(fnerr) panic(fnerr)
} }
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr { func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
if _, ok := f.d.hh.(*BincHandle); ok {
return timeTypId // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
} //
return 0 // Deprecated: builtin no longer supported - so we make this method a no-op,
} // but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return }
// func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
// // Note: builtin is no longer supported - so make this a no-op
// if _, ok := f.d.hh.(*BincHandle); ok {
// return timeTypId
// }
// return 0
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool { func (f genHelperDecoder) IsJSONHandle() bool {
@ -205,15 +289,34 @@ func (f genHelperDecoder) IsJSONHandle() bool {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) {
return f.d.h.getExt(rtid)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) HasExtensions() bool { func (f genHelperDecoder) HasExtensions() bool {
return len(f.d.h.extHandle) != 0 return len(f.d.h.extHandle) != 0
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
//
// Deprecated: No longer used,
// but leave in-place so that old generated files continue to work without regeneration.
func (f genHelperDecoder) DecExt(v interface{}) (r bool) { func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v).Elem() if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil {
rtid := reflect.ValueOf(rt).Pointer()
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true return true
} }
@ -221,13 +324,12 @@ func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
} }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
return decInferLen(clen, maxlen, unit) return decInferLen(clen, maxlen, unit)
} }
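Editor's note: DecInferLen now returns only the inferred length; the truncated flag is gone. A hedged sketch of what such an inference helper typically does, clamping a stream-declared length against a configured maximum derived from a per-element size, is shown below. The default budget here is an assumption for illustration, not the library's actual value.

package sketch

// inferLen clamps a container length declared by the stream (clen) against a
// configured maximum (maxlen), using unit as the approximate per-element size
// to derive a default budget when no maximum is set. Assumed defaults; the
// vendored decInferLen may use different values.
func inferLen(clen, maxlen, unit int) int {
	if clen <= 0 {
		return 0
	}
	if unit == 0 {
		return clen
	}
	if maxlen <= 0 {
		const defaultBudgetBytes = 16 << 20 // assumed ~16MB budget
		maxlen = defaultBudgetBytes / unit
	}
	if clen > maxlen {
		return maxlen
	}
	return clen
}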
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) { //
if f.d.cr != nil { // Deprecated: no longer used,
f.d.cr.sendContainerState(c) // but leave in-place so that old generated files continue to work without regeneration.
} func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) }
}

View file

@ -1,3 +1,5 @@
// +build codecgen.exec
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file. // Use of this source code is governed by a MIT license found in the LICENSE file.
@ -10,21 +12,22 @@ const genDecMapTmpl = `
{{var "l"}} := r.ReadMapStart() {{var "l"}} := r.ReadMapStart()
{{var "bh"}} := z.DecBasicHandle() {{var "bh"}} := z.DecBasicHandle()
if {{var "v"}} == nil { if {{var "v"}} == nil {
{{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}} *{{ .Varname }} = {{var "v"}}
} }
var {{var "mk"}} {{ .KTyp }} var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }} var {{var "mv"}} {{ .Typ }}
var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset { if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true {{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true {{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} } {{end}} }
if {{var "l"}} > 0 { if {{var "l"}} != 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { {{var "hl"}} := {{var "l"}} > 0
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { {{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}}) {{var "mk"}} = string({{var "bv"}})
@ -36,34 +39,17 @@ for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
{{var "ms"}} = false {{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} {{var "mdn"}} = false
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
{{var "v"}}[{{var "mk"}}] = {{var "mv"}} if {{var "mdn"}} {
} if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
} } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}{{ end }}{{if decElemKindPtr}}
{{var "ms"}} = true {{ end }}
if {{var "mg"}} {
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
if {{var "mok"}} {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}} {{var "v"}}[{{var "mk"}}] = {{var "mv"}}
} }
} }
} // else len==0: TODO: Should we clear map entries? } // else len==0: TODO: Should we clear map entries?
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
` `
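Editor's note: the rewritten map template folds the old definite-length loop and the old break-terminated loop into a single loop guarded by one condition. A stand-alone sketch of that loop shape follows; the names are illustrative, not the identifiers codecgen emits.

package sketch

// decodeMapLike shows the combined loop the template now generates: n > 0
// means a definite-length container, n < 0 means an indefinite one terminated
// by a break marker reported by checkBreak, and n == 0 decodes nothing.
func decodeMapLike(n int, checkBreak func() bool, decodeEntry func()) {
	if n == 0 {
		return
	}
	hasLen := n > 0
	for j := 0; (hasLen && j < n) || !(hasLen || checkBreak()); j++ {
		decodeEntry()
	}
}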
const genDecListTmpl = ` const genDecListTmpl = `
@ -78,93 +64,68 @@ if {{var "l"}} == 0 {
} else if len({{var "v"}}) != 0 { } else if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0] {{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true {{var "c"}} = true
} {{end}} {{if isChan }}if {{var "v"}} == nil { } {{else if isChan }}if {{var "v"}} == nil {
{{var "v"}} = make({{ .CTyp }}, 0) {{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true {{var "c"}} = true
} {{end}} } {{end}}
} else if {{var "l"}} > 0 { } else {
{{if isChan }}if {{var "v"}} == nil { {{var "hl"}} := {{var "l"}} > 0
{{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) var {{var "rl"}} int
{{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) _ = {{var "rl"}}
{{var "c"}} = true {{if isSlice }} if {{var "hl"}} {
}
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
{{var "h"}}.ElemContainerState({{var "r"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
}
{{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
var {{var "rt"}} bool {{/* truncated */}}
_, _, _ = {{var "rr"}}, {{var "rl"}}, {{var "rt"}}
if {{var "l"}} > cap({{var "v"}}) { if {{var "l"}} > cap({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
{{ else }}{{if not .Immutable }} if {{var "rl"}} <= cap({{var "v"}}) {
{{var "rg"}} := len({{var "v"}}) > 0 {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
{{var "v2"}} := {{var "v"}} {{end}}
{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rt"}} {
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
} else { } else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
} }
{{var "c"}} = true {{var "c"}} = true
{{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} } else if {{var "l"}} != len({{var "v"}}) {
if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
} {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}] {{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true {{var "c"}} = true
} {{end}} {{/* end isSlice:47 */}}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
{{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} }
{{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { } {{end}}
var {{var "j"}} int
// var {{var "dn"}} bool
for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
}
{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
{{var "c"}} = true
}{{end}}
{{var "h"}}.ElemContainerState({{var "j"}}) {{var "h"}}.ElemContainerState({{var "j"}})
z.DecSwallow() {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}}
} {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
{{ else }}if {{var "rt"}} { {{ decLineVar $x }}
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { {{var "v"}} <- {{ $x }}
{{var "v"}} = append({{var "v"}}, {{ zero}}) // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this
{{var "h"}}.ElemContainerState({{var "j"}}) {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}}
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} var {{var "db"}} bool
}
} {{end}} {{/* end isArray:56 */}}
{{end}} {{/* end isChan:16 */}}
} else { {{/* len < 0 */}}
{{var "j"}} := 0
for ; !r.CheckBreak(); {{var "j"}}++ {
{{if isChan }}
{{var "h"}}.ElemContainerState({{var "j"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
if {{var "j"}} >= len({{var "v"}}) { if {{var "j"}} >= len({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
{{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} {{var "c"}} = true
{{var "c"}} = true {{end}} {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end}}
} }
{{var "h"}}.ElemContainerState({{var "j"}}) if {{var "db"}} {
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
z.DecSwallow() z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} }
{{end}} {{end}}
} }
{{if isSlice }}if {{var "j"}} < len({{var "v"}}) { {{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}] {{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true {{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil { } else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{} {{var "v"}} = make([]{{ .Typ }}, 0)
{{var "c"}} = true {{var "c"}} = true
}{{end}} } {{end}}
} }
{{var "h"}}.End() {{var "h"}}.End()
{{if not isArray }}if {{var "c"}} { {{if not isArray }}if {{var "c"}} {
@ -172,3 +133,32 @@ if {{var "l"}} == 0 {
}{{end}} }{{end}}
` `
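Editor's note: the list template applies the same single-loop shape to slices, arrays and channels, and for slices it reuses the destination's backing storage where it can: truncate when the stream has fewer elements, append when it has more. A rough stand-alone illustration of that reuse policy, with plain ints standing in for decoded values:

package sketch

// decodeIntoSlice copies src into dst while reusing dst's backing array:
// elements are overwritten in place, the slice grows by append when src is
// longer, and is truncated when src is shorter. Illustrative only.
func decodeIntoSlice(dst, src []int) []int {
	j := 0
	for ; j < len(src); j++ {
		if j >= len(dst) {
			dst = append(dst, 0)
		}
		dst[j] = src[j]
	}
	if j < len(dst) {
		dst = dst[:j]
	}
	return dst
}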
const genEncChanTmpl = `
{{.Label}}:
switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
case timeout{{.Sfx}} == 0: // only consume available
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
default:
break {{.Label}}
}
}
case timeout{{.Sfx}} > 0: // consume until timeout
tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
case <-tt{{.Sfx}}.C:
// close(tt.C)
break {{.Label}}
}
}
default: // consume until close
for b{{.Sfx}} := range {{.Chan}} {
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
}
}
`
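Editor's note: genEncChanTmpl is new in this version. Before encoding a channel, the generated code drains it into a slice, choosing the policy from the handle's ChanRecvTimeout: zero consumes only what is immediately available, a positive timeout consumes until a timer fires, and any other value consumes until the channel is closed. A stand-alone sketch of those three modes:

package sketch

import "time"

// drainChan collects values from ch into a slice using the three policies the
// template generates: timeout == 0 takes only what is immediately available,
// timeout > 0 takes values until the timer fires, and any other value takes
// values until the channel is closed. Illustrative only.
func drainChan(ch <-chan int, timeout time.Duration) []int {
	var out []int
	switch {
	case timeout == 0:
	avail:
		for {
			select {
			case v := <-ch:
				out = append(out, v)
			default:
				break avail
			}
		}
	case timeout > 0:
		t := time.NewTimer(timeout)
		defer t.Stop()
	timed:
		for {
			select {
			case v := <-ch:
				out = append(out, v)
			case <-t.C:
				break timed
			}
		}
	default:
		for v := range ch {
			out = append(out, v)
		}
	}
	return out
}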

File diff suppressed because it is too large

View file

@ -0,0 +1,14 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5
package codec
import "reflect"
const reflectArrayOfSupported = true
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
return reflect.ArrayOf(count, elem)
}

View file

@ -0,0 +1,14 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
import "reflect"
const reflectArrayOfSupported = false
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
panic("codec: reflect.ArrayOf unsupported in this go version")
}
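Editor's note: the two reflectArrayOf files form a build-tag shim: the same symbols exist on every toolchain, and reflectArrayOfSupported tells callers whether the real reflect.ArrayOf is available. A hedged sketch of how a caller might branch on that flag; the shim copies and the slice fallback below are assumptions for illustration, not the vendored call sites.

package sketch

import "reflect"

// Assumed local copies of the shim pair, for illustration: on a pre-go1.5
// toolchain the !go1.5 file would define these symbols instead.
const reflectArrayOfSupported = true

func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
	return reflect.ArrayOf(count, elem)
}

// byteContainerType shows how a caller can branch on the capability flag so
// the same code builds on every supported toolchain (fallback is illustrative).
func byteContainerType(count int) reflect.Type {
	if reflectArrayOfSupported {
		return reflectArrayOf(count, reflect.TypeOf(byte(0)))
	}
	return reflect.SliceOf(reflect.TypeOf(byte(0)))
}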

View file

@ -0,0 +1,15 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.9
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
if size < 0 {
return reflect.MakeMapWithSize(t, 4)
}
return reflect.MakeMapWithSize(t, size)
}

View file

@ -0,0 +1,12 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.9
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMap(t)
}

View file

@ -0,0 +1,8 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.10
package codec
const allowSetUnexportedEmbeddedPtr = false

View file

@ -0,0 +1,8 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.10
package codec
const allowSetUnexportedEmbeddedPtr = true

View file

@ -0,0 +1,17 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.4
package codec
// This codec package will only work for go1.4 and above.
// This is for the following reasons:
// - go 1.4 was released in 2014
// - go runtime is written fully in go
// - interface only holds pointers
// - reflect.Value is stabilized as 3 words
func init() {
panic("codec: go 1.3 and below are not supported")
}

View file

@ -0,0 +1,10 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5,!go1.6
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"

View file

@ -0,0 +1,10 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.6,!go1.7
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"

View file

@ -0,0 +1,8 @@
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7
package codec
const genCheckVendor = true

Some files were not shown because too many files have changed in this diff