Bump kubernetes/client-go

Kim Min 2018-02-14 16:56:04 +08:00 committed by Traefiker Bot
parent 029fa83690
commit 83a92596c3
901 changed files with 169303 additions and 306433 deletions
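For orientation, the core of this bump is an import-path migration: with client-go v6.0.0 the API types live in the split k8s.io/api and k8s.io/apimachinery repositories instead of under k8s.io/client-go/pkg/..., which is what most of the file changes below amount to. A minimal sketch of the new import layout follows, assuming a standalone program; the package name, object values, and printed line are illustrative only and not taken from this repository.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"                   // was "k8s.io/client-go/pkg/api/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // ObjectMeta and friends moved here
)

func main() {
	// Build a Service the way the updated tests below do: typed fields come
	// from k8s.io/api, object metadata from k8s.io/apimachinery.
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service1",
			Namespace: "testing",
		},
		Spec: corev1.ServiceSpec{
			ClusterIP: "10.0.0.1",
			Ports:     []corev1.ServicePort{{Name: "http", Port: 80}},
		},
	}
	fmt.Printf("%s/%s -> %s\n", svc.Namespace, svc.Name, svc.Spec.ClusterIP)
}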

Gopkg.lock (generated, 263 lines changed)

@ -204,12 +204,6 @@
packages = ["quantile"]
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
[[projects]]
name = "github.com/blang/semver"
packages = ["."]
revision = "31b736133b98f26d5e078ec9eb591666edfd091f"
version = "v3.0.1"
[[projects]]
branch = "master"
name = "github.com/cenk/backoff"
@ -275,17 +269,6 @@
revision = "f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b"
version = "v3.2.9"
[[projects]]
name = "github.com/coreos/go-oidc"
packages = [
"http",
"jose",
"key",
"oauth2",
"oidc"
]
revision = "5644a2f50e2d2d5ba0b474bc5bc55fea1925936d"
[[projects]]
name = "github.com/coreos/go-semver"
packages = ["semver"]
@ -298,15 +281,6 @@
revision = "48702e0da86bd25e76cfef347e2adeb434a0d0a6"
version = "v14"
[[projects]]
name = "github.com/coreos/pkg"
packages = [
"health",
"httputil",
"timeutil"
]
revision = "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
@ -517,8 +491,7 @@
name = "github.com/emicklei/go-restful"
packages = [
".",
"log",
"swagger"
"log"
]
revision = "89ef8af493ab468a45a42bb0d89a06fccdd2fb22"
@ -628,7 +601,10 @@
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes/any"
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "4bd1920723d7b7c925de087aa32e2187708897f7"
@ -638,6 +614,12 @@
packages = ["."]
revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
branch = "master"
name = "github.com/google/btree"
packages = ["."]
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
name = "github.com/google/go-github"
packages = ["github"]
@ -659,6 +641,16 @@
packages = ["."]
revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"
[[projects]]
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions"
]
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
name = "github.com/gorilla/context"
packages = ["."]
@ -680,6 +672,15 @@
revision = "0bd13642feb8f57acc0d8e3a568edc34e05a74b9"
version = "1.1.3"
[[projects]]
branch = "master"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache"
]
revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
[[projects]]
name = "github.com/hashicorp/consul"
packages = ["api"]
@ -695,6 +696,15 @@
packages = ["."]
revision = "03c5bf6be031b6dd45afec16b1cf94fc8938bc77"
[[projects]]
branch = "master"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru"
]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
name = "github.com/hashicorp/serf"
packages = ["coordinate"]
@ -737,10 +747,17 @@
packages = ["."]
revision = "72f9bd7c4e0c2a40055ab3d0f09654f730cce982"
[[projects]]
name = "github.com/json-iterator/go"
packages = ["."]
revision = "28452fcdec4e44348d2af0d91d1e9e38da3a9e0a"
version = "1.0.5"
[[projects]]
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "77ed1c8a01217656d2080ad51981f6e99adaa177"
revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
version = "1.0.1"
[[projects]]
branch = "master"
@ -944,9 +961,16 @@
revision = "4b1fea467323b74c5f462f0947f402b428ca0626"
[[projects]]
name = "github.com/pborman/uuid"
branch = "master"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
name = "github.com/peterbourgon/diskv"
packages = ["."]
revision = "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
name = "github.com/pierrec/lz4"
@ -1383,72 +1407,56 @@
revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77"
[[projects]]
name = "k8s.io/client-go"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a"
version = "kubernetes-1.9.0"
[[projects]]
name = "k8s.io/apimachinery"
packages = [
"discovery",
"kubernetes",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1alpha1",
"kubernetes/typed/core/v1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"pkg/api",
"pkg/api/errors",
"pkg/api/install",
"pkg/api/meta",
"pkg/api/meta/metatypes",
"pkg/api/resource",
"pkg/api/unversioned",
"pkg/api/v1",
"pkg/api/validation/path",
"pkg/apimachinery",
"pkg/apimachinery/announced",
"pkg/apimachinery/registered",
"pkg/apis/apps",
"pkg/apis/apps/install",
"pkg/apis/apps/v1beta1",
"pkg/apis/authentication",
"pkg/apis/authentication/install",
"pkg/apis/authentication/v1beta1",
"pkg/apis/authorization",
"pkg/apis/authorization/install",
"pkg/apis/authorization/v1beta1",
"pkg/apis/autoscaling",
"pkg/apis/autoscaling/install",
"pkg/apis/autoscaling/v1",
"pkg/apis/batch",
"pkg/apis/batch/install",
"pkg/apis/batch/v1",
"pkg/apis/batch/v2alpha1",
"pkg/apis/certificates",
"pkg/apis/certificates/install",
"pkg/apis/certificates/v1alpha1",
"pkg/apis/extensions",
"pkg/apis/extensions/install",
"pkg/apis/extensions/v1beta1",
"pkg/apis/policy",
"pkg/apis/policy/install",
"pkg/apis/policy/v1beta1",
"pkg/apis/rbac",
"pkg/apis/rbac/install",
"pkg/apis/rbac/v1alpha1",
"pkg/apis/storage",
"pkg/apis/storage/install",
"pkg/apis/storage/v1beta1",
"pkg/auth/user",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1alpha1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/genericapiserver/openapi/common",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
@ -1456,49 +1464,88 @@
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/third_party/forked/golang/reflect",
"pkg/third_party/forked/golang/template",
"pkg/types",
"pkg/util",
"pkg/util/cert",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/flowcontrol",
"pkg/util/framer",
"pkg/util/integer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/jsonpath",
"pkg/util/labels",
"pkg/util/net",
"pkg/util/parsers",
"pkg/util/rand",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/uuid",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"pkg/watch/versioned",
"plugin/pkg/client/auth",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"third_party/forked/golang/reflect"
]
revision = "180eddb345a5be3a157cea1c624700ad5bd27b8f"
version = "kubernetes-1.9.0"
[[projects]]
name = "k8s.io/client-go"
packages = [
"discovery",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"pkg/version",
"rest",
"rest/watch",
"tools/cache",
"tools/clientcmd/api",
"tools/metrics",
"transport"
"tools/pager",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/flowcontrol",
"util/integer"
]
revision = "e121606b0d09b2e1c467183ee46217fa85a6b672"
version = "v2.0.0"
revision = "78700dec6369ba22221b72770783300f143df150"
version = "v6.0.0"
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/common"]
revision = "275e2ce91dec4c05a4094a7b1daee5560b555ac9"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "17cefac15e170ca9d73093cad7a8f1fb6874cd03eeb67ccc3f8fe4d239f6e9aa"
inputs-digest = "2fa6756003e3f9ca71c3b55e76b9de45677df953dbee5a2f8be7bd77c8b9987b"
solver-name = "gps-cdcl"
solver-version = 1


@ -190,7 +190,15 @@
[[constraint]]
name = "k8s.io/client-go"
version = "2.0.0"
version = "6.0.0"
[[constraint]]
name = "k8s.io/api"
version = "kubernetes-1.9.0"
[[constraint]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.9.0"
[[constraint]]
branch = "master"


@ -4,39 +4,40 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/types"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
func buildEndpoint(opts ...func(*v1.Endpoints)) *v1.Endpoints {
e := &v1.Endpoints{}
func buildEndpoint(opts ...func(*corev1.Endpoints)) *corev1.Endpoints {
e := &corev1.Endpoints{}
for _, opt := range opts {
opt(e)
}
return e
}
func eNamespace(value string) func(*v1.Endpoints) {
return func(i *v1.Endpoints) {
func eNamespace(value string) func(*corev1.Endpoints) {
return func(i *corev1.Endpoints) {
i.Namespace = value
}
}
func eName(value string) func(*v1.Endpoints) {
return func(i *v1.Endpoints) {
func eName(value string) func(*corev1.Endpoints) {
return func(i *corev1.Endpoints) {
i.Name = value
}
}
func eUID(value types.UID) func(*v1.Endpoints) {
return func(i *v1.Endpoints) {
func eUID(value types.UID) func(*corev1.Endpoints) {
return func(i *corev1.Endpoints) {
i.UID = value
}
}
func subset(opts ...func(*v1.EndpointSubset)) func(*v1.Endpoints) {
return func(e *v1.Endpoints) {
s := &v1.EndpointSubset{}
func subset(opts ...func(*corev1.EndpointSubset)) func(*corev1.Endpoints) {
return func(e *corev1.Endpoints) {
s := &corev1.EndpointSubset{}
for _, opt := range opts {
opt(s)
}
@ -44,9 +45,9 @@ func subset(opts ...func(*v1.EndpointSubset)) func(*v1.Endpoints) {
}
}
func eAddresses(opts ...func(*v1.EndpointAddress)) func(*v1.EndpointSubset) {
return func(subset *v1.EndpointSubset) {
a := &v1.EndpointAddress{}
func eAddresses(opts ...func(*corev1.EndpointAddress)) func(*corev1.EndpointSubset) {
return func(subset *corev1.EndpointSubset) {
a := &corev1.EndpointAddress{}
for _, opt := range opts {
opt(a)
}
@ -54,24 +55,24 @@ func eAddresses(opts ...func(*v1.EndpointAddress)) func(*v1.EndpointSubset) {
}
}
func eAddress(ip string) func(*v1.EndpointAddress) {
return func(address *v1.EndpointAddress) {
func eAddress(ip string) func(*corev1.EndpointAddress) {
return func(address *corev1.EndpointAddress) {
address.IP = ip
}
}
func ePorts(opts ...func(port *v1.EndpointPort)) func(*v1.EndpointSubset) {
return func(spec *v1.EndpointSubset) {
func ePorts(opts ...func(port *corev1.EndpointPort)) func(*corev1.EndpointSubset) {
return func(spec *corev1.EndpointSubset) {
for _, opt := range opts {
p := &v1.EndpointPort{}
p := &corev1.EndpointPort{}
opt(p)
spec.Ports = append(spec.Ports, *p)
}
}
}
func ePort(port int32, name string) func(*v1.EndpointPort) {
return func(sp *v1.EndpointPort) {
func ePort(port int32, name string) func(*corev1.EndpointPort) {
return func(sp *corev1.EndpointPort) {
sp.Port = port
sp.Name = name
}
@ -103,21 +104,21 @@ func TestBuildEndpoint(t *testing.T) {
assert.EqualValues(t, sampleEndpoint1(), actual)
}
func sampleEndpoint1() *v1.Endpoints {
return &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
func sampleEndpoint1() *corev1.Endpoints {
return &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "service3",
UID: "3",
Namespace: "testing",
},
Subsets: []v1.EndpointSubset{
Subsets: []corev1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
Addresses: []corev1.EndpointAddress{
{
IP: "10.15.0.1",
},
},
Ports: []v1.EndpointPort{
Ports: []corev1.EndpointPort{
{
Name: "http",
Port: 8080,
@ -129,12 +130,12 @@ func sampleEndpoint1() *v1.Endpoints {
},
},
{
Addresses: []v1.EndpointAddress{
Addresses: []corev1.EndpointAddress{
{
IP: "10.15.0.2",
},
},
Ports: []v1.EndpointPort{
Ports: []corev1.EndpointPort{
{
Name: "http",
Port: 9080,


@ -4,27 +4,27 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/pkg/util/intstr"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func buildIngress(opts ...func(*v1beta1.Ingress)) *v1beta1.Ingress {
i := &v1beta1.Ingress{}
func buildIngress(opts ...func(*extensionsv1beta1.Ingress)) *extensionsv1beta1.Ingress {
i := &extensionsv1beta1.Ingress{}
for _, opt := range opts {
opt(i)
}
return i
}
func iNamespace(value string) func(*v1beta1.Ingress) {
return func(i *v1beta1.Ingress) {
func iNamespace(value string) func(*extensionsv1beta1.Ingress) {
return func(i *extensionsv1beta1.Ingress) {
i.Namespace = value
}
}
func iAnnotation(name string, value string) func(*v1beta1.Ingress) {
return func(i *v1beta1.Ingress) {
func iAnnotation(name string, value string) func(*extensionsv1beta1.Ingress) {
return func(i *extensionsv1beta1.Ingress) {
if i.Annotations == nil {
i.Annotations = make(map[string]string)
}
@ -32,9 +32,9 @@ func iAnnotation(name string, value string) func(*v1beta1.Ingress) {
}
}
func iRules(opts ...func(*v1beta1.IngressSpec)) func(*v1beta1.Ingress) {
return func(i *v1beta1.Ingress) {
s := &v1beta1.IngressSpec{}
func iRules(opts ...func(*extensionsv1beta1.IngressSpec)) func(*extensionsv1beta1.Ingress) {
return func(i *extensionsv1beta1.Ingress) {
s := &extensionsv1beta1.IngressSpec{}
for _, opt := range opts {
opt(s)
}
@ -42,9 +42,9 @@ func iRules(opts ...func(*v1beta1.IngressSpec)) func(*v1beta1.Ingress) {
}
}
func iRule(opts ...func(*v1beta1.IngressRule)) func(*v1beta1.IngressSpec) {
return func(spec *v1beta1.IngressSpec) {
r := &v1beta1.IngressRule{}
func iRule(opts ...func(*extensionsv1beta1.IngressRule)) func(*extensionsv1beta1.IngressSpec) {
return func(spec *extensionsv1beta1.IngressSpec) {
r := &extensionsv1beta1.IngressRule{}
for _, opt := range opts {
opt(r)
}
@ -52,24 +52,24 @@ func iRule(opts ...func(*v1beta1.IngressRule)) func(*v1beta1.IngressSpec) {
}
}
func iHost(name string) func(*v1beta1.IngressRule) {
return func(rule *v1beta1.IngressRule) {
func iHost(name string) func(*extensionsv1beta1.IngressRule) {
return func(rule *extensionsv1beta1.IngressRule) {
rule.Host = name
}
}
func iPaths(opts ...func(*v1beta1.HTTPIngressRuleValue)) func(*v1beta1.IngressRule) {
return func(rule *v1beta1.IngressRule) {
rule.HTTP = &v1beta1.HTTPIngressRuleValue{}
func iPaths(opts ...func(*extensionsv1beta1.HTTPIngressRuleValue)) func(*extensionsv1beta1.IngressRule) {
return func(rule *extensionsv1beta1.IngressRule) {
rule.HTTP = &extensionsv1beta1.HTTPIngressRuleValue{}
for _, opt := range opts {
opt(rule.HTTP)
}
}
}
func onePath(opts ...func(*v1beta1.HTTPIngressPath)) func(*v1beta1.HTTPIngressRuleValue) {
return func(irv *v1beta1.HTTPIngressRuleValue) {
p := &v1beta1.HTTPIngressPath{}
func onePath(opts ...func(*extensionsv1beta1.HTTPIngressPath)) func(*extensionsv1beta1.HTTPIngressRuleValue) {
return func(irv *extensionsv1beta1.HTTPIngressRuleValue) {
p := &extensionsv1beta1.HTTPIngressPath{}
for _, opt := range opts {
opt(p)
}
@ -77,33 +77,33 @@ func onePath(opts ...func(*v1beta1.HTTPIngressPath)) func(*v1beta1.HTTPIngressRu
}
}
func iPath(name string) func(*v1beta1.HTTPIngressPath) {
return func(p *v1beta1.HTTPIngressPath) {
func iPath(name string) func(*extensionsv1beta1.HTTPIngressPath) {
return func(p *extensionsv1beta1.HTTPIngressPath) {
p.Path = name
}
}
func iBackend(name string, port intstr.IntOrString) func(*v1beta1.HTTPIngressPath) {
return func(p *v1beta1.HTTPIngressPath) {
p.Backend = v1beta1.IngressBackend{
func iBackend(name string, port intstr.IntOrString) func(*extensionsv1beta1.HTTPIngressPath) {
return func(p *extensionsv1beta1.HTTPIngressPath) {
p.Backend = extensionsv1beta1.IngressBackend{
ServiceName: name,
ServicePort: port,
}
}
}
func iTLSes(opts ...func(*v1beta1.IngressTLS)) func(*v1beta1.Ingress) {
return func(i *v1beta1.Ingress) {
func iTLSes(opts ...func(*extensionsv1beta1.IngressTLS)) func(*extensionsv1beta1.Ingress) {
return func(i *extensionsv1beta1.Ingress) {
for _, opt := range opts {
iTLS := v1beta1.IngressTLS{}
iTLS := extensionsv1beta1.IngressTLS{}
opt(&iTLS)
i.Spec.TLS = append(i.Spec.TLS, iTLS)
}
}
}
func iTLS(secret string, hosts ...string) func(*v1beta1.IngressTLS) {
return func(i *v1beta1.IngressTLS) {
func iTLS(secret string, hosts ...string) func(*extensionsv1beta1.IngressTLS) {
return func(i *extensionsv1beta1.IngressTLS) {
i.SecretName = secret
i.Hosts = hosts
}
@ -133,28 +133,28 @@ func TestBuildIngress(t *testing.T) {
assert.EqualValues(t, sampleIngress(), i)
}
func sampleIngress() *v1beta1.Ingress {
return &v1beta1.Ingress{
ObjectMeta: v1.ObjectMeta{
func sampleIngress() *extensionsv1beta1.Ingress {
return &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Namespace: "testing",
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
Spec: extensionsv1beta1.IngressSpec{
Rules: []extensionsv1beta1.IngressRule{
{
Host: "foo",
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Path: "/bar",
Backend: v1beta1.IngressBackend{
Backend: extensionsv1beta1.IngressBackend{
ServiceName: "service1",
ServicePort: intstr.FromInt(80),
},
},
{
Path: "/namedthing",
Backend: v1beta1.IngressBackend{
Backend: extensionsv1beta1.IngressBackend{
ServiceName: "service4",
ServicePort: intstr.FromString("https"),
},
@ -165,17 +165,17 @@ func sampleIngress() *v1beta1.Ingress {
},
{
Host: "bar",
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
IngressRuleValue: extensionsv1beta1.IngressRuleValue{
HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
Paths: []extensionsv1beta1.HTTPIngressPath{
{
Backend: v1beta1.IngressBackend{
Backend: extensionsv1beta1.IngressBackend{
ServiceName: "service3",
ServicePort: intstr.FromString("https"),
},
},
{
Backend: v1beta1.IngressBackend{
Backend: extensionsv1beta1.IngressBackend{
ServiceName: "service2",
ServicePort: intstr.FromInt(802),
},
@ -185,7 +185,7 @@ func sampleIngress() *v1beta1.Ingress {
},
},
},
TLS: []v1beta1.IngressTLS{
TLS: []extensionsv1beta1.IngressTLS{
{
Hosts: []string{"foo"},
SecretName: "tls-secret",


@ -4,38 +4,39 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/types"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
func buildService(opts ...func(*v1.Service)) *v1.Service {
s := &v1.Service{}
func buildService(opts ...func(*corev1.Service)) *corev1.Service {
s := &corev1.Service{}
for _, opt := range opts {
opt(s)
}
return s
}
func sNamespace(value string) func(*v1.Service) {
return func(i *v1.Service) {
func sNamespace(value string) func(*corev1.Service) {
return func(i *corev1.Service) {
i.Namespace = value
}
}
func sName(value string) func(*v1.Service) {
return func(i *v1.Service) {
func sName(value string) func(*corev1.Service) {
return func(i *corev1.Service) {
i.Name = value
}
}
func sUID(value types.UID) func(*v1.Service) {
return func(i *v1.Service) {
func sUID(value types.UID) func(*corev1.Service) {
return func(i *corev1.Service) {
i.UID = value
}
}
func sAnnotation(name string, value string) func(*v1.Service) {
return func(s *v1.Service) {
func sAnnotation(name string, value string) func(*corev1.Service) {
return func(s *corev1.Service) {
if s.Annotations == nil {
s.Annotations = make(map[string]string)
}
@ -43,9 +44,9 @@ func sAnnotation(name string, value string) func(*v1.Service) {
}
}
func sSpec(opts ...func(*v1.ServiceSpec)) func(*v1.Service) {
return func(i *v1.Service) {
spec := &v1.ServiceSpec{}
func sSpec(opts ...func(*corev1.ServiceSpec)) func(*corev1.Service) {
return func(i *corev1.Service) {
spec := &corev1.ServiceSpec{}
for _, opt := range opts {
opt(spec)
}
@ -53,36 +54,36 @@ func sSpec(opts ...func(*v1.ServiceSpec)) func(*v1.Service) {
}
}
func clusterIP(ip string) func(*v1.ServiceSpec) {
return func(spec *v1.ServiceSpec) {
func clusterIP(ip string) func(*corev1.ServiceSpec) {
return func(spec *corev1.ServiceSpec) {
spec.ClusterIP = ip
}
}
func sType(value v1.ServiceType) func(*v1.ServiceSpec) {
return func(spec *v1.ServiceSpec) {
func sType(value corev1.ServiceType) func(*corev1.ServiceSpec) {
return func(spec *corev1.ServiceSpec) {
spec.Type = value
}
}
func sExternalName(name string) func(*v1.ServiceSpec) {
return func(spec *v1.ServiceSpec) {
func sExternalName(name string) func(*corev1.ServiceSpec) {
return func(spec *corev1.ServiceSpec) {
spec.ExternalName = name
}
}
func sPorts(opts ...func(*v1.ServicePort)) func(*v1.ServiceSpec) {
return func(spec *v1.ServiceSpec) {
func sPorts(opts ...func(*corev1.ServicePort)) func(*corev1.ServiceSpec) {
return func(spec *corev1.ServiceSpec) {
for _, opt := range opts {
p := &v1.ServicePort{}
p := &corev1.ServicePort{}
opt(p)
spec.Ports = append(spec.Ports, *p)
}
}
}
func sPort(port int32, name string) func(*v1.ServicePort) {
return func(sp *v1.ServicePort) {
func sPort(port int32, name string) func(*corev1.ServicePort) {
return func(sp *corev1.ServicePort) {
sp.Port = port
sp.Name = name
}
@ -121,16 +122,16 @@ func TestBuildService(t *testing.T) {
assert.EqualValues(t, sampleService2(), actual2)
}
func sampleService1() *v1.Service {
return &v1.Service{
ObjectMeta: v1.ObjectMeta{
func sampleService1() *corev1.Service {
return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service1",
UID: "1",
Namespace: "testing",
},
Spec: v1.ServiceSpec{
Spec: corev1.ServiceSpec{
ClusterIP: "10.0.0.1",
Ports: []v1.ServicePort{
Ports: []corev1.ServicePort{
{
Port: 80,
},
@ -139,18 +140,18 @@ func sampleService1() *v1.Service {
}
}
func sampleService2() *v1.Service {
return &v1.Service{
ObjectMeta: v1.ObjectMeta{
func sampleService2() *corev1.Service {
return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service3",
UID: "3",
Namespace: "testing",
},
Spec: v1.ServiceSpec{
Spec: corev1.ServiceSpec{
ClusterIP: "10.0.0.3",
Type: "ExternalName",
ExternalName: "example.com",
Ports: []v1.ServicePort{
Ports: []corev1.ServicePort{
{
Name: "http",
Port: 80,


@ -8,14 +8,14 @@ import (
"time"
"github.com/containous/traefik/safe"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/pkg/fields"
"k8s.io/client-go/pkg/labels"
"k8s.io/client-go/pkg/runtime"
"k8s.io/client-go/pkg/watch"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
@ -62,10 +62,10 @@ func (im *informerManager) extend(informer cache.SharedInformer, withSyncFunc bo
// The stores can then be accessed via the Get* functions.
type Client interface {
WatchAll(namespaces Namespaces, labelSelector string, stopCh <-chan struct{}) (<-chan interface{}, error)
GetIngresses() []*v1beta1.Ingress
GetService(namespace, name string) (*v1.Service, bool, error)
GetSecret(namespace, name string) (*v1.Secret, bool, error)
GetEndpoints(namespace, name string) (*v1.Endpoints, bool, error)
GetIngresses() []*extensionsv1beta1.Ingress
GetService(namespace, name string) (*corev1.Service, bool, error)
GetSecret(namespace, name string) (*corev1.Secret, bool, error)
GetEndpoints(namespace, name string) (*corev1.Endpoints, bool, error)
}
type clientImpl struct {
@ -146,7 +146,7 @@ func (c *clientImpl) WatchAll(namespaces Namespaces, labelSelector string, stopC
}
if len(namespaces) == 0 {
namespaces = Namespaces{api.NamespaceAll}
namespaces = Namespaces{metav1.NamespaceAll}
c.isNamespaceAll = true
}
@ -154,13 +154,13 @@ func (c *clientImpl) WatchAll(namespaces Namespaces, labelSelector string, stopC
for _, ns := range namespaces {
ns := ns
informManager.extend(c.WatchIngresses(ns, kubeLabelSelector, eventCh), true)
informManager.extend(c.WatchObjects(ns, kindServices, &v1.Service{}, c.svcStores, eventCh), true)
informManager.extend(c.WatchObjects(ns, kindEndpoints, &v1.Endpoints{}, c.epStores, eventCh), true)
informManager.extend(c.WatchObjects(ns, kindServices, &corev1.Service{}, c.svcStores, eventCh), true)
informManager.extend(c.WatchObjects(ns, kindEndpoints, &corev1.Endpoints{}, c.epStores, eventCh), true)
// Do not wait for the Secrets store to get synced since we cannot rely on
// users having granted RBAC permissions for this object.
// https://github.com/containous/traefik/issues/1784 should improve the
// situation here in the future.
informManager.extend(c.WatchObjects(ns, kindSecrets, &v1.Secret{}, c.secStores, eventCh), false)
informManager.extend(c.WatchObjects(ns, kindSecrets, &corev1.Secret{}, c.secStores, eventCh), false)
}
var wg sync.WaitGroup
@ -188,14 +188,18 @@ func (c *clientImpl) WatchAll(namespaces Namespaces, labelSelector string, stopC
// WatchIngresses sets up a watch on Ingress objects and returns a corresponding shared informer.
func (c *clientImpl) WatchIngresses(namespace string, labelSelector labels.Selector, watchCh chan<- interface{}) cache.SharedInformer {
listWatch := newListWatchFromClientWithLabelSelector(
c.clientset.ExtensionsV1beta1().RESTClient(),
kindIngresses,
namespace,
fields.Everything(),
labelSelector)
informer := loadInformer(listWatch, &v1beta1.Ingress{}, watchCh)
listOptions := metav1.ListOptions{
LabelSelector: labelSelector.String(),
FieldSelector: fields.Everything().String(),
}
informer := loadInformer(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return c.clientset.ExtensionsV1beta1().Ingresses(namespace).List(listOptions)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return c.clientset.ExtensionsV1beta1().Ingresses(namespace).Watch(listOptions)
},
}, &extensionsv1beta1.Ingress{}, watchCh)
c.ingStores = append(c.ingStores, informer.GetStore())
return informer
}
@ -220,22 +224,17 @@ func loadInformer(listWatch cache.ListerWatcher, object runtime.Object, watchCh
resyncPeriod,
)
if err := informer.AddEventHandler(newResourceEventHandler(watchCh)); err != nil {
// This should only ever fail if we add an event handler after the
// informer has been started already, which would be a programming bug.
panic(err)
}
informer.AddEventHandler(newResourceEventHandler(watchCh))
return informer
}
// GetIngresses returns all Ingresses for observed namespaces in the cluster.
func (c *clientImpl) GetIngresses() []*v1beta1.Ingress {
var result []*v1beta1.Ingress
func (c *clientImpl) GetIngresses() []*extensionsv1beta1.Ingress {
var result []*extensionsv1beta1.Ingress
for _, store := range c.ingStores {
for _, obj := range store.List() {
ing := obj.(*v1beta1.Ingress)
ing := obj.(*extensionsv1beta1.Ingress)
result = append(result, ing)
}
}
@ -244,34 +243,34 @@ func (c *clientImpl) GetIngresses() []*v1beta1.Ingress {
}
// GetService returns the named service from the given namespace.
func (c *clientImpl) GetService(namespace, name string) (*v1.Service, bool, error) {
var service *v1.Service
func (c *clientImpl) GetService(namespace, name string) (*corev1.Service, bool, error) {
var service *corev1.Service
item, exists, err := c.svcStores[c.lookupNamespace(namespace)].GetByKey(namespace + "/" + name)
if item != nil {
service = item.(*v1.Service)
service = item.(*corev1.Service)
}
return service, exists, err
}
// GetEndpoints returns the named endpoints from the given namespace.
func (c *clientImpl) GetEndpoints(namespace, name string) (*v1.Endpoints, bool, error) {
var endpoint *v1.Endpoints
func (c *clientImpl) GetEndpoints(namespace, name string) (*corev1.Endpoints, bool, error) {
var endpoint *corev1.Endpoints
item, exists, err := c.epStores[c.lookupNamespace(namespace)].GetByKey(namespace + "/" + name)
if item != nil {
endpoint = item.(*v1.Endpoints)
endpoint = item.(*corev1.Endpoints)
}
return endpoint, exists, err
}
// GetSecret returns the named secret from the given namespace.
func (c *clientImpl) GetSecret(namespace, name string) (*v1.Secret, bool, error) {
var secret *v1.Secret
func (c *clientImpl) GetSecret(namespace, name string) (*corev1.Secret, bool, error) {
var secret *corev1.Secret
item, exists, err := c.secStores[c.lookupNamespace(namespace)].GetByKey(namespace + "/" + name)
if err == nil && item != nil {
secret = item.(*v1.Secret)
secret = item.(*corev1.Secret)
}
return secret, exists, err
@ -285,37 +284,11 @@ func (c *clientImpl) GetSecret(namespace, name string) (*v1.Secret, bool, error)
// identifiers from the Kubernetes API, so we have to bridge this gap.
func (c *clientImpl) lookupNamespace(ns string) string {
if c.isNamespaceAll {
return api.NamespaceAll
return metav1.NamespaceAll
}
return ns
}
// newListWatchFromClientWithLabelSelector creates a new ListWatch from the given parameters.
// It extends cache.NewListWatchFromClient to support label selectors.
func newListWatchFromClientWithLabelSelector(c cache.Getter, resource string, namespace string, fieldSelector fields.Selector, labelSelector labels.Selector) *cache.ListWatch {
listFunc := func(options api.ListOptions) (runtime.Object, error) {
return c.Get().
Namespace(namespace).
Resource(resource).
VersionedParams(&options, api.ParameterCodec).
FieldsSelectorParam(fieldSelector).
LabelsSelectorParam(labelSelector).
Do().
Get()
}
watchFunc := func(options api.ListOptions) (watch.Interface, error) {
return c.Get().
Prefix("watch").
Namespace(namespace).
Resource(resource).
VersionedParams(&options, api.ParameterCodec).
FieldsSelectorParam(fieldSelector).
LabelsSelectorParam(labelSelector).
Watch()
}
return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
func newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler {
return &resourceEventHandler{events}
}


@ -1,15 +1,15 @@
package kubernetes
import (
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)
type clientMock struct {
ingresses []*v1beta1.Ingress
services []*v1.Service
secrets []*v1.Secret
endpoints []*v1.Endpoints
ingresses []*extensionsv1beta1.Ingress
services []*corev1.Service
secrets []*corev1.Secret
endpoints []*corev1.Endpoints
watchChan chan interface{}
apiServiceError error
@ -17,11 +17,11 @@ type clientMock struct {
apiEndpointsError error
}
func (c clientMock) GetIngresses() []*v1beta1.Ingress {
func (c clientMock) GetIngresses() []*extensionsv1beta1.Ingress {
return c.ingresses
}
func (c clientMock) GetService(namespace, name string) (*v1.Service, bool, error) {
func (c clientMock) GetService(namespace, name string) (*corev1.Service, bool, error) {
if c.apiServiceError != nil {
return nil, false, c.apiServiceError
}
@ -34,7 +34,7 @@ func (c clientMock) GetService(namespace, name string) (*v1.Service, bool, error
return nil, false, nil
}
func (c clientMock) GetEndpoints(namespace, name string) (*v1.Endpoints, bool, error) {
func (c clientMock) GetEndpoints(namespace, name string) (*corev1.Endpoints, bool, error) {
if c.apiEndpointsError != nil {
return nil, false, c.apiEndpointsError
}
@ -45,10 +45,10 @@ func (c clientMock) GetEndpoints(namespace, name string) (*v1.Endpoints, bool, e
}
}
return &v1.Endpoints{}, false, nil
return &corev1.Endpoints{}, false, nil
}
func (c clientMock) GetSecret(namespace, name string) (*v1.Secret, bool, error) {
func (c clientMock) GetSecret(namespace, name string) (*corev1.Secret, bool, error) {
if c.apiSecretError != nil {
return nil, false, c.apiSecretError
}


@ -22,9 +22,9 @@ import (
"github.com/containous/traefik/tls"
"github.com/containous/traefik/types"
"gopkg.in/yaml.v2"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/pkg/util/intstr"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/util/intstr"
)
var _ provider.Provider = (*Provider)(nil)
@ -313,7 +313,7 @@ func (p *Provider) loadConfig(templateObjects types.Configuration) *types.Config
return configuration
}
func getRuleForPath(pa v1beta1.HTTPIngressPath, i *v1beta1.Ingress) string {
func getRuleForPath(pa extensionsv1beta1.HTTPIngressPath, i *extensionsv1beta1.Ingress) string {
if len(pa.Path) == 0 {
return ""
}
@ -335,7 +335,7 @@ func getRuleForHost(host string) string {
return "Host:" + host
}
func handleBasicAuthConfig(i *v1beta1.Ingress, k8sClient Client) ([]string, error) {
func handleBasicAuthConfig(i *extensionsv1beta1.Ingress, k8sClient Client) ([]string, error) {
annotationAuthType := getAnnotationName(i.Annotations, annotationKubernetesAuthType)
authType, exists := i.Annotations[annotationAuthType]
if !exists {
@ -391,7 +391,7 @@ func loadAuthCredentials(namespace, secretName string, k8sClient Client) ([]stri
return creds, nil
}
func getTLS(ingress *v1beta1.Ingress, k8sClient Client) ([]*tls.Configuration, error) {
func getTLS(ingress *extensionsv1beta1.Ingress, k8sClient Client) ([]*tls.Configuration, error) {
var tlsConfigs []*tls.Configuration
for _, t := range ingress.Spec.TLS {
@ -434,7 +434,7 @@ func getTLS(ingress *v1beta1.Ingress, k8sClient Client) ([]*tls.Configuration, e
return tlsConfigs, nil
}
func endpointPortNumber(servicePort v1.ServicePort, endpointPorts []v1.EndpointPort) int {
func endpointPortNumber(servicePort corev1.ServicePort, endpointPorts []corev1.EndpointPort) int {
if len(endpointPorts) > 0 {
// name is optional if there is only one port
port := endpointPorts[0]
@ -448,7 +448,7 @@ func endpointPortNumber(servicePort v1.ServicePort, endpointPorts []v1.EndpointP
return int(servicePort.Port)
}
func equalPorts(servicePort v1.ServicePort, ingressPort intstr.IntOrString) bool {
func equalPorts(servicePort corev1.ServicePort, ingressPort intstr.IntOrString) bool {
if int(servicePort.Port) == ingressPort.IntValue() {
return true
}
@ -465,7 +465,7 @@ func (p *Provider) shouldProcessIngress(annotationIngressClass string) bool {
return annotationIngressClass == p.IngressClass
}
func getFrontendRedirect(i *v1beta1.Ingress) *types.Redirect {
func getFrontendRedirect(i *extensionsv1beta1.Ingress) *types.Redirect {
permanent := getBoolValue(i.Annotations, annotationKubernetesRedirectPermanent, false)
redirectEntryPoint := getStringValue(i.Annotations, annotationKubernetesRedirectEntryPoint, "")
@ -489,7 +489,7 @@ func getFrontendRedirect(i *v1beta1.Ingress) *types.Redirect {
return nil
}
func getBuffering(service *v1.Service) *types.Buffering {
func getBuffering(service *corev1.Service) *types.Buffering {
var buffering *types.Buffering
bufferingRaw := getStringValue(service.Annotations, annotationKubernetesBuffering, "")
@ -506,7 +506,7 @@ func getBuffering(service *v1.Service) *types.Buffering {
return buffering
}
func getLoadBalancer(service *v1.Service) *types.LoadBalancer {
func getLoadBalancer(service *corev1.Service) *types.LoadBalancer {
loadBalancer := &types.LoadBalancer{
Method: "wrr",
}
@ -527,7 +527,7 @@ func getLoadBalancer(service *v1.Service) *types.LoadBalancer {
return loadBalancer
}
func getStickiness(service *v1.Service) *types.Stickiness {
func getStickiness(service *corev1.Service) *types.Stickiness {
if getBoolValue(service.Annotations, annotationKubernetesAffinity, false) {
stickiness := &types.Stickiness{}
if cookieName := getStringValue(service.Annotations, annotationKubernetesSessionCookieName, ""); len(cookieName) > 0 {
@ -538,7 +538,7 @@ func getStickiness(service *v1.Service) *types.Stickiness {
return nil
}
func getHeader(i *v1beta1.Ingress) *types.Headers {
func getHeader(i *extensionsv1beta1.Ingress) *types.Headers {
headers := &types.Headers{
CustomRequestHeaders: getMapValue(i.Annotations, annotationKubernetesCustomRequestHeaders),
CustomResponseHeaders: getMapValue(i.Annotations, annotationKubernetesCustomResponseHeaders),
@ -569,7 +569,7 @@ func getHeader(i *v1beta1.Ingress) *types.Headers {
return headers
}
func getMaxConn(service *v1.Service) *types.MaxConn {
func getMaxConn(service *corev1.Service) *types.MaxConn {
amount := getInt64Value(service.Annotations, annotationKubernetesMaxConnAmount, -1)
extractorFunc := getStringValue(service.Annotations, annotationKubernetesMaxConnExtractorFunc, "")
if amount >= 0 && len(extractorFunc) > 0 {
@ -581,7 +581,7 @@ func getMaxConn(service *v1.Service) *types.MaxConn {
return nil
}
func getCircuitBreaker(service *v1.Service) *types.CircuitBreaker {
func getCircuitBreaker(service *corev1.Service) *types.CircuitBreaker {
if expression := getStringValue(service.Annotations, annotationKubernetesCircuitBreakerExpression, ""); expression != "" {
return &types.CircuitBreaker{
Expression: expression,
@ -590,7 +590,7 @@ func getCircuitBreaker(service *v1.Service) *types.CircuitBreaker {
return nil
}
func getErrorPages(i *v1beta1.Ingress) map[string]*types.ErrorPage {
func getErrorPages(i *extensionsv1beta1.Ingress) map[string]*types.ErrorPage {
var errorPages map[string]*types.ErrorPage
pagesRaw := getStringValue(i.Annotations, annotationKubernetesErrorPages, "")
@ -606,7 +606,7 @@ func getErrorPages(i *v1beta1.Ingress) map[string]*types.ErrorPage {
return errorPages
}
func getRateLimit(i *v1beta1.Ingress) *types.RateLimit {
func getRateLimit(i *extensionsv1beta1.Ingress) *types.RateLimit {
var rateLimit *types.RateLimit
rateRaw := getStringValue(i.Annotations, annotationKubernetesRateLimit, "")


@ -12,13 +12,14 @@ import (
"github.com/containous/traefik/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/pkg/util/intstr"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func TestLoadIngresses(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iRules(
@ -36,7 +37,7 @@ func TestLoadIngresses(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -76,7 +77,7 @@ func TestLoadIngresses(t *testing.T) {
),
}
endpoints := []*v1.Endpoints{
endpoints := []*corev1.Endpoints{
buildEndpoint(
eNamespace("testing"),
eName("service1"),
@ -215,8 +216,8 @@ func TestRuleType(t *testing.T) {
watchChan := make(chan interface{})
client := clientMock{
ingresses: []*v1beta1.Ingress{ingress},
services: []*v1.Service{service},
ingresses: []*extensionsv1beta1.Ingress{ingress},
services: []*corev1.Service{service},
watchChan: watchChan,
}
provider := Provider{DisablePassHostHeaders: true}
@ -236,7 +237,7 @@ func TestRuleType(t *testing.T) {
}
func TestGetPassHostHeader(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("awesome"),
iRules(iRule(
@ -248,7 +249,7 @@ func TestGetPassHostHeader(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sNamespace("awesome"), sName("service1"), sUID("1"),
sSpec(sPorts(sPort(801, "http"))),
@ -281,7 +282,7 @@ func TestGetPassHostHeader(t *testing.T) {
}
func TestGetPassTLSCert(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(iNamespace("awesome"),
iRules(iRule(
iHost("foo"),
@ -290,7 +291,7 @@ func TestGetPassTLSCert(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("awesome"),
@ -325,7 +326,7 @@ func TestGetPassTLSCert(t *testing.T) {
}
func TestOnlyReferencesServicesFromOwnNamespace(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(iNamespace("awesome"),
iRules(iRule(
iHost("foo"),
@ -334,7 +335,7 @@ func TestOnlyReferencesServicesFromOwnNamespace(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sNamespace("awesome"),
sName("service"),
@ -376,7 +377,7 @@ func TestOnlyReferencesServicesFromOwnNamespace(t *testing.T) {
}
func TestHostlessIngress(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(iNamespace("awesome"),
iRules(iRule(
iPaths(onePath(iPath("/bar"), iBackend("service1", intstr.FromInt(801))))),
@ -384,7 +385,7 @@ func TestHostlessIngress(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("awesome"),
@ -416,7 +417,7 @@ func TestHostlessIngress(t *testing.T) {
}
func TestServiceAnnotations(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(iNamespace("testing"),
iRules(
iRule(
@ -435,7 +436,7 @@ func TestServiceAnnotations(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -483,7 +484,7 @@ retryexpression: IsNetworkError() && Attempts() <= 2
),
}
endpoints := []*v1.Endpoints{
endpoints := []*corev1.Endpoints{
buildEndpoint(
eNamespace("testing"),
eName("service1"),
@ -603,7 +604,7 @@ retryexpression: IsNetworkError() && Attempts() <= 2
}
func TestIngressAnnotations(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesPreserveHost, "false"),
@ -767,7 +768,7 @@ rateset:
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -788,9 +789,9 @@ rateset:
),
}
secrets := []*v1.Secret{
secrets := []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "mySecret",
UID: "1",
Namespace: "testing",
@ -1000,7 +1001,7 @@ rateset:
}
func TestIngressClassAnnotation(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesIngressClass, traefikDefaultIngressClass),
@ -1038,7 +1039,7 @@ func TestIngressClassAnnotation(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -1147,7 +1148,7 @@ func TestIngressClassAnnotation(t *testing.T) {
}
func TestPriorityHeaderValue(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesPriority, "1337"),
@ -1159,7 +1160,7 @@ func TestPriorityHeaderValue(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -1172,7 +1173,7 @@ func TestPriorityHeaderValue(t *testing.T) {
),
}
var endpoints []*v1.Endpoints
var endpoints []*corev1.Endpoints
watchChan := make(chan interface{})
client := clientMock{
ingresses: ingresses,
@ -1207,7 +1208,7 @@ func TestPriorityHeaderValue(t *testing.T) {
}
func TestInvalidPassTLSCertValue(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesPassTLSCert, "herpderp"),
@ -1219,7 +1220,7 @@ func TestInvalidPassTLSCertValue(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -1264,7 +1265,7 @@ func TestInvalidPassTLSCertValue(t *testing.T) {
}
func TestInvalidPassHostHeaderValue(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesPreserveHost, "herpderp"),
@ -1276,7 +1277,7 @@ func TestInvalidPassHostHeaderValue(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -1321,7 +1322,7 @@ func TestInvalidPassHostHeaderValue(t *testing.T) {
}
func TestKubeAPIErrors(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iRules(
@ -1332,7 +1333,7 @@ func TestKubeAPIErrors(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -1384,7 +1385,7 @@ func TestKubeAPIErrors(t *testing.T) {
}
func TestMissingResources(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iRules(
@ -1404,7 +1405,7 @@ func TestMissingResources(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("fully_working_service"),
sNamespace("testing"),
@ -1431,7 +1432,7 @@ func TestMissingResources(t *testing.T) {
),
}
endpoints := []*v1.Endpoints{
endpoints := []*corev1.Endpoints{
buildEndpoint(
eName("fully_working_service"),
eUID("1"),
@ -1500,7 +1501,7 @@ func TestMissingResources(t *testing.T) {
}
func TestBasicAuthInTemplate(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesAuthType, "basic"),
@ -1513,7 +1514,7 @@ func TestBasicAuthInTemplate(t *testing.T) {
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("service1"),
sNamespace("testing"),
@ -1526,8 +1527,8 @@ func TestBasicAuthInTemplate(t *testing.T) {
),
}
secrets := []*v1.Secret{{
ObjectMeta: v1.ObjectMeta{
secrets := []*corev1.Secret{{
ObjectMeta: metav1.ObjectMeta{
Name: "mySecret",
UID: "1",
Namespace: "testing",
@ -1537,7 +1538,7 @@ func TestBasicAuthInTemplate(t *testing.T) {
},
}}
var endpoints []*v1.Endpoints
var endpoints []*corev1.Endpoints
watchChan := make(chan interface{})
client := clientMock{
ingresses: ingresses,
@ -1560,7 +1561,7 @@ func TestBasicAuthInTemplate(t *testing.T) {
}
func TestTLSSecretLoad(t *testing.T) {
ingresses := []*v1beta1.Ingress{
ingresses := []*extensionsv1beta1.Ingress{
buildIngress(
iNamespace("testing"),
iAnnotation(annotationKubernetesFrontendEntryPoints, "ep1,ep2"),
@ -1589,7 +1590,7 @@ func TestTLSSecretLoad(t *testing.T) {
),
),
}
services := []*v1.Service{
services := []*corev1.Service{
buildService(
sName("example-com"),
sNamespace("testing"),
@ -1609,9 +1610,9 @@ func TestTLSSecretLoad(t *testing.T) {
sPorts(sPort(80, "http"))),
),
}
secrets := []*v1.Secret{
secrets := []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "myTlsSecret",
UID: "1",
Namespace: "testing",
@ -1622,7 +1623,7 @@ func TestTLSSecretLoad(t *testing.T) {
},
},
}
endpoints := []*v1.Endpoints{}
endpoints := []*corev1.Endpoints{}
watchChan := make(chan interface{})
client := clientMock{
ingresses: ingresses,
@ -1691,7 +1692,7 @@ func TestGetTLS(t *testing.T) {
tests := []struct {
desc string
ingress *v1beta1.Ingress
ingress *extensionsv1beta1.Ingress
client Client
result []*tls.Configuration
errResult string
@ -1714,9 +1715,9 @@ func TestGetTLS(t *testing.T) {
desc: "entry 'tls.crt' in secret missing",
ingress: testIngressWithoutHostname,
client: clientMock{
secrets: []*v1.Secret{
secrets: []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test-secret",
Namespace: "testing",
},
@ -1732,9 +1733,9 @@ func TestGetTLS(t *testing.T) {
desc: "entry 'tls.key' in secret missing",
ingress: testIngressWithoutHostname,
client: clientMock{
secrets: []*v1.Secret{
secrets: []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test-secret",
Namespace: "testing",
},
@ -1750,9 +1751,9 @@ func TestGetTLS(t *testing.T) {
desc: "secret doesn't provide any of the required fields",
ingress: testIngressWithoutHostname,
client: clientMock{
secrets: []*v1.Secret{
secrets: []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test-secret",
Namespace: "testing",
},
@ -1777,9 +1778,9 @@ func TestGetTLS(t *testing.T) {
),
),
client: clientMock{
secrets: []*v1.Secret{
secrets: []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test-secret",
Namespace: "testing",
},
@ -1814,9 +1815,9 @@ func TestGetTLS(t *testing.T) {
iTLSes(iTLS("test-secret")),
),
client: clientMock{
secrets: []*v1.Secret{
secrets: []*corev1.Secret{
{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "test-secret",
Namespace: "testing",
},


@ -1,23 +0,0 @@
package semver
import (
"encoding/json"
)
// MarshalJSON implements the encoding/json.Marshaler interface.
func (v Version) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (v *Version) UnmarshalJSON(data []byte) (err error) {
var versionString string
if err = json.Unmarshal(data, &versionString); err != nil {
return
}
*v, err = Parse(versionString)
return
}


@ -1,395 +0,0 @@
package semver
import (
"errors"
"fmt"
"strconv"
"strings"
)
const (
numbers string = "0123456789"
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
alphanum = alphas + numbers
)
// SpecVersion is the latest fully supported spec version of semver
var SpecVersion = Version{
Major: 2,
Minor: 0,
Patch: 0,
}
// Version represents a semver compatible version
type Version struct {
Major uint64
Minor uint64
Patch uint64
Pre []PRVersion
Build []string // No precedence
}
// Version to string
func (v Version) String() string {
b := make([]byte, 0, 5)
b = strconv.AppendUint(b, v.Major, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Minor, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Patch, 10)
if len(v.Pre) > 0 {
b = append(b, '-')
b = append(b, v.Pre[0].String()...)
for _, pre := range v.Pre[1:] {
b = append(b, '.')
b = append(b, pre.String()...)
}
}
if len(v.Build) > 0 {
b = append(b, '+')
b = append(b, v.Build[0]...)
for _, build := range v.Build[1:] {
b = append(b, '.')
b = append(b, build...)
}
}
return string(b)
}
// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
return (v.Compare(o) == 0)
}
// EQ checks if v is equal to o.
func (v Version) EQ(o Version) bool {
return (v.Compare(o) == 0)
}
// NE checks if v is not equal to o.
func (v Version) NE(o Version) bool {
return (v.Compare(o) != 0)
}
// GT checks if v is greater than o.
func (v Version) GT(o Version) bool {
return (v.Compare(o) == 1)
}
// GTE checks if v is greater than or equal to o.
func (v Version) GTE(o Version) bool {
return (v.Compare(o) >= 0)
}
// GE checks if v is greater than or equal to o.
func (v Version) GE(o Version) bool {
return (v.Compare(o) >= 0)
}
// LT checks if v is less than o.
func (v Version) LT(o Version) bool {
return (v.Compare(o) == -1)
}
// LTE checks if v is less than or equal to o.
func (v Version) LTE(o Version) bool {
return (v.Compare(o) <= 0)
}
// LE checks if v is less than or equal to o.
func (v Version) LE(o Version) bool {
return (v.Compare(o) <= 0)
}
// Compare compares Versions v to o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v Version) Compare(o Version) int {
if v.Major != o.Major {
if v.Major > o.Major {
return 1
}
return -1
}
if v.Minor != o.Minor {
if v.Minor > o.Minor {
return 1
}
return -1
}
if v.Patch != o.Patch {
if v.Patch > o.Patch {
return 1
}
return -1
}
// Quick comparison if a version has no prerelease versions
if len(v.Pre) == 0 && len(o.Pre) == 0 {
return 0
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
return 1
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
return -1
}
i := 0
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
continue
} else if comp == 1 {
return 1
} else {
return -1
}
}
// If all prerelease versions are equal but one has further components, that one is greater
if i == len(v.Pre) && i == len(o.Pre) {
return 0
} else if i == len(v.Pre) && i < len(o.Pre) {
return -1
} else {
return 1
}
}
// Validate validates v and returns error in case
func (v Version) Validate() error {
// Major, Minor, Patch already validated using uint64
for _, pre := range v.Pre {
if !pre.IsNum { //Numeric prerelease versions already uint64
if len(pre.VersionStr) == 0 {
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
}
if !containsOnly(pre.VersionStr, alphanum) {
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
}
}
}
for _, build := range v.Build {
if len(build) == 0 {
return fmt.Errorf("Build meta data can not be empty %q", build)
}
if !containsOnly(build, alphanum) {
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
}
}
return nil
}
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
func New(s string) (vp *Version, err error) {
v, err := Parse(s)
vp = &v
return
}
// Make is an alias for Parse, parses version string and returns a validated Version or error
func Make(s string) (Version, error) {
return Parse(s)
}
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
if len(s) == 0 {
return Version{}, errors.New("Version string empty")
}
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
if len(parts) != 3 {
return Version{}, errors.New("No Major.Minor.Patch elements found")
}
// Major
if !containsOnly(parts[0], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
}
if hasLeadingZeroes(parts[0]) {
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
}
major, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return Version{}, err
}
// Minor
if !containsOnly(parts[1], numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
}
if hasLeadingZeroes(parts[1]) {
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
}
minor, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return Version{}, err
}
v := Version{}
v.Major = major
v.Minor = minor
var build, prerelease []string
patchStr := parts[2]
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
build = strings.Split(patchStr[buildIndex+1:], ".")
patchStr = patchStr[:buildIndex]
}
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
prerelease = strings.Split(patchStr[preIndex+1:], ".")
patchStr = patchStr[:preIndex]
}
if !containsOnly(patchStr, numbers) {
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
}
if hasLeadingZeroes(patchStr) {
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
}
patch, err := strconv.ParseUint(patchStr, 10, 64)
if err != nil {
return Version{}, err
}
v.Patch = patch
// Prerelease
for _, prstr := range prerelease {
parsedPR, err := NewPRVersion(prstr)
if err != nil {
return Version{}, err
}
v.Pre = append(v.Pre, parsedPR)
}
// Build meta data
for _, str := range build {
if len(str) == 0 {
return Version{}, errors.New("Build meta data is empty")
}
if !containsOnly(str, alphanum) {
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
}
v.Build = append(v.Build, str)
}
return v, nil
}
// MustParse is like Parse but panics if the version cannot be parsed.
func MustParse(s string) Version {
v, err := Parse(s)
if err != nil {
panic(`semver: Parse(` + s + `): ` + err.Error())
}
return v
}
// PRVersion represents a PreRelease Version
type PRVersion struct {
VersionStr string
VersionNum uint64
IsNum bool
}
// NewPRVersion creates a new valid prerelease version
func NewPRVersion(s string) (PRVersion, error) {
if len(s) == 0 {
return PRVersion{}, errors.New("Prerelease is empty")
}
v := PRVersion{}
if containsOnly(s, numbers) {
if hasLeadingZeroes(s) {
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
}
num, err := strconv.ParseUint(s, 10, 64)
// Might never be hit, but just in case
if err != nil {
return PRVersion{}, err
}
v.VersionNum = num
v.IsNum = true
} else if containsOnly(s, alphanum) {
v.VersionStr = s
v.IsNum = false
} else {
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
}
return v, nil
}
// IsNumeric checks if prerelease-version is numeric
func (v PRVersion) IsNumeric() bool {
return v.IsNum
}
// Compare compares two PreRelease Versions v and o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v PRVersion) Compare(o PRVersion) int {
if v.IsNum && !o.IsNum {
return -1
} else if !v.IsNum && o.IsNum {
return 1
} else if v.IsNum && o.IsNum {
if v.VersionNum == o.VersionNum {
return 0
} else if v.VersionNum > o.VersionNum {
return 1
} else {
return -1
}
} else { // both are Alphas
if v.VersionStr == o.VersionStr {
return 0
} else if v.VersionStr > o.VersionStr {
return 1
} else {
return -1
}
}
}
// PreRelease version to string
func (v PRVersion) String() string {
if v.IsNum {
return strconv.FormatUint(v.VersionNum, 10)
}
return v.VersionStr
}
func containsOnly(s string, set string) bool {
return strings.IndexFunc(s, func(r rune) bool {
return !strings.ContainsRune(set, r)
}) == -1
}
func hasLeadingZeroes(s string) bool {
return len(s) > 1 && s[0] == '0'
}
// NewBuildVersion creates a new valid build version
func NewBuildVersion(s string) (string, error) {
if len(s) == 0 {
return "", errors.New("Buildversion is empty")
}
if !containsOnly(s, alphanum) {
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
}
return s, nil
}
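A minimal usage sketch of the Parse API above (the version string is illustrative):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Parse splits major.minor.patch, then peels off "+build" and "-prerelease".
	v, err := semver.Parse("1.2.3-beta.1+build.42")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Major, v.Minor, v.Patch)            // 1 2 3
	fmt.Println(v.Pre[0].String(), v.Pre[1].String()) // beta 1
	fmt.Println(v.Build)                              // [build 42]
	fmt.Println(v.Validate())                         // <nil>
}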


@@ -1,28 +0,0 @@
package semver
import (
"sort"
)
// Versions represents multiple versions.
type Versions []Version
// Len returns length of version collection
func (s Versions) Len() int {
return len(s)
}
// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
return s[i].LT(s[j])
}
// Sort sorts a slice of versions
func Sort(versions []Version) {
sort.Sort(Versions(versions))
}
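Sort builds directly on the Versions adapter above; a small sketch with illustrative versions:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-rc.1"),
	}
	semver.Sort(vs)
	for _, v := range vs {
		// Expected order per semver precedence: 1.2.0-rc.1, 1.2.0, 1.10.0
		fmt.Println(v.String())
	}
}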


@@ -1,30 +0,0 @@
package semver
import (
"database/sql/driver"
"fmt"
)
// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
var str string
switch src := src.(type) {
case string:
str = src
case []byte:
str = string(src)
default:
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
}
if t, err := Parse(str); err == nil {
*v = t
}
return
}
// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
return v.String(), nil
}


@@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).


@@ -1,7 +0,0 @@
package http
import "net/http"
type Client interface {
Do(*http.Request) (*http.Response, error)
}


@@ -1,156 +0,0 @@
package http
import (
"encoding/base64"
"encoding/json"
"errors"
"log"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
)
func WriteError(w http.ResponseWriter, code int, msg string) {
e := struct {
Error string `json:"error"`
}{
Error: msg,
}
b, err := json.Marshal(e)
if err != nil {
log.Printf("go-oidc: failed to marshal %#v: %v", e, err)
code = http.StatusInternalServerError
b = []byte(`{"error":"server_error"}`)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(b)
}
// BasicAuth parses a username and password from the request's
// Authorization header. This was pulled from golang master:
// https://codereview.appspot.com/76540043
func BasicAuth(r *http.Request) (username, password string, ok bool) {
auth := r.Header.Get("Authorization")
if auth == "" {
return
}
if !strings.HasPrefix(auth, "Basic ") {
return
}
c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
if err != nil {
return
}
cs := string(c)
s := strings.IndexByte(cs, ':')
if s < 0 {
return
}
return cs[:s], cs[s+1:], true
}
func cacheControlMaxAge(hdr string) (time.Duration, bool, error) {
for _, field := range strings.Split(hdr, ",") {
parts := strings.SplitN(strings.TrimSpace(field), "=", 2)
k := strings.ToLower(strings.TrimSpace(parts[0]))
if k != "max-age" {
continue
}
if len(parts) == 1 {
return 0, false, errors.New("max-age has no value")
}
v := strings.TrimSpace(parts[1])
if v == "" {
return 0, false, errors.New("max-age has empty value")
}
age, err := strconv.Atoi(v)
if err != nil {
return 0, false, err
}
if age <= 0 {
return 0, false, nil
}
return time.Duration(age) * time.Second, true, nil
}
return 0, false, nil
}
func expires(date, expires string) (time.Duration, bool, error) {
if date == "" || expires == "" {
return 0, false, nil
}
te, err := time.Parse(time.RFC1123, expires)
if err != nil {
return 0, false, err
}
td, err := time.Parse(time.RFC1123, date)
if err != nil {
return 0, false, err
}
ttl := te.Sub(td)
// The headers indicate the data has already expired; the caller should not
// have to care about this case
if ttl <= 0 {
return 0, false, nil
}
return ttl, true, nil
}
func Cacheable(hdr http.Header) (time.Duration, bool, error) {
ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control"))
if err != nil || ok {
return ttl, ok, err
}
return expires(hdr.Get("Date"), hdr.Get("Expires"))
}
// MergeQuery appends additional query values to an existing URL.
func MergeQuery(u url.URL, q url.Values) url.URL {
uv := u.Query()
for k, vs := range q {
for _, v := range vs {
uv.Add(k, v)
}
}
u.RawQuery = uv.Encode()
return u
}
// NewResourceLocation appends a resource id to the end of the requested URL path.
func NewResourceLocation(reqURL *url.URL, id string) string {
var u url.URL
u = *reqURL
u.Path = path.Join(u.Path, id)
u.RawQuery = ""
u.Fragment = ""
return u.String()
}
// CopyRequest returns a clone of the provided *http.Request.
// The returned object is a shallow copy of the struct and a
// deep copy of its Header field.
func CopyRequest(r *http.Request) *http.Request {
r2 := *r
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return &r2
}
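A short sketch of the cache helpers above; the header value is illustrative:

package main

import (
	"fmt"
	"net/http"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	hdr := http.Header{}
	hdr.Set("Cache-Control", "public, max-age=300")

	// Cacheable prefers Cache-Control max-age and falls back to Date/Expires.
	ttl, ok, err := phttp.Cacheable(hdr)
	fmt.Println(ttl, ok, err) // 5m0s true <nil>
}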


@@ -1,29 +0,0 @@
package http
import (
"errors"
"net/url"
)
// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty
// since `url.Parse("")` does not return an error. The URL must contain a scheme and a host.
func ParseNonEmptyURL(u string) (*url.URL, error) {
if u == "" {
return nil, errors.New("url is empty")
}
ur, err := url.Parse(u)
if err != nil {
return nil, err
}
if ur.Scheme == "" {
return nil, errors.New("url scheme is empty")
}
if ur.Host == "" {
return nil, errors.New("url host is empty")
}
return ur, nil
}


@@ -1,126 +0,0 @@
package jose
import (
"encoding/json"
"fmt"
"math"
"time"
)
type Claims map[string]interface{}
func (c Claims) Add(name string, value interface{}) {
c[name] = value
}
func (c Claims) StringClaim(name string) (string, bool, error) {
cl, ok := c[name]
if !ok {
return "", false, nil
}
v, ok := cl.(string)
if !ok {
return "", false, fmt.Errorf("unable to parse claim as string: %v", name)
}
return v, true, nil
}
func (c Claims) StringsClaim(name string) ([]string, bool, error) {
cl, ok := c[name]
if !ok {
return nil, false, nil
}
if v, ok := cl.([]string); ok {
return v, true, nil
}
// When unmarshaled, []string will become []interface{}.
if v, ok := cl.([]interface{}); ok {
var ret []string
for _, vv := range v {
str, ok := vv.(string)
if !ok {
return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
}
ret = append(ret, str)
}
return ret, true, nil
}
return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name)
}
func (c Claims) Int64Claim(name string) (int64, bool, error) {
cl, ok := c[name]
if !ok {
return 0, false, nil
}
v, ok := cl.(int64)
if !ok {
vf, ok := cl.(float64)
if !ok {
return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name)
}
v = int64(vf)
}
return v, true, nil
}
func (c Claims) Float64Claim(name string) (float64, bool, error) {
cl, ok := c[name]
if !ok {
return 0, false, nil
}
v, ok := cl.(float64)
if !ok {
vi, ok := cl.(int64)
if !ok {
return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
}
v = float64(vi)
}
return v, true, nil
}
func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
v, ok, err := c.Float64Claim(name)
if !ok || err != nil {
return time.Time{}, ok, err
}
s := math.Trunc(v)
ns := (v - s) * math.Pow(10, 9)
return time.Unix(int64(s), int64(ns)).UTC(), true, nil
}
func decodeClaims(payload []byte) (Claims, error) {
var c Claims
if err := json.Unmarshal(payload, &c); err != nil {
return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err)
}
return c, nil
}
func marshalClaims(c Claims) ([]byte, error) {
b, err := json.Marshal(c)
if err != nil {
return nil, err
}
return b, nil
}
func encodeClaims(c Claims) (string, error) {
b, err := marshalClaims(c)
if err != nil {
return "", err
}
return encodeSegment(b), nil
}
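A sketch of the claim accessors above; the accessors convert between numeric types because JSON numbers unmarshal as float64:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	claims := jose.Claims{}
	claims.Add("sub", "user-1")                // placeholder subject
	claims.Add("exp", float64(1518595200))     // JSON numbers arrive as float64

	sub, found, err := claims.StringClaim("sub")
	fmt.Println(sub, found, err) // user-1 true <nil>

	exp, found, err := claims.TimeClaim("exp")
	fmt.Println(exp.Unix(), found, err) // 1518595200 true <nil>
}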


@@ -1,112 +0,0 @@
package jose
import (
"encoding/base64"
"encoding/json"
"fmt"
"strings"
)
const (
HeaderMediaType = "typ"
HeaderKeyAlgorithm = "alg"
HeaderKeyID = "kid"
)
const (
// Encryption Algorithm Header Parameter Values for JWS
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
AlgHS256 = "HS256"
AlgHS384 = "HS384"
AlgHS512 = "HS512"
AlgRS256 = "RS256"
AlgRS384 = "RS384"
AlgRS512 = "RS512"
AlgES256 = "ES256"
AlgES384 = "ES384"
AlgES512 = "ES512"
AlgPS256 = "PS256"
AlgPS384 = "PS384"
AlgPS512 = "PS512"
AlgNone = "none"
)
const (
// Algorithm Header Parameter Values for JWE
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
AlgRSA15 = "RSA1_5"
AlgRSAOAEP = "RSA-OAEP"
AlgRSAOAEP256 = "RSA-OAEP-256"
AlgA128KW = "A128KW"
AlgA192KW = "A192KW"
AlgA256KW = "A256KW"
AlgDir = "dir"
AlgECDHES = "ECDH-ES"
AlgECDHESA128KW = "ECDH-ES+A128KW"
AlgECDHESA192KW = "ECDH-ES+A192KW"
AlgECDHESA256KW = "ECDH-ES+A256KW"
AlgA128GCMKW = "A128GCMKW"
AlgA192GCMKW = "A192GCMKW"
AlgA256GCMKW = "A256GCMKW"
AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
)
const (
// Encryption Algorithm Header Parameter Values for JWE
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
EncA128CBCHS256 = "A128CBC-HS256"
EncA128CBCHS384 = "A128CBC-HS384"
EncA256CBCHS512 = "A256CBC-HS512"
EncA128GCM = "A128GCM"
EncA192GCM = "A192GCM"
EncA256GCM = "A256GCM"
)
type JOSEHeader map[string]string
func (j JOSEHeader) Validate() error {
if _, exists := j[HeaderKeyAlgorithm]; !exists {
return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm)
}
return nil
}
func decodeHeader(seg string) (JOSEHeader, error) {
b, err := decodeSegment(seg)
if err != nil {
return nil, err
}
var h JOSEHeader
err = json.Unmarshal(b, &h)
if err != nil {
return nil, err
}
return h, nil
}
func encodeHeader(h JOSEHeader) (string, error) {
b, err := json.Marshal(h)
if err != nil {
return "", err
}
return encodeSegment(b), nil
}
// Decode JWT specific base64url encoding with padding stripped
func decodeSegment(seg string) ([]byte, error) {
if l := len(seg) % 4; l != 0 {
seg += strings.Repeat("=", 4-l)
}
return base64.URLEncoding.DecodeString(seg)
}
// Encode JWT specific base64url encoding with padding stripped
func encodeSegment(seg []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}
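The segment helpers above are unexported, but JOSEHeader and the Alg constants are public; a minimal sketch of header validation:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	h := jose.JOSEHeader{
		jose.HeaderKeyAlgorithm: jose.AlgRS256,
		jose.HeaderKeyID:        "key-1", // placeholder key ID
	}
	// Validate only requires the "alg" parameter to be present.
	fmt.Println(h.Validate())                 // <nil>
	fmt.Println(jose.JOSEHeader{}.Validate()) // header missing "alg" parameter
}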


@@ -1,135 +0,0 @@
package jose
import (
"bytes"
"encoding/base64"
"encoding/binary"
"encoding/json"
"math/big"
"strings"
)
// JSON Web Key
// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5
type JWK struct {
ID string
Type string
Alg string
Use string
Exponent int
Modulus *big.Int
Secret []byte
}
type jwkJSON struct {
ID string `json:"kid"`
Type string `json:"kty"`
Alg string `json:"alg"`
Use string `json:"use"`
Exponent string `json:"e"`
Modulus string `json:"n"`
}
func (j *JWK) MarshalJSON() ([]byte, error) {
t := jwkJSON{
ID: j.ID,
Type: j.Type,
Alg: j.Alg,
Use: j.Use,
Exponent: encodeExponent(j.Exponent),
Modulus: encodeModulus(j.Modulus),
}
return json.Marshal(&t)
}
func (j *JWK) UnmarshalJSON(data []byte) error {
var t jwkJSON
err := json.Unmarshal(data, &t)
if err != nil {
return err
}
e, err := decodeExponent(t.Exponent)
if err != nil {
return err
}
n, err := decodeModulus(t.Modulus)
if err != nil {
return err
}
j.ID = t.ID
j.Type = t.Type
j.Alg = t.Alg
j.Use = t.Use
j.Exponent = e
j.Modulus = n
return nil
}
type JWKSet struct {
Keys []JWK `json:"keys"`
}
func decodeExponent(e string) (int, error) {
decE, err := decodeBase64URLPaddingOptional(e)
if err != nil {
return 0, err
}
var eBytes []byte
if len(decE) < 8 {
eBytes = make([]byte, 8-len(decE), 8)
eBytes = append(eBytes, decE...)
} else {
eBytes = decE
}
eReader := bytes.NewReader(eBytes)
var E uint64
err = binary.Read(eReader, binary.BigEndian, &E)
if err != nil {
return 0, err
}
return int(E), nil
}
func encodeExponent(e int) string {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(e))
var idx int
for ; idx < 8; idx++ {
if b[idx] != 0x0 {
break
}
}
return base64.URLEncoding.EncodeToString(b[idx:])
}
// decodeModulus turns a base64url-encoded modulus of a key into a big.Int.
func decodeModulus(n string) (*big.Int, error) {
decN, err := decodeBase64URLPaddingOptional(n)
if err != nil {
return nil, err
}
N := big.NewInt(0)
N.SetBytes(decN)
return N, nil
}
func encodeModulus(n *big.Int) string {
return base64.URLEncoding.EncodeToString(n.Bytes())
}
// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
// The stdlib version currently doesn't handle this.
// We can get rid of this if this bug:
// https://github.com/golang/go/issues/4237
// ever closes.
func decodeBase64URLPaddingOptional(e string) ([]byte, error) {
if m := len(e) % 4; m != 0 {
e += strings.Repeat("=", 4-m)
}
return base64.URLEncoding.DecodeString(e)
}
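A sketch of the JWK JSON round trip above, using illustrative (not real) key material; the common RSA exponent 65537 serializes to the familiar "AQAB":

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	jwk := jose.JWK{
		ID:       "key-1", // placeholder key ID
		Type:     "RSA",
		Alg:      "RS256",
		Use:      "sig",
		Exponent: 65537,                // big-endian bytes 0x01 0x00 0x01 -> "AQAB"
		Modulus:  big.NewInt(0x00c3e1), // illustrative only, not a real modulus
	}
	b, err := json.Marshal(&jwk)
	if err != nil {
		panic(err)
	}
	// Prints {"kid":"key-1","kty":"RSA","alg":"RS256","use":"sig","e":"AQAB","n":"<base64url of modulus bytes>"}
	fmt.Println(string(b))
}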


@@ -1,51 +0,0 @@
package jose
import (
"fmt"
"strings"
)
type JWS struct {
RawHeader string
Header JOSEHeader
RawPayload string
Payload []byte
Signature []byte
}
// ParseJWS parses a raw encoded JWS token and verifies its structure.
func ParseJWS(raw string) (JWS, error) {
parts := strings.Split(raw, ".")
if len(parts) != 3 {
return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts))
}
rawSig := parts[2]
jws := JWS{
RawHeader: parts[0],
RawPayload: parts[1],
}
header, err := decodeHeader(jws.RawHeader)
if err != nil {
return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err)
}
if err = header.Validate(); err != nil {
return JWS{}, fmt.Errorf("malformed JWS, %s", err)
}
jws.Header = header
payload, err := decodeSegment(jws.RawPayload)
if err != nil {
return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err)
}
jws.Payload = payload
sig, err := decodeSegment(rawSig)
if err != nil {
return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err)
}
jws.Signature = sig
return jws, nil
}


@@ -1,82 +0,0 @@
package jose
import "strings"
type JWT JWS
func ParseJWT(token string) (jwt JWT, err error) {
jws, err := ParseJWS(token)
if err != nil {
return
}
return JWT(jws), nil
}
func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) {
jwt = JWT{}
jwt.Header = header
jwt.Header[HeaderMediaType] = "JWT"
claimBytes, err := marshalClaims(claims)
if err != nil {
return
}
jwt.Payload = claimBytes
eh, err := encodeHeader(header)
if err != nil {
return
}
jwt.RawHeader = eh
ec, err := encodeClaims(claims)
if err != nil {
return
}
jwt.RawPayload = ec
return
}
func (j *JWT) KeyID() (string, bool) {
kID, ok := j.Header[HeaderKeyID]
return kID, ok
}
func (j *JWT) Claims() (Claims, error) {
return decodeClaims(j.Payload)
}
// Data returns the encoded data part of the token (header.claims), which is the input to signing.
func (j *JWT) Data() string {
return strings.Join([]string{j.RawHeader, j.RawPayload}, ".")
}
// Encode returns the full encoded JWT token string in the format header.claims.signature.
func (j *JWT) Encode() string {
d := j.Data()
s := encodeSegment(j.Signature)
return strings.Join([]string{d, s}, ".")
}
func NewSignedJWT(claims Claims, s Signer) (*JWT, error) {
header := JOSEHeader{
HeaderKeyAlgorithm: s.Alg(),
HeaderKeyID: s.ID(),
}
jwt, err := NewJWT(header, claims)
if err != nil {
return nil, err
}
sig, err := s.Sign([]byte(jwt.Data()))
if err != nil {
return nil, err
}
jwt.Signature = sig
return &jwt, nil
}


@@ -1,24 +0,0 @@
package jose
import (
"fmt"
)
type Verifier interface {
ID() string
Alg() string
Verify(sig []byte, data []byte) error
}
type Signer interface {
Verifier
Sign(data []byte) (sig []byte, err error)
}
func NewVerifier(jwk JWK) (Verifier, error) {
if jwk.Type != "RSA" {
return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
}
return NewVerifierRSA(jwk)
}


@@ -1,67 +0,0 @@
package jose
import (
"bytes"
"crypto"
"crypto/hmac"
_ "crypto/sha256"
"errors"
"fmt"
)
type VerifierHMAC struct {
KeyID string
Hash crypto.Hash
Secret []byte
}
type SignerHMAC struct {
VerifierHMAC
}
func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
if jwk.Alg != "" && jwk.Alg != "HS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}
v := VerifierHMAC{
KeyID: jwk.ID,
Secret: jwk.Secret,
Hash: crypto.SHA256,
}
return &v, nil
}
func (v *VerifierHMAC) ID() string {
return v.KeyID
}
func (v *VerifierHMAC) Alg() string {
return "HS256"
}
func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
h := hmac.New(v.Hash.New, v.Secret)
h.Write(data)
if !bytes.Equal(sig, h.Sum(nil)) {
return errors.New("invalid hmac signature")
}
return nil
}
func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
return &SignerHMAC{
VerifierHMAC: VerifierHMAC{
KeyID: kid,
Secret: secret,
Hash: crypto.SHA256,
},
}
}
func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
h := hmac.New(s.Hash.New, s.Secret)
h.Write(data)
return h.Sum(nil), nil
}
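Putting the pieces together, a sketch that signs and then verifies a token with the HMAC signer above (key ID and secret are placeholders):

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	signer := jose.NewSignerHMAC("key-1", []byte("shared-secret"))

	claims := jose.Claims{"sub": "user-1"}
	jwt, err := jose.NewSignedJWT(claims, signer)
	if err != nil {
		panic(err)
	}

	raw := jwt.Encode() // header.claims.signature

	parsed, err := jose.ParseJWT(raw)
	if err != nil {
		panic(err)
	}

	// SignerHMAC embeds VerifierHMAC, so the same value can verify.
	err = signer.Verify(parsed.Signature, []byte(parsed.Data()))
	fmt.Println(err) // <nil> when the signature checks out
}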


@@ -1,67 +0,0 @@
package jose
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"fmt"
)
type VerifierRSA struct {
KeyID string
Hash crypto.Hash
PublicKey rsa.PublicKey
}
type SignerRSA struct {
PrivateKey rsa.PrivateKey
VerifierRSA
}
func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
if jwk.Alg != "" && jwk.Alg != "RS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}
v := VerifierRSA{
KeyID: jwk.ID,
PublicKey: rsa.PublicKey{
N: jwk.Modulus,
E: jwk.Exponent,
},
Hash: crypto.SHA256,
}
return &v, nil
}
func NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA {
return &SignerRSA{
PrivateKey: key,
VerifierRSA: VerifierRSA{
KeyID: kid,
PublicKey: key.PublicKey,
Hash: crypto.SHA256,
},
}
}
func (v *VerifierRSA) ID() string {
return v.KeyID
}
func (v *VerifierRSA) Alg() string {
return "RS256"
}
func (v *VerifierRSA) Verify(sig []byte, data []byte) error {
h := v.Hash.New()
h.Write(data)
return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig)
}
func (s *SignerRSA) Sign(data []byte) ([]byte, error) {
h := s.Hash.New()
h.Write(data)
return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil))
}


@@ -1,153 +0,0 @@
package key
import (
"crypto/rand"
"crypto/rsa"
"encoding/hex"
"encoding/json"
"io"
"time"
"github.com/coreos/go-oidc/jose"
)
func NewPublicKey(jwk jose.JWK) *PublicKey {
return &PublicKey{jwk: jwk}
}
type PublicKey struct {
jwk jose.JWK
}
func (k *PublicKey) MarshalJSON() ([]byte, error) {
return json.Marshal(&k.jwk)
}
func (k *PublicKey) UnmarshalJSON(data []byte) error {
var jwk jose.JWK
if err := json.Unmarshal(data, &jwk); err != nil {
return err
}
k.jwk = jwk
return nil
}
func (k *PublicKey) ID() string {
return k.jwk.ID
}
func (k *PublicKey) Verifier() (jose.Verifier, error) {
return jose.NewVerifierRSA(k.jwk)
}
type PrivateKey struct {
KeyID string
PrivateKey *rsa.PrivateKey
}
func (k *PrivateKey) ID() string {
return k.KeyID
}
func (k *PrivateKey) Signer() jose.Signer {
return jose.NewSignerRSA(k.ID(), *k.PrivateKey)
}
func (k *PrivateKey) JWK() jose.JWK {
return jose.JWK{
ID: k.KeyID,
Type: "RSA",
Alg: "RS256",
Use: "sig",
Exponent: k.PrivateKey.PublicKey.E,
Modulus: k.PrivateKey.PublicKey.N,
}
}
type KeySet interface {
ExpiresAt() time.Time
}
type PublicKeySet struct {
keys []PublicKey
index map[string]*PublicKey
expiresAt time.Time
}
func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet {
keys := make([]PublicKey, len(jwks))
index := make(map[string]*PublicKey)
for i, jwk := range jwks {
keys[i] = *NewPublicKey(jwk)
index[keys[i].ID()] = &keys[i]
}
return &PublicKeySet{
keys: keys,
index: index,
expiresAt: exp,
}
}
func (s *PublicKeySet) ExpiresAt() time.Time {
return s.expiresAt
}
func (s *PublicKeySet) Keys() []PublicKey {
return s.keys
}
func (s *PublicKeySet) Key(id string) *PublicKey {
return s.index[id]
}
type PrivateKeySet struct {
keys []*PrivateKey
ActiveKeyID string
expiresAt time.Time
}
func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet {
return &PrivateKeySet{
keys: keys,
ActiveKeyID: keys[0].ID(),
expiresAt: exp.UTC(),
}
}
func (s *PrivateKeySet) Keys() []*PrivateKey {
return s.keys
}
func (s *PrivateKeySet) ExpiresAt() time.Time {
return s.expiresAt
}
func (s *PrivateKeySet) Active() *PrivateKey {
for i, k := range s.keys {
if k.ID() == s.ActiveKeyID {
return s.keys[i]
}
}
return nil
}
type GeneratePrivateKeyFunc func() (*PrivateKey, error)
func GeneratePrivateKey() (*PrivateKey, error) {
pk, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, err
}
keyID := make([]byte, 20)
if _, err := io.ReadFull(rand.Reader, keyID); err != nil {
return nil, err
}
k := PrivateKey{
KeyID: hex.EncodeToString(keyID),
PrivateKey: pk,
}
return &k, nil
}
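A sketch of key generation and the public key set above (the expiry window is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/key"
)

func main() {
	// GeneratePrivateKey creates a 2048-bit RSA key with a random hex key ID.
	priv, err := key.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}

	// Publish the public half as a key set that expires in 24 hours.
	pub := key.NewPublicKeySet([]jose.JWK{priv.JWK()}, time.Now().Add(24*time.Hour))
	fmt.Println(pub.Key(priv.ID()) != nil) // true: the set indexes keys by ID
}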


@@ -1,99 +0,0 @@
package key
import (
"errors"
"time"
"github.com/jonboulle/clockwork"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/pkg/health"
)
type PrivateKeyManager interface {
ExpiresAt() time.Time
Signer() (jose.Signer, error)
JWKs() ([]jose.JWK, error)
PublicKeys() ([]PublicKey, error)
WritableKeySetRepo
health.Checkable
}
func NewPrivateKeyManager() PrivateKeyManager {
return &privateKeyManager{
clock: clockwork.NewRealClock(),
}
}
type privateKeyManager struct {
keySet *PrivateKeySet
clock clockwork.Clock
}
func (m *privateKeyManager) ExpiresAt() time.Time {
if m.keySet == nil {
return m.clock.Now().UTC()
}
return m.keySet.ExpiresAt()
}
func (m *privateKeyManager) Signer() (jose.Signer, error) {
if err := m.Healthy(); err != nil {
return nil, err
}
return m.keySet.Active().Signer(), nil
}
func (m *privateKeyManager) JWKs() ([]jose.JWK, error) {
if err := m.Healthy(); err != nil {
return nil, err
}
keys := m.keySet.Keys()
jwks := make([]jose.JWK, len(keys))
for i, k := range keys {
jwks[i] = k.JWK()
}
return jwks, nil
}
func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) {
jwks, err := m.JWKs()
if err != nil {
return nil, err
}
keys := make([]PublicKey, len(jwks))
for i, jwk := range jwks {
keys[i] = *NewPublicKey(jwk)
}
return keys, nil
}
func (m *privateKeyManager) Healthy() error {
if m.keySet == nil {
return errors.New("private key manager uninitialized")
}
if len(m.keySet.Keys()) == 0 {
return errors.New("private key manager zero keys")
}
if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) {
return errors.New("private key manager keys expired")
}
return nil
}
func (m *privateKeyManager) Set(keySet KeySet) error {
privKeySet, ok := keySet.(*PrivateKeySet)
if !ok {
return errors.New("unable to cast to PrivateKeySet")
}
m.keySet = privKeySet
return nil
}


@@ -1,55 +0,0 @@
package key
import (
"errors"
"sync"
)
var ErrorNoKeys = errors.New("no keys found")
type WritableKeySetRepo interface {
Set(KeySet) error
}
type ReadableKeySetRepo interface {
Get() (KeySet, error)
}
type PrivateKeySetRepo interface {
WritableKeySetRepo
ReadableKeySetRepo
}
func NewPrivateKeySetRepo() PrivateKeySetRepo {
return &memPrivateKeySetRepo{}
}
type memPrivateKeySetRepo struct {
mu sync.RWMutex
pks PrivateKeySet
}
func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
pks, ok := ks.(*PrivateKeySet)
if !ok {
return errors.New("unable to cast to PrivateKeySet")
} else if pks == nil {
return errors.New("nil KeySet")
}
r.mu.Lock()
defer r.mu.Unlock()
r.pks = *pks
return nil
}
func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
r.mu.RLock()
defer r.mu.RUnlock()
if r.pks.keys == nil {
return nil, ErrorNoKeys
}
return KeySet(&r.pks), nil
}


@@ -1,159 +0,0 @@
package key
import (
"errors"
"log"
"time"
ptime "github.com/coreos/pkg/timeutil"
"github.com/jonboulle/clockwork"
)
var (
ErrorPrivateKeysExpired = errors.New("private keys have expired")
)
func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator {
return &PrivateKeyRotator{
repo: repo,
ttl: ttl,
keep: 2,
generateKey: GeneratePrivateKey,
clock: clockwork.NewRealClock(),
}
}
type PrivateKeyRotator struct {
repo PrivateKeySetRepo
generateKey GeneratePrivateKeyFunc
clock clockwork.Clock
keep int
ttl time.Duration
}
func (r *PrivateKeyRotator) expiresAt() time.Time {
return r.clock.Now().UTC().Add(r.ttl)
}
func (r *PrivateKeyRotator) Healthy() error {
pks, err := r.privateKeySet()
if err != nil {
return err
}
if r.clock.Now().After(pks.ExpiresAt()) {
return ErrorPrivateKeysExpired
}
return nil
}
func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) {
ks, err := r.repo.Get()
if err != nil {
return nil, err
}
pks, ok := ks.(*PrivateKeySet)
if !ok {
return nil, errors.New("unable to cast to PrivateKeySet")
}
return pks, nil
}
func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) {
pks, err := r.privateKeySet()
if err == ErrorNoKeys {
return 0, nil
}
if err != nil {
return 0, err
}
now := r.clock.Now()
// Ideally, we want to rotate after half the TTL has elapsed.
idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2)
// If we are past the ideal rotation time, rotate immediately.
return max(0, idealRotationTime.Sub(now)), nil
}
func max(a, b time.Duration) time.Duration {
if a > b {
return a
}
return b
}
func (r *PrivateKeyRotator) Run() chan struct{} {
attempt := func() {
k, err := r.generateKey()
if err != nil {
log.Printf("go-oidc: failed generating signing key: %v", err)
return
}
exp := r.expiresAt()
if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil {
log.Printf("go-oidc: key rotation failed: %v", err)
return
}
}
stop := make(chan struct{})
go func() {
for {
var nextRotation time.Duration
var sleep time.Duration
var err error
for {
if nextRotation, err = r.nextRotation(); err == nil {
break
}
sleep = ptime.ExpBackoff(sleep, time.Minute)
log.Printf("go-oidc: error getting nextRotation, retrying in %v: %v", sleep, err)
time.Sleep(sleep)
}
select {
case <-r.clock.After(nextRotation):
attempt()
case <-stop:
return
}
}
}()
return stop
}
func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error {
ks, err := repo.Get()
if err != nil && err != ErrorNoKeys {
return err
}
var keys []*PrivateKey
if ks != nil {
pks, ok := ks.(*PrivateKeySet)
if !ok {
return errors.New("unable to cast to PrivateKeySet")
}
keys = pks.Keys()
}
keys = append([]*PrivateKey{k}, keys...)
if l := len(keys); l > keep {
keys = keys[0:keep]
}
nks := PrivateKeySet{
keys: keys,
ActiveKeyID: k.ID(),
expiresAt: exp,
}
return repo.Set(KeySet(&nks))
}
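A sketch wiring the rotator to the in-memory repo from the previous file (the TTL is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/go-oidc/key"
)

func main() {
	// Keys live for 6 hours; the rotator keeps the active key plus one predecessor (keep is 2).
	repo := key.NewPrivateKeySetRepo()
	rotator := key.NewPrivateKeyRotator(repo, 6*time.Hour)

	// Run generates a key immediately (no keys yet, so nextRotation is 0),
	// then rotates again once roughly half of the TTL has elapsed.
	stop := rotator.Run()
	defer close(stop)

	// Wait until the background goroutine has stored the first key set.
	for rotator.Healthy() != nil {
		time.Sleep(100 * time.Millisecond)
	}
	fmt.Println("signing keys available")
}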


@@ -1,91 +0,0 @@
package key
import (
"errors"
"log"
"time"
"github.com/jonboulle/clockwork"
"github.com/coreos/pkg/timeutil"
)
func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer {
return &KeySetSyncer{
readable: r,
writable: w,
clock: clockwork.NewRealClock(),
}
}
type KeySetSyncer struct {
readable ReadableKeySetRepo
writable WritableKeySetRepo
clock clockwork.Clock
}
func (s *KeySetSyncer) Run() chan struct{} {
stop := make(chan struct{})
go func() {
var failing bool
var next time.Duration
for {
exp, err := syncKeySet(s.readable, s.writable, s.clock)
if err != nil || exp == 0 {
if !failing {
failing = true
next = time.Second
} else {
next = timeutil.ExpBackoff(next, time.Minute)
}
if exp == 0 {
log.Printf("Synced to already expired key set, retrying in %v: %v", next, err)
} else {
log.Printf("Failed syncing key set, retrying in %v: %v", next, err)
}
} else {
failing = false
next = exp / 2
}
select {
case <-s.clock.After(next):
continue
case <-stop:
return
}
}
}()
return stop
}
func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
return syncKeySet(r, w, clockwork.NewRealClock())
}
// syncKeySet copies the key set from r to w and returns the duration until the key set expires.
// If the key set has already expired, it returns a zero duration.
func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
var ks KeySet
ks, err = r.Get()
if err != nil {
return
}
if ks == nil {
err = errors.New("no source KeySet")
return
}
if err = w.Set(ks); err != nil {
return
}
now := clock.Now()
if ks.ExpiresAt().After(now) {
exp = ks.ExpiresAt().Sub(now)
}
return
}


@@ -1,29 +0,0 @@
package oauth2
const (
ErrorAccessDenied = "access_denied"
ErrorInvalidClient = "invalid_client"
ErrorInvalidGrant = "invalid_grant"
ErrorInvalidRequest = "invalid_request"
ErrorServerError = "server_error"
ErrorUnauthorizedClient = "unauthorized_client"
ErrorUnsupportedGrantType = "unsupported_grant_type"
ErrorUnsupportedResponseType = "unsupported_response_type"
)
type Error struct {
Type string `json:"error"`
Description string `json:"error_description,omitempty"`
State string `json:"state,omitempty"`
}
func (e *Error) Error() string {
if e.Description != "" {
return e.Type + ": " + e.Description
}
return e.Type
}
func NewError(typ string) *Error {
return &Error{Type: typ}
}


@@ -1,416 +0,0 @@
package oauth2
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"mime"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
phttp "github.com/coreos/go-oidc/http"
)
// ResponseTypesEqual compares two response_type values. If either
// contains a space, it is treated as an unordered list. For example,
// comparing "code id_token" and "id_token code" would evaluate to true.
func ResponseTypesEqual(r1, r2 string) bool {
if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
// fast route, no split needed
return r1 == r2
}
// split, sort, and compare
r1Fields := strings.Fields(r1)
r2Fields := strings.Fields(r2)
if len(r1Fields) != len(r2Fields) {
return false
}
sort.Strings(r1Fields)
sort.Strings(r2Fields)
for i, r1Field := range r1Fields {
if r1Field != r2Fields[i] {
return false
}
}
return true
}
const (
// OAuth2.0 response types registered by OIDC.
//
// See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
ResponseTypeCode = "code"
ResponseTypeCodeIDToken = "code id_token"
ResponseTypeCodeIDTokenToken = "code id_token token"
ResponseTypeIDToken = "id_token"
ResponseTypeIDTokenToken = "id_token token"
ResponseTypeToken = "token"
ResponseTypeNone = "none"
)
const (
GrantTypeAuthCode = "authorization_code"
GrantTypeClientCreds = "client_credentials"
GrantTypeUserCreds = "password"
GrantTypeImplicit = "implicit"
GrantTypeRefreshToken = "refresh_token"
AuthMethodClientSecretPost = "client_secret_post"
AuthMethodClientSecretBasic = "client_secret_basic"
AuthMethodClientSecretJWT = "client_secret_jwt"
AuthMethodPrivateKeyJWT = "private_key_jwt"
)
type Config struct {
Credentials ClientCredentials
Scope []string
RedirectURL string
AuthURL string
TokenURL string
// Must be one of the AuthMethodXXX methods above. Right now, only
// AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported.
AuthMethod string
}
type Client struct {
hc phttp.Client
creds ClientCredentials
scope []string
authURL *url.URL
redirectURL *url.URL
tokenURL *url.URL
authMethod string
}
type ClientCredentials struct {
ID string
Secret string
}
func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
if len(cfg.Credentials.ID) == 0 {
err = errors.New("missing client id")
return
}
if len(cfg.Credentials.Secret) == 0 {
err = errors.New("missing client secret")
return
}
if cfg.AuthMethod == "" {
cfg.AuthMethod = AuthMethodClientSecretBasic
} else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic {
err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod)
return
}
au, err := phttp.ParseNonEmptyURL(cfg.AuthURL)
if err != nil {
return
}
tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL)
if err != nil {
return
}
// Allow empty redirect URL in the case where the client
// only needs to verify a given token.
ru, err := url.Parse(cfg.RedirectURL)
if err != nil {
return
}
c = &Client{
creds: cfg.Credentials,
scope: cfg.Scope,
redirectURL: ru,
authURL: au,
tokenURL: tu,
hc: hc,
authMethod: cfg.AuthMethod,
}
return
}
// HttpClient returns the embedded HTTP client.
func (c *Client) HttpClient() phttp.Client {
return c.hc
}
// AuthCodeURL generates the URL for the initial redirect to the OAuth provider.
func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
v := c.commonURLValues()
v.Set("state", state)
if strings.ToLower(accessType) == "offline" {
v.Set("access_type", "offline")
}
if prompt != "" {
v.Set("prompt", prompt)
}
v.Set("response_type", "code")
q := v.Encode()
u := *c.authURL
if u.RawQuery == "" {
u.RawQuery = q
} else {
u.RawQuery += "&" + q
}
return u.String()
}
func (c *Client) commonURLValues() url.Values {
return url.Values{
"redirect_uri": {c.redirectURL.String()},
"scope": {strings.Join(c.scope, " ")},
"client_id": {c.creds.ID},
}
}
func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
var req *http.Request
var err error
switch c.authMethod {
case AuthMethodClientSecretPost:
values.Set("client_secret", c.creds.Secret)
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
case AuthMethodClientSecretBasic:
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
encodedID := url.QueryEscape(c.creds.ID)
encodedSecret := url.QueryEscape(c.creds.Secret)
req.SetBasicAuth(encodedID, encodedSecret)
default:
panic("misconfigured client: auth method not supported")
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req, nil
}
// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type.
// May not be supported by all OAuth2 servers.
func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) {
v := url.Values{
"scope": {strings.Join(scope, " ")},
"grant_type": {GrantTypeClientCreds},
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type
// May not be supported by all OAuth2 servers.
func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
v := url.Values{
"scope": {strings.Join(c.scope, " ")},
"grant_type": {GrantTypeUserCreds},
"username": {username},
"password": {password},
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
// RequestToken requests a token from the Token Endpoint with the specified grantType.
// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.
func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) {
v := c.commonURLValues()
v.Set("grant_type", grantType)
v.Set("client_secret", c.creds.Secret)
switch grantType {
case GrantTypeAuthCode:
v.Set("code", value)
case GrantTypeRefreshToken:
v.Set("refresh_token", value)
default:
err = fmt.Errorf("unsupported grant_type: %v", grantType)
return
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299
contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
if err != nil {
return
}
result = TokenResponse{
RawBody: body,
}
newError := func(typ, desc, state string) error {
if typ == "" {
return fmt.Errorf("unrecognized error %s", body)
}
return &Error{typ, desc, state}
}
if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" {
var vals url.Values
vals, err = url.ParseQuery(string(body))
if err != nil {
return
}
if error := vals.Get("error"); error != "" || badStatusCode {
err = newError(error, vals.Get("error_description"), vals.Get("state"))
return
}
e := vals.Get("expires_in")
if e == "" {
e = vals.Get("expires")
}
if e != "" {
result.Expires, err = strconv.Atoi(e)
if err != nil {
return
}
}
result.AccessToken = vals.Get("access_token")
result.TokenType = vals.Get("token_type")
result.IDToken = vals.Get("id_token")
result.RefreshToken = vals.Get("refresh_token")
result.Scope = vals.Get("scope")
} else {
var r struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
RefreshToken string `json:"refresh_token"`
Scope string `json:"scope"`
State string `json:"state"`
ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string
Expires int `json:"expires"`
Error string `json:"error"`
Desc string `json:"error_description"`
}
if err = json.Unmarshal(body, &r); err != nil {
return
}
if r.Error != "" || badStatusCode {
err = newError(r.Error, r.Desc, r.State)
return
}
result.AccessToken = r.AccessToken
result.TokenType = r.TokenType
result.IDToken = r.IDToken
result.RefreshToken = r.RefreshToken
result.Scope = r.Scope
if expiresIn, err := r.ExpiresIn.Int64(); err != nil {
result.Expires = r.Expires
} else {
result.Expires = int(expiresIn)
}
}
return
}
type TokenResponse struct {
AccessToken string
TokenType string
Expires int
IDToken string
RefreshToken string // OPTIONAL.
Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED.
RawBody []byte // In case callers need some other non-standard info from the token response
}
type AuthCodeRequest struct {
ResponseType string
ClientID string
RedirectURL *url.URL
Scope []string
State string
}
func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) {
acr := AuthCodeRequest{
ResponseType: q.Get("response_type"),
ClientID: q.Get("client_id"),
State: q.Get("state"),
Scope: make([]string, 0),
}
qs := strings.TrimSpace(q.Get("scope"))
if qs != "" {
acr.Scope = strings.Split(qs, " ")
}
err := func() error {
if acr.ClientID == "" {
return NewError(ErrorInvalidRequest)
}
redirectURL := q.Get("redirect_uri")
if redirectURL != "" {
ru, err := url.Parse(redirectURL)
if err != nil {
return NewError(ErrorInvalidRequest)
}
acr.RedirectURL = ru
}
return nil
}()
return acr, err
}
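A sketch of the authorization code flow with the client above; the credentials and endpoint URLs are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	cfg := oauth2.Config{
		Credentials: oauth2.ClientCredentials{ID: "client-id", Secret: "client-secret"},
		Scope:       []string{"openid", "email"},
		RedirectURL: "https://app.example.com/callback",
		AuthURL:     "https://provider.example.com/authorize",
		TokenURL:    "https://provider.example.com/token",
		// AuthMethod defaults to client_secret_basic when left empty.
	}
	client, err := oauth2.NewClient(http.DefaultClient, cfg)
	if err != nil {
		panic(err)
	}

	// Redirect the user here to start the authorization code flow.
	fmt.Println(client.AuthCodeURL("opaque-state", "", ""))

	// After the provider redirects back with ?code=..., exchange it:
	// tok, err := client.RequestToken(oauth2.GrantTypeAuthCode, code)
}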


@@ -1,846 +0,0 @@
package oidc
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/mail"
"net/url"
"sync"
"time"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/key"
"github.com/coreos/go-oidc/oauth2"
)
const (
// amount of time that must pass after the last key sync
// completes before another attempt may begin
keySyncWindow = 5 * time.Second
)
var (
DefaultScope = []string{"openid", "email", "profile"}
supportedAuthMethods = map[string]struct{}{
oauth2.AuthMethodClientSecretBasic: struct{}{},
oauth2.AuthMethodClientSecretPost: struct{}{},
}
)
type ClientCredentials oauth2.ClientCredentials
type ClientIdentity struct {
Credentials ClientCredentials
Metadata ClientMetadata
}
type JWAOptions struct {
// SigningAlg specifies a JWA alg for signing JWTs.
//
// Specifying this field implies different actions depending on the context. It may
// require objects be serialized and signed as a JWT instead of plain JSON, or
// require an existing JWT object use the specified alg.
//
// See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
SigningAlg string
// EncryptionAlg, if provided, specifies that the returned or sent object be stored
// (or nested) within a JWT object and encrypted with the provided JWA alg.
EncryptionAlg string
// EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
// EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
// to A128CBC-HS256.
//
// If EncryptionEnc is provided EncryptionAlg must also be specified.
EncryptionEnc string
}
func (opt JWAOptions) valid() error {
if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
return errors.New("encryption encoding provided with no encryption algorithm")
}
return nil
}
func (opt JWAOptions) defaults() JWAOptions {
if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
opt.EncryptionEnc = jose.EncA128CBCHS256
}
return opt
}
var (
// Ensure ClientMetadata satisfies these interfaces.
_ json.Marshaler = &ClientMetadata{}
_ json.Unmarshaler = &ClientMetadata{}
)
// ClientMetadata holds metadata that the authorization server associates
// with a client identifier. The fields range from human-facing display
// strings such as client name, to items that impact the security of the
// protocol, such as the list of valid redirect URIs.
//
// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
//
// TODO: support language specific claim representations
// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
type ClientMetadata struct {
RedirectURIs []url.URL // Required
// A list of OAuth 2.0 "response_type" values that the client wishes to restrict
// itself to. Either "code", "token", or another registered extension.
//
// If omitted, only "code" will be used.
ResponseTypes []string
// A list of OAuth 2.0 grant types the client wishes to restrict itself to.
// The grant type values used by OIDC are "authorization_code", "implicit",
// and "refresh_token".
//
// If omitted, only "authorization_code" will be used.
GrantTypes []string
// "native" or "web". If omitted, "web".
ApplicationType string
// List of email addresses.
Contacts []mail.Address
// Name of client to be presented to the end-user.
ClientName string
// URL that references a logo for the Client application.
LogoURI *url.URL
// URL of the home page of the Client.
ClientURI *url.URL
// Profile data policies and terms of use to be provided to the end user.
PolicyURI *url.URL
TermsOfServiceURI *url.URL
// URL to or the value of the client's JSON Web Key Set document.
JWKSURI *url.URL
JWKS *jose.JWKSet
// URL referencing a file with a single JSON array of redirect URIs.
SectorIdentifierURI *url.URL
SubjectType string
// Options to restrict the JWS alg and enc values used for server responses and requests.
IDTokenResponseOptions JWAOptions
UserInfoResponseOptions JWAOptions
RequestObjectOptions JWAOptions
// Client requested authorization method and signing options for the token endpoint.
//
// Defaults to "client_secret_basic"
TokenEndpointAuthMethod string
TokenEndpointAuthSigningAlg string
// DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
// user must reauthorize.
//
// If 0, no limitation is placed on the maximum.
DefaultMaxAge int64
// RequireAuthTime specifies if the auth_time claim in the ID token is required.
RequireAuthTime bool
// Default Authentication Context Class Reference values for authentication requests.
DefaultACRValues []string
// URI that a third party can use to initiate a login by the relying party.
//
// See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
InitiateLoginURI *url.URL
// Pre-registered request_uri values that may be cached by the server.
RequestURIs []url.URL
}
// Defaults returns a shallow copy of ClientMetadata with default
// values replacing omitted fields.
func (m ClientMetadata) Defaults() ClientMetadata {
if len(m.ResponseTypes) == 0 {
m.ResponseTypes = []string{oauth2.ResponseTypeCode}
}
if len(m.GrantTypes) == 0 {
m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
}
if m.ApplicationType == "" {
m.ApplicationType = "web"
}
if m.TokenEndpointAuthMethod == "" {
m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
}
m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
m.RequestObjectOptions = m.RequestObjectOptions.defaults()
return m
}
func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
e := m.toEncodableStruct()
return json.Marshal(&e)
}
func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
var e encodableClientMetadata
if err := json.Unmarshal(data, &e); err != nil {
return err
}
meta, err := e.toStruct()
if err != nil {
return err
}
if err := meta.Valid(); err != nil {
return err
}
*m = meta
return nil
}
type encodableClientMetadata struct {
RedirectURIs []string `json:"redirect_uris"` // Required
ResponseTypes []string `json:"response_types,omitempty"`
GrantTypes []string `json:"grant_types,omitempty"`
ApplicationType string `json:"application_type,omitempty"`
Contacts []string `json:"contacts,omitempty"`
ClientName string `json:"client_name,omitempty"`
LogoURI string `json:"logo_uri,omitempty"`
ClientURI string `json:"client_uri,omitempty"`
PolicyURI string `json:"policy_uri,omitempty"`
TermsOfServiceURI string `json:"tos_uri,omitempty"`
JWKSURI string `json:"jwks_uri,omitempty"`
JWKS *jose.JWKSet `json:"jwks,omitempty"`
SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
SubjectType string `json:"subject_type,omitempty"`
IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
DefaultMaxAge int64 `json:"default_max_age,omitempty"`
RequireAuthTime bool `json:"require_auth_time,omitempty"`
DefaultACRValues []string `json:"default_acr_values,omitempty"`
InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
RequestURIs []string `json:"request_uris,omitempty"`
}
func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
p := stickyErrParser{}
m := ClientMetadata{
RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
ResponseTypes: c.ResponseTypes,
GrantTypes: c.GrantTypes,
ApplicationType: c.ApplicationType,
Contacts: p.parseEmails(c.Contacts, "contacts"),
ClientName: c.ClientName,
LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
ClientURI: p.parseURI(c.ClientURI, "client_uri"),
PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
JWKS: c.JWKS,
SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
SubjectType: c.SubjectType,
TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
DefaultMaxAge: c.DefaultMaxAge,
RequireAuthTime: c.RequireAuthTime,
DefaultACRValues: c.DefaultACRValues,
InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
IDTokenResponseOptions: JWAOptions{
c.IDTokenSignedResponseAlg,
c.IDTokenEncryptedResponseAlg,
c.IDTokenEncryptedResponseEnc,
},
UserInfoResponseOptions: JWAOptions{
c.UserInfoSignedResponseAlg,
c.UserInfoEncryptedResponseAlg,
c.UserInfoEncryptedResponseEnc,
},
RequestObjectOptions: JWAOptions{
c.RequestObjectSigningAlg,
c.RequestObjectEncryptionAlg,
c.RequestObjectEncryptionEnc,
},
}
if p.firstErr != nil {
return ClientMetadata{}, p.firstErr
}
return m, nil
}
// stickyErrParser parses URIs and email addresses. Once it encounters
// a parse error, subsequent calls become no-ops.
type stickyErrParser struct {
firstErr error
}
func (p *stickyErrParser) parseURI(s, field string) *url.URL {
if p.firstErr != nil || s == "" {
return nil
}
u, err := url.Parse(s)
if err == nil {
if u.Host == "" {
err = errors.New("no host in URI")
} else if u.Scheme != "http" && u.Scheme != "https" {
err = errors.New("invalid URI scheme")
}
}
if err != nil {
p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
return nil
}
return u
}
func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
if p.firstErr != nil || len(s) == 0 {
return nil
}
uris := make([]url.URL, len(s))
for i, val := range s {
if val == "" {
p.firstErr = fmt.Errorf("invalid URI in field %s", field)
return nil
}
if u := p.parseURI(val, field); u != nil {
uris[i] = *u
}
}
return uris
}
func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
if p.firstErr != nil || len(s) == 0 {
return nil
}
addrs := make([]mail.Address, len(s))
for i, addr := range s {
if addr == "" {
p.firstErr = fmt.Errorf("invalid email in field %s", field)
return nil
}
a, err := mail.ParseAddress(addr)
if err != nil {
p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
return nil
}
addrs[i] = *a
}
return addrs
}
func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
return encodableClientMetadata{
RedirectURIs: urisToStrings(m.RedirectURIs),
ResponseTypes: m.ResponseTypes,
GrantTypes: m.GrantTypes,
ApplicationType: m.ApplicationType,
Contacts: emailsToStrings(m.Contacts),
ClientName: m.ClientName,
LogoURI: uriToString(m.LogoURI),
ClientURI: uriToString(m.ClientURI),
PolicyURI: uriToString(m.PolicyURI),
TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
JWKSURI: uriToString(m.JWKSURI),
JWKS: m.JWKS,
SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
SubjectType: m.SubjectType,
IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
DefaultMaxAge: m.DefaultMaxAge,
RequireAuthTime: m.RequireAuthTime,
DefaultACRValues: m.DefaultACRValues,
InitiateLoginURI: uriToString(m.InitiateLoginURI),
RequestURIs: urisToStrings(m.RequestURIs),
}
}
func uriToString(u *url.URL) string {
if u == nil {
return ""
}
return u.String()
}
func urisToStrings(urls []url.URL) []string {
if len(urls) == 0 {
return nil
}
sli := make([]string, len(urls))
for i, u := range urls {
sli[i] = u.String()
}
return sli
}
func emailsToStrings(addrs []mail.Address) []string {
if len(addrs) == 0 {
return nil
}
sli := make([]string, len(addrs))
for i, addr := range addrs {
sli[i] = addr.String()
}
return sli
}
// Valid determines if a ClientMetadata conforms with the OIDC specification.
//
// Valid is called by UnmarshalJSON.
//
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
// URL fields where the OIDC spec requires it. This may change in future releases
// of this package. See: https://github.com/coreos/go-oidc/issues/34
func (m *ClientMetadata) Valid() error {
if len(m.RedirectURIs) == 0 {
return errors.New("zero redirect URLs")
}
validURI := func(u *url.URL, fieldName string) error {
if u.Host == "" {
return fmt.Errorf("no host for uri field %s", fieldName)
}
if u.Scheme != "http" && u.Scheme != "https" {
return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
}
return nil
}
uris := []struct {
val *url.URL
name string
}{
{m.LogoURI, "logo_uri"},
{m.ClientURI, "client_uri"},
{m.PolicyURI, "policy_uri"},
{m.TermsOfServiceURI, "tos_uri"},
{m.JWKSURI, "jwks_uri"},
{m.SectorIdentifierURI, "sector_identifier_uri"},
{m.InitiateLoginURI, "initiate_login_uri"},
}
for _, uri := range uris {
if uri.val == nil {
continue
}
if err := validURI(uri.val, uri.name); err != nil {
return err
}
}
uriLists := []struct {
vals []url.URL
name string
}{
{m.RedirectURIs, "redirect_uris"},
{m.RequestURIs, "request_uris"},
}
for _, list := range uriLists {
for _, uri := range list.vals {
if err := validURI(&uri, list.name); err != nil {
return err
}
}
}
options := []struct {
option JWAOptions
name string
}{
{m.IDTokenResponseOptions, "id_token response"},
{m.UserInfoResponseOptions, "userinfo response"},
{m.RequestObjectOptions, "request_object"},
}
for _, option := range options {
if err := option.option.valid(); err != nil {
return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
}
}
return nil
}
type ClientRegistrationResponse struct {
ClientID string // Required
ClientSecret string
RegistrationAccessToken string
RegistrationClientURI string
// If IsZero is true, unspecified.
ClientIDIssuedAt time.Time
// Time at which the client_secret will expire.
// If IsZero is true, it will not expire.
ClientSecretExpiresAt time.Time
ClientMetadata
}
type encodableClientRegistrationResponse struct {
ClientID string `json:"client_id"` // Required
ClientSecret string `json:"client_secret,omitempty"`
RegistrationAccessToken string `json:"registration_access_token,omitempty"`
RegistrationClientURI string `json:"registration_client_uri,omitempty"`
ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
// Time at which the client_secret will expire, in seconds since the epoch.
// If 0 it will not expire.
ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
encodableClientMetadata
}
func unixToSec(t time.Time) int64 {
if t.IsZero() {
return 0
}
return t.Unix()
}
func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
e := encodableClientRegistrationResponse{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RegistrationAccessToken: c.RegistrationAccessToken,
RegistrationClientURI: c.RegistrationClientURI,
ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
}
return json.Marshal(&e)
}
func secToUnix(sec int64) time.Time {
if sec == 0 {
return time.Time{}
}
return time.Unix(sec, 0)
}
func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
var e encodableClientRegistrationResponse
if err := json.Unmarshal(data, &e); err != nil {
return err
}
if e.ClientID == "" {
return errors.New("no client_id in client registration response")
}
metadata, err := e.encodableClientMetadata.toStruct()
if err != nil {
return err
}
*c = ClientRegistrationResponse{
ClientID: e.ClientID,
ClientSecret: e.ClientSecret,
RegistrationAccessToken: e.RegistrationAccessToken,
RegistrationClientURI: e.RegistrationClientURI,
ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
ClientMetadata: metadata,
}
return nil
}
type ClientConfig struct {
HTTPClient phttp.Client
Credentials ClientCredentials
Scope []string
RedirectURL string
ProviderConfig ProviderConfig
KeySet key.PublicKeySet
}
func NewClient(cfg ClientConfig) (*Client, error) {
// Allow empty redirect URL in the case where the client
// only needs to verify a given token.
ru, err := url.Parse(cfg.RedirectURL)
if err != nil {
return nil, fmt.Errorf("invalid redirect URL: %v", err)
}
c := Client{
credentials: cfg.Credentials,
httpClient: cfg.HTTPClient,
scope: cfg.Scope,
redirectURL: ru.String(),
providerConfig: newProviderConfigRepo(cfg.ProviderConfig),
keySet: cfg.KeySet,
}
if c.httpClient == nil {
c.httpClient = http.DefaultClient
}
if c.scope == nil {
c.scope = make([]string, len(DefaultScope))
copy(c.scope, DefaultScope)
}
return &c, nil
}
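// Example (illustrative sketch; the issuer URL and credential values are
// placeholders, and ClientCredentials is assumed to carry an ID and Secret):
// wiring up a Client against a discovered provider.
//
//	cfg, err := FetchProviderConfig(nil, "https://issuer.example.com")
//	if err != nil {
//		// handle discovery error
//	}
//	client, err := NewClient(ClientConfig{
//		Credentials:    ClientCredentials{ID: "client-id", Secret: "client-secret"},
//		RedirectURL:    "https://app.example.com/callback",
//		ProviderConfig: cfg,
//	})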
type Client struct {
httpClient phttp.Client
providerConfig *providerConfigRepo
credentials ClientCredentials
redirectURL string
scope []string
keySet key.PublicKeySet
providerSyncer *ProviderConfigSyncer
keySetSyncMutex sync.RWMutex
lastKeySetSync time.Time
}
func (c *Client) Healthy() error {
now := time.Now().UTC()
cfg := c.providerConfig.Get()
if cfg.Empty() {
return errors.New("oidc client provider config empty")
}
if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) {
return errors.New("oidc client provider config expired")
}
return nil
}
func (c *Client) OAuthClient() (*oauth2.Client, error) {
cfg := c.providerConfig.Get()
authMethod, err := chooseAuthMethod(cfg)
if err != nil {
return nil, err
}
ocfg := oauth2.Config{
Credentials: oauth2.ClientCredentials(c.credentials),
RedirectURL: c.redirectURL,
AuthURL: cfg.AuthEndpoint.String(),
TokenURL: cfg.TokenEndpoint.String(),
Scope: c.scope,
AuthMethod: authMethod,
}
return oauth2.NewClient(c.httpClient, ocfg)
}
func chooseAuthMethod(cfg ProviderConfig) (string, error) {
if len(cfg.TokenEndpointAuthMethodsSupported) == 0 {
return oauth2.AuthMethodClientSecretBasic, nil
}
for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported {
if _, ok := supportedAuthMethods[authMethod]; ok {
return authMethod, nil
}
}
return "", errors.New("no supported auth methods")
}
// SyncProviderConfig starts the provider config syncer
func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
s := NewProviderConfigSyncer(r, c.providerConfig)
stop := s.Run()
s.WaitUntilInitialSync()
return stop
}
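// Example (illustrative sketch; the issuer URL is a placeholder): keep the
// provider config refreshed in the background, and stop the syncer by closing
// the returned channel.
//
//	stop := client.SyncProviderConfig("https://issuer.example.com")
//	defer close(stop)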
func (c *Client) maybeSyncKeys() error {
tooSoon := func() bool {
return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow))
}
// ignore request to sync keys if a sync operation has been
// attempted too recently
if tooSoon() {
return nil
}
c.keySetSyncMutex.Lock()
defer c.keySetSyncMutex.Unlock()
// check again, as another goroutine may have been holding
// the lock while updating the keys
if tooSoon() {
return nil
}
cfg := c.providerConfig.Get()
r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
w := &clientKeyRepo{client: c}
_, err := key.Sync(r, w)
c.lastKeySetSync = time.Now().UTC()
return err
}
type clientKeyRepo struct {
client *Client
}
func (r *clientKeyRepo) Set(ks key.KeySet) error {
pks, ok := ks.(*key.PublicKeySet)
if !ok {
return errors.New("unable to cast to PublicKey")
}
r.client.keySet = *pks
return nil
}
func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) {
cfg := c.providerConfig.Get()
if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) {
return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds)
}
oac, err := c.OAuthClient()
if err != nil {
return jose.JWT{}, err
}
t, err := oac.ClientCredsToken(scope)
if err != nil {
return jose.JWT{}, err
}
jwt, err := jose.ParseJWT(t.IDToken)
if err != nil {
return jose.JWT{}, err
}
return jwt, c.VerifyJWT(jwt)
}
// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token.
func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) {
oac, err := c.OAuthClient()
if err != nil {
return jose.JWT{}, err
}
t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code)
if err != nil {
return jose.JWT{}, err
}
jwt, err := jose.ParseJWT(t.IDToken)
if err != nil {
return jose.JWT{}, err
}
return jwt, c.VerifyJWT(jwt)
}
// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token.
func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) {
oac, err := c.OAuthClient()
if err != nil {
return jose.JWT{}, err
}
t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken)
if err != nil {
return jose.JWT{}, err
}
jwt, err := jose.ParseJWT(t.IDToken)
if err != nil {
return jose.JWT{}, err
}
return jwt, c.VerifyJWT(jwt)
}
func (c *Client) VerifyJWT(jwt jose.JWT) error {
var keysFunc func() []key.PublicKey
if kID, ok := jwt.KeyID(); ok {
keysFunc = c.keysFuncWithID(kID)
} else {
keysFunc = c.keysFuncAll()
}
v := NewJWTVerifier(
c.providerConfig.Get().Issuer.String(),
c.credentials.ID,
c.maybeSyncKeys, keysFunc)
return v.Verify(jwt)
}
// keysFuncWithID returns a function that retrieves at most one unexpired
// public key from the Client that matches the provided ID
func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey {
return func() []key.PublicKey {
c.keySetSyncMutex.RLock()
defer c.keySetSyncMutex.RUnlock()
if c.keySet.ExpiresAt().Before(time.Now()) {
return []key.PublicKey{}
}
k := c.keySet.Key(kID)
if k == nil {
return []key.PublicKey{}
}
return []key.PublicKey{*k}
}
}
// keysFuncAll returns a function that retrieves all unexpired public
// keys from the Client
func (c *Client) keysFuncAll() func() []key.PublicKey {
return func() []key.PublicKey {
c.keySetSyncMutex.RLock()
defer c.keySetSyncMutex.RUnlock()
if c.keySet.ExpiresAt().Before(time.Now()) {
return []key.PublicKey{}
}
return c.keySet.Keys()
}
}
type providerConfigRepo struct {
mu sync.RWMutex
config ProviderConfig // do not access directly, use Get()
}
func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
return &providerConfigRepo{sync.RWMutex{}, pc}
}
// returns an error to implement ProviderConfigSetter
func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
r.mu.Lock()
defer r.mu.Unlock()
r.config = cfg
return nil
}
func (r *providerConfigRepo) Get() ProviderConfig {
r.mu.RLock()
defer r.mu.RUnlock()
return r.config
}


@@ -1,44 +0,0 @@
package oidc
import (
"errors"
"time"
"github.com/coreos/go-oidc/jose"
)
type Identity struct {
ID string
Name string
Email string
ExpiresAt time.Time
}
func IdentityFromClaims(claims jose.Claims) (*Identity, error) {
if claims == nil {
return nil, errors.New("nil claim set")
}
var ident Identity
var err error
var ok bool
if ident.ID, ok, err = claims.StringClaim("sub"); err != nil {
return nil, err
} else if !ok {
return nil, errors.New("missing required claim: sub")
}
if ident.Email, _, err = claims.StringClaim("email"); err != nil {
return nil, err
}
exp, ok, err := claims.TimeClaim("exp")
if err != nil {
return nil, err
} else if ok {
ident.ExpiresAt = exp
}
return &ident, nil
}
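// For example, a claim set of {"sub": "user-1234", "email": "user@example.com",
// "exp": <expiry>} yields an Identity with ID "user-1234", that email address,
// and the parsed expiry time; note that Name is never populated by this function.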


@@ -1,3 +0,0 @@
package oidc
type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error)


@@ -1,67 +0,0 @@
package oidc
import (
"encoding/json"
"errors"
"net/http"
"time"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/key"
)
// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
// Cache-Control header is provided by the JWK Set document endpoint.
const DefaultPublicKeySetTTL = 24 * time.Hour
// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document.
func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
return &remotePublicKeyRepo{hc: hc, ep: ep}
}
type remotePublicKeyRepo struct {
hc phttp.Client
ep string
}
// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
// is set on the Key Set to avoid it having to be re-retrieved for every
// encryption event. This TTL is typically controlled by the endpoint returning
// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
// is found.
func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
req, err := http.NewRequest("GET", r.ep, nil)
if err != nil {
return nil, err
}
resp, err := r.hc.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var d struct {
Keys []jose.JWK `json:"keys"`
}
if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
return nil, err
}
if len(d.Keys) == 0 {
return nil, errors.New("zero keys in response")
}
ttl, ok, err := phttp.Cacheable(resp.Header)
if err != nil {
return nil, err
}
if !ok {
ttl = DefaultPublicKeySetTTL
}
exp := time.Now().UTC().Add(ttl)
ks := key.NewPublicKeySet(d.Keys, exp)
return ks, nil
}


@@ -1,690 +0,0 @@
package oidc
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/coreos/pkg/timeutil"
"github.com/jonboulle/clockwork"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/oauth2"
)
const (
// Subject Identifier types defined by the OIDC spec. Specifies if the provider
// should provide the same sub claim value to all clients (public) or a unique
// value for each client (pairwise).
//
// See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
SubjectTypePublic = "public"
SubjectTypePairwise = "pairwise"
)
var (
// Default values for omitted provider config fields.
//
// Use ProviderConfig's Defaults method to fill a provider config with these values.
DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
DefaultResponseModesSupported = []string{"query", "fragment"}
DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
DefaultClaimTypesSupported = []string{"normal"}
)
const (
MaximumProviderConfigSyncInterval = 24 * time.Hour
MinimumProviderConfigSyncInterval = time.Minute
discoveryConfigPath = "/.well-known/openid-configuration"
)
// internally configurable for tests
var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
var (
// Ensure ProviderConfig satisfies these interfaces.
_ json.Marshaler = &ProviderConfig{}
_ json.Unmarshaler = &ProviderConfig{}
)
// ProviderConfig represents the OpenID Provider Metadata specifying what
// configurations a provider supports.
//
// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
type ProviderConfig struct {
Issuer *url.URL // Required
AuthEndpoint *url.URL // Required
TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
UserInfoEndpoint *url.URL
KeysEndpoint *url.URL // Required
RegistrationEndpoint *url.URL
EndSessionEndpoint *url.URL
CheckSessionIFrame *url.URL
// Servers MAY choose not to advertise some supported scope values even when this
// parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
ScopesSupported []string
// OAuth2.0 response types supported.
ResponseTypesSupported []string // Required
// OAuth2.0 response modes supported.
//
// If omitted, defaults to DefaultResponseModesSupported.
ResponseModesSupported []string
// OAuth2.0 grant types supported.
//
// If omitted, defaults to DefaultGrantTypesSupported.
GrantTypesSupported []string
ACRValuesSupported []string
// SubjectTypesSupported specifies strategies for providing values for the sub claim.
SubjectTypesSupported []string // Required
// JWA signing and encryption algorithm values supported for ID tokens.
IDTokenSigningAlgValues []string // Required
IDTokenEncryptionAlgValues []string
IDTokenEncryptionEncValues []string
// JWA signing and encryption algorithm values supported for user info responses.
UserInfoSigningAlgValues []string
UserInfoEncryptionAlgValues []string
UserInfoEncryptionEncValues []string
// JWA signing and encryption algorithm values supported for request objects.
ReqObjSigningAlgValues []string
ReqObjEncryptionAlgValues []string
ReqObjEncryptionEncValues []string
TokenEndpointAuthMethodsSupported []string
TokenEndpointAuthSigningAlgValuesSupported []string
DisplayValuesSupported []string
ClaimTypesSupported []string
ClaimsSupported []string
ServiceDocs *url.URL
ClaimsLocalsSupported []string
UILocalsSupported []string
ClaimsParameterSupported bool
RequestParameterSupported bool
RequestURIParamaterSupported bool
RequireRequestURIRegistration bool
Policy *url.URL
TermsOfService *url.URL
// Not part of the OpenID Provider Metadata
ExpiresAt time.Time
}
// Defaults returns a shallow copy of ProviderConfig with default
// values replacing omitted fields.
//
// var cfg oidc.ProviderConfig
// // Fill provider config with default values for omitted fields.
// cfg = cfg.Defaults()
//
func (p ProviderConfig) Defaults() ProviderConfig {
setDefault := func(val *[]string, defaultVal []string) {
if len(*val) == 0 {
*val = defaultVal
}
}
setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
return p
}
func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
e := p.toEncodableStruct()
return json.Marshal(&e)
}
func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
var e encodableProviderConfig
if err := json.Unmarshal(data, &e); err != nil {
return err
}
conf, err := e.toStruct()
if err != nil {
return err
}
if err := conf.Valid(); err != nil {
return err
}
*p = conf
return nil
}
type encodableProviderConfig struct {
Issuer string `json:"issuer"`
AuthEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
KeysEndpoint string `json:"jwks_uri"`
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
EndSessionEndpoint string `json:"end_session_endpoint,omitempty"`
CheckSessionIFrame string `json:"check_session_iframe,omitempty"`
// Use 'omitempty' for all slices as per OIDC spec:
// "Claims that return multiple values are represented as JSON arrays.
// Claims with zero elements MUST be omitted from the response."
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
ScopesSupported []string `json:"scopes_supported,omitempty"`
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
ClaimsSupported []string `json:"claims_supported,omitempty"`
ServiceDocs string `json:"service_documentation,omitempty"`
ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
Policy string `json:"op_policy_uri,omitempty"`
TermsOfService string `json:"op_tos_uri,omitempty"`
}
func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
return encodableProviderConfig{
Issuer: uriToString(cfg.Issuer),
AuthEndpoint: uriToString(cfg.AuthEndpoint),
TokenEndpoint: uriToString(cfg.TokenEndpoint),
UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
KeysEndpoint: uriToString(cfg.KeysEndpoint),
RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
EndSessionEndpoint: uriToString(cfg.EndSessionEndpoint),
CheckSessionIFrame: uriToString(cfg.CheckSessionIFrame),
ScopesSupported: cfg.ScopesSupported,
ResponseTypesSupported: cfg.ResponseTypesSupported,
ResponseModesSupported: cfg.ResponseModesSupported,
GrantTypesSupported: cfg.GrantTypesSupported,
ACRValuesSupported: cfg.ACRValuesSupported,
SubjectTypesSupported: cfg.SubjectTypesSupported,
IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
DisplayValuesSupported: cfg.DisplayValuesSupported,
ClaimTypesSupported: cfg.ClaimTypesSupported,
ClaimsSupported: cfg.ClaimsSupported,
ServiceDocs: uriToString(cfg.ServiceDocs),
ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
UILocalsSupported: cfg.UILocalsSupported,
ClaimsParameterSupported: cfg.ClaimsParameterSupported,
RequestParameterSupported: cfg.RequestParameterSupported,
RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
Policy: uriToString(cfg.Policy),
TermsOfService: uriToString(cfg.TermsOfService),
}
}
func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
p := stickyErrParser{}
conf := ProviderConfig{
Issuer: p.parseURI(e.Issuer, "issuer"),
AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
EndSessionEndpoint: p.parseURI(e.EndSessionEndpoint, "end_session_endpoint"),
CheckSessionIFrame: p.parseURI(e.CheckSessionIFrame, "check_session_iframe"),
ScopesSupported: e.ScopesSupported,
ResponseTypesSupported: e.ResponseTypesSupported,
ResponseModesSupported: e.ResponseModesSupported,
GrantTypesSupported: e.GrantTypesSupported,
ACRValuesSupported: e.ACRValuesSupported,
SubjectTypesSupported: e.SubjectTypesSupported,
IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
DisplayValuesSupported: e.DisplayValuesSupported,
ClaimTypesSupported: e.ClaimTypesSupported,
ClaimsSupported: e.ClaimsSupported,
ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
ClaimsLocalsSupported: e.ClaimsLocalsSupported,
UILocalsSupported: e.UILocalsSupported,
ClaimsParameterSupported: e.ClaimsParameterSupported,
RequestParameterSupported: e.RequestParameterSupported,
RequestURIParamaterSupported: e.RequestURIParamaterSupported,
RequireRequestURIRegistration: e.RequireRequestURIRegistration,
Policy: p.parseURI(e.Policy, "op_policy_uri"),
TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
}
if p.firstErr != nil {
return ProviderConfig{}, p.firstErr
}
return conf, nil
}
// Empty returns true if a ProviderConfig holds no information.
//
// This case generally indicates a ProviderConfigGetter has experienced an error
// and has nothing to report.
func (p ProviderConfig) Empty() bool {
return p.Issuer == nil
}
func contains(sli []string, ele string) bool {
for _, s := range sli {
if s == ele {
return true
}
}
return false
}
// Valid determines if a ProviderConfig conforms with the OIDC specification.
// If Valid returns successfully it guarantees required fields are non-nil and
// URLs are well formed.
//
// Valid is called by UnmarshalJSON.
//
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
// URLs fields where the OIDC spec requires it. This may change in future releases
// of this package. See: https://github.com/coreos/go-oidc/issues/34
func (p ProviderConfig) Valid() error {
grantTypes := p.GrantTypesSupported
if len(grantTypes) == 0 {
grantTypes = DefaultGrantTypesSupported
}
implicitOnly := true
for _, grantType := range grantTypes {
if grantType != oauth2.GrantTypeImplicit {
implicitOnly = false
break
}
}
if len(p.SubjectTypesSupported) == 0 {
return errors.New("missing required field subject_types_supported")
}
if len(p.IDTokenSigningAlgValues) == 0 {
return errors.New("missing required field id_token_signing_alg_values_supported")
}
if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
return errors.New("scoped_supported must be unspecified or include 'openid'")
}
if !contains(p.IDTokenSigningAlgValues, "RS256") {
return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
}
if contains(p.TokenEndpointAuthMethodsSupported, "none") {
return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'")
}
uris := []struct {
val *url.URL
name string
required bool
}{
{p.Issuer, "issuer", true},
{p.AuthEndpoint, "authorization_endpoint", true},
{p.TokenEndpoint, "token_endpoint", !implicitOnly},
{p.UserInfoEndpoint, "userinfo_endpoint", false},
{p.KeysEndpoint, "jwks_uri", true},
{p.RegistrationEndpoint, "registration_endpoint", false},
{p.EndSessionEndpoint, "end_session_endpoint", false},
{p.CheckSessionIFrame, "check_session_iframe", false},
{p.ServiceDocs, "service_documentation", false},
{p.Policy, "op_policy_uri", false},
{p.TermsOfService, "op_tos_uri", false},
}
for _, uri := range uris {
if uri.val == nil {
if !uri.required {
continue
}
return fmt.Errorf("empty value for required uri field %s", uri.name)
}
if uri.val.Host == "" {
return fmt.Errorf("no host for uri field %s", uri.name)
}
if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
return fmt.Errorf("uri field %s schemeis not http or https", uri.name)
}
}
return nil
}
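// Example (illustrative sketch; the URLs are placeholders): a minimal
// ProviderConfig that satisfies Valid.
//
//	issuer, _ := url.Parse("https://issuer.example.com")
//	cfg := ProviderConfig{
//		Issuer:                  issuer,
//		AuthEndpoint:            issuer.ResolveReference(&url.URL{Path: "/auth"}),
//		TokenEndpoint:           issuer.ResolveReference(&url.URL{Path: "/token"}),
//		KeysEndpoint:            issuer.ResolveReference(&url.URL{Path: "/keys"}),
//		SubjectTypesSupported:   []string{SubjectTypePublic},
//		IDTokenSigningAlgValues: []string{"RS256"},
//	}
//	// cfg.Valid() returns nil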
// Supports determines if provider supports a client given their respective metadata.
func (p ProviderConfig) Supports(c ClientMetadata) error {
if err := p.Valid(); err != nil {
return fmt.Errorf("invalid provider config: %v", err)
}
if err := c.Valid(); err != nil {
return fmt.Errorf("invalid client config: %v", err)
}
// Fill default values for omitted fields
c = c.Defaults()
p = p.Defaults()
// Do the supported values list the requested one?
supports := []struct {
supported []string
requested string
name string
}{
{p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
{p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
{p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
{p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
{p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
{p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
{p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
{p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
{p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
}
for _, field := range supports {
if field.requested == "" {
continue
}
if !contains(field.supported, field.requested) {
return fmt.Errorf("provider does not support requested value for field %s", field.name)
}
}
stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
// For lists, is the list of requested values a subset of the supported ones?
supportsAll := []struct {
supported []string
requested []string
name string
// OAuth2.0 response_type can be space separated lists where order doesn't matter.
// For example "id_token token" is the same as "token id_token"
// Support a custom compare method.
comp func(s1, s2 string) bool
}{
{p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
{p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
}
for _, field := range supportsAll {
requestLoop:
for _, req := range field.requested {
for _, sup := range field.supported {
if field.comp(req, sup) {
continue requestLoop
}
}
return fmt.Errorf("provider does not support requested value for field %s", field.name)
}
}
// TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
return nil
}
func (p ProviderConfig) SupportsGrantType(grantType string) bool {
var supported []string
if len(p.GrantTypesSupported) == 0 {
supported = DefaultGrantTypesSupported
} else {
supported = p.GrantTypesSupported
}
for _, t := range supported {
if t == grantType {
return true
}
}
return false
}
type ProviderConfigGetter interface {
Get() (ProviderConfig, error)
}
type ProviderConfigSetter interface {
Set(ProviderConfig) error
}
type ProviderConfigSyncer struct {
from ProviderConfigGetter
to ProviderConfigSetter
clock clockwork.Clock
initialSyncDone bool
initialSyncWait sync.WaitGroup
}
func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
return &ProviderConfigSyncer{
from: from,
to: to,
clock: clockwork.NewRealClock(),
}
}
func (s *ProviderConfigSyncer) Run() chan struct{} {
stop := make(chan struct{})
var next pcsStepper
next = &pcsStepNext{aft: time.Duration(0)}
s.initialSyncWait.Add(1)
go func() {
for {
select {
case <-s.clock.After(next.after()):
next = next.step(s.sync)
case <-stop:
return
}
}
}()
return stop
}
func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
s.initialSyncWait.Wait()
}
func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
cfg, err := s.from.Get()
if err != nil {
return 0, err
}
if err = s.to.Set(cfg); err != nil {
return 0, fmt.Errorf("error setting provider config: %v", err)
}
if !s.initialSyncDone {
s.initialSyncWait.Done()
s.initialSyncDone = true
}
return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
}
type pcsStepFunc func() (time.Duration, error)
type pcsStepper interface {
after() time.Duration
step(pcsStepFunc) pcsStepper
}
type pcsStepNext struct {
aft time.Duration
}
func (n *pcsStepNext) after() time.Duration {
return n.aft
}
func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
ttl, err := fn()
if err == nil {
next = &pcsStepNext{aft: ttl}
} else {
next = &pcsStepRetry{aft: time.Second}
log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err)
}
return
}
type pcsStepRetry struct {
aft time.Duration
}
func (r *pcsStepRetry) after() time.Duration {
return r.aft
}
func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
ttl, err := fn()
if err == nil {
next = &pcsStepNext{aft: ttl}
} else {
next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err)
}
return
}
func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration {
if exp.IsZero() {
return MaximumProviderConfigSyncInterval
}
t := exp.Sub(clock.Now()) / 2
if t > MaximumProviderConfigSyncInterval {
t = MaximumProviderConfigSyncInterval
} else if t < minimumProviderConfigSyncInterval {
t = minimumProviderConfigSyncInterval
}
return t
}
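// For example, a config expiring two hours from now is re-synced after one hour
// (half the remaining lifetime); the result is always clamped between the
// minimum (one minute) and maximum (24 hour) sync intervals, and a zero expiry
// falls back to the maximum.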
type httpProviderConfigGetter struct {
hc phttp.Client
issuerURL string
clock clockwork.Clock
}
func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter {
return &httpProviderConfigGetter{
hc: hc,
issuerURL: issuerURL,
clock: clockwork.NewRealClock(),
}
}
func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
// If the Issuer value contains a path component, any terminating / MUST be removed before
// appending /.well-known/openid-configuration.
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
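// For example, an issuer of "https://accounts.example.com/" (trailing slash) is
// queried at "https://accounts.example.com/.well-known/openid-configuration".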
discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
req, err := http.NewRequest("GET", discoveryURL, nil)
if err != nil {
return
}
resp, err := r.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
return
}
var ttl time.Duration
var ok bool
ttl, ok, err = phttp.Cacheable(resp.Header)
if err != nil {
return
} else if ok {
cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl)
}
// The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
return
}
return
}
func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) {
if hc == nil {
hc = http.DefaultClient
}
g := NewHTTPProviderConfigGetter(hc, issuerURL)
return g.Get()
}
func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) {
return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock())
}
func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) {
var sleep time.Duration
var err error
for {
pcfg, err = FetchProviderConfig(hc, issuerURL)
if err == nil {
break
}
sleep = timeutil.ExpBackoff(sleep, time.Minute)
fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err)
time.Sleep(sleep)
}
return
}


@@ -1,88 +0,0 @@
package oidc
import (
"fmt"
"net/http"
"sync"
phttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
)
type TokenRefresher interface {
// Verify checks if the provided token is currently valid or not.
Verify(jose.JWT) error
// Refresh attempts to authenticate and retrieve a new token.
Refresh() (jose.JWT, error)
}
type ClientCredsTokenRefresher struct {
Issuer string
OIDCClient *Client
}
func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) {
_, err = VerifyClientClaims(jwt, c.Issuer)
return
}
func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) {
if err = c.OIDCClient.Healthy(); err != nil {
err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err)
return
}
jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"})
if err != nil {
err = fmt.Errorf("unable to verify auth code with issuer: %v", err)
return
}
return
}
type AuthenticatedTransport struct {
TokenRefresher
http.RoundTripper
mu sync.Mutex
jwt jose.JWT
}
func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
t.mu.Lock()
defer t.mu.Unlock()
if t.TokenRefresher.Verify(t.jwt) == nil {
return t.jwt, nil
}
jwt, err := t.TokenRefresher.Refresh()
if err != nil {
return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err)
}
t.jwt = jwt
return t.jwt, nil
}
// SetJWT sets the JWT held by the Transport.
// This is useful for cases in which you want to set an initial JWT.
func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
t.mu.Lock()
defer t.mu.Unlock()
t.jwt = jwt
}
func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
jwt, err := t.verifiedJWT()
if err != nil {
return nil, err
}
req := phttp.CopyRequest(r)
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode()))
return t.RoundTripper.RoundTrip(req)
}
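// Example (illustrative sketch; the issuer URL is a placeholder and client is a
// *Client constructed elsewhere): an http.Client that attaches a verified ID
// token to every outgoing request.
//
//	hc := &http.Client{
//		Transport: &AuthenticatedTransport{
//			TokenRefresher: &ClientCredsTokenRefresher{
//				Issuer:     "https://issuer.example.com",
//				OIDCClient: client,
//			},
//			RoundTripper: http.DefaultTransport,
//		},
//	}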


@@ -1,109 +0,0 @@
package oidc
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/coreos/go-oidc/jose"
)
// RequestTokenExtractor funcs extract a raw encoded token from a request.
type RequestTokenExtractor func(r *http.Request) (string, error)
// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's
// Authorization header.
func ExtractBearerToken(r *http.Request) (string, error) {
ah := r.Header.Get("Authorization")
if ah == "" {
return "", errors.New("missing Authorization header")
}
if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" {
return "", errors.New("should be a bearer token")
}
val := ah[7:]
if len(val) == 0 {
return "", errors.New("bearer token is empty")
}
return val, nil
}
// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request.
func CookieTokenExtractor(cookieName string) RequestTokenExtractor {
return func(r *http.Request) (string, error) {
ck, err := r.Cookie(cookieName)
if err != nil {
return "", fmt.Errorf("token cookie not found in request: %v", err)
}
if ck.Value == "" {
return "", errors.New("token cookie found but is empty")
}
return ck.Value, nil
}
}
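// Example (illustrative sketch; "session" is a placeholder cookie name and req
// is an *http.Request): pulling a raw token from an incoming request and
// parsing it.
//
//	extract := CookieTokenExtractor("session")
//	raw, err := extract(req) // or ExtractBearerToken(req) for Authorization headers
//	if err != nil {
//		// handle the missing token and return
//	}
//	jwt, err := jose.ParseJWT(raw)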
func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims {
return jose.Claims{
// required
"iss": iss,
"sub": sub,
"aud": aud,
"iat": iat.Unix(),
"exp": exp.Unix(),
}
}
func GenClientID(hostport string) (string, error) {
b, err := randBytes(32)
if err != nil {
return "", err
}
var host string
if strings.Contains(hostport, ":") {
host, _, err = net.SplitHostPort(hostport)
if err != nil {
return "", err
}
} else {
host = hostport
}
return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil
}
func randBytes(n int) ([]byte, error) {
b := make([]byte, n)
got, err := rand.Read(b)
if err != nil {
return nil, err
} else if n != got {
return nil, errors.New("unable to generate enough random data")
}
return b, nil
}
// urlEqual checks two urls for equality using only the host and path portions.
func urlEqual(url1, url2 string) bool {
u1, err := url.Parse(url1)
if err != nil {
return false
}
u2, err := url.Parse(url2)
if err != nil {
return false
}
return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path)
}


@@ -1,190 +0,0 @@
package oidc
import (
"errors"
"fmt"
"time"
"github.com/jonboulle/clockwork"
"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/key"
)
func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) {
jwtBytes := []byte(jwt.Data())
for _, k := range keys {
v, err := k.Verifier()
if err != nil {
return false, err
}
if v.Verify(jwt.Signature, jwtBytes) == nil {
return true, nil
}
}
return false, nil
}
// containsString returns true if the given string(needle) is found
// in the string array(haystack).
func containsString(needle string, haystack []string) bool {
for _, v := range haystack {
if v == needle {
return true
}
}
return false
}
// Verify claims in accordance with OIDC spec
// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation
func VerifyClaims(jwt jose.JWT, issuer, clientID string) error {
now := time.Now().UTC()
claims, err := jwt.Claims()
if err != nil {
return err
}
ident, err := IdentityFromClaims(claims)
if err != nil {
return err
}
if ident.ExpiresAt.Before(now) {
return errors.New("token is expired")
}
// iss REQUIRED. Issuer Identifier for the Issuer of the response.
// The iss value is a case sensitive URL using the https scheme that contains scheme,
// host, and optionally, port number and path components and no query or fragment components.
if iss, exists := claims["iss"].(string); exists {
if !urlEqual(iss, issuer) {
return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss)
}
} else {
return errors.New("missing claim: 'iss'")
}
// iat REQUIRED. Time at which the JWT was issued.
// Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z
// as measured in UTC until the date/time.
if _, exists := claims["iat"].(float64); !exists {
return errors.New("missing claim: 'iat'")
}
// aud REQUIRED. Audience(s) that this ID Token is intended for.
// It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value.
// It MAY also contain identifiers for other audiences. In the general case, the aud
// value is an array of case sensitive strings. In the common special case when there
// is one audience, the aud value MAY be a single case sensitive string.
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
if aud != clientID {
return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID)
}
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
if !containsString(clientID, aud) {
return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID)
}
} else {
return errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
}
return nil
}
// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT.
// Returns the client ID if valid, or an error if invalid.
func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) {
claims, err := jwt.Claims()
if err != nil {
return "", fmt.Errorf("failed to parse JWT claims: %v", err)
}
iss, ok, err := claims.StringClaim("iss")
if err != nil {
return "", fmt.Errorf("failed to parse 'iss' claim: %v", err)
} else if !ok {
return "", errors.New("missing required 'iss' claim")
} else if !urlEqual(iss, issuer) {
return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss)
}
sub, ok, err := claims.StringClaim("sub")
if err != nil {
return "", fmt.Errorf("failed to parse 'sub' claim: %v", err)
} else if !ok {
return "", errors.New("missing required 'sub' claim")
}
if aud, ok, err := claims.StringClaim("aud"); err == nil && ok {
if aud != sub {
return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub)
}
} else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok {
if !containsString(sub, aud) {
return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub)
}
} else {
return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array")
}
now := time.Now().UTC()
exp, ok, err := claims.TimeClaim("exp")
if err != nil {
return "", fmt.Errorf("failed to parse 'exp' claim: %v", err)
} else if !ok {
return "", errors.New("missing required 'exp' claim")
} else if exp.Before(now) {
return "", fmt.Errorf("token already expired at: %v", exp)
}
return sub, nil
}
type JWTVerifier struct {
issuer string
clientID string
syncFunc func() error
keysFunc func() []key.PublicKey
clock clockwork.Clock
}
func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier {
return JWTVerifier{
issuer: issuer,
clientID: clientID,
syncFunc: syncFunc,
keysFunc: keysFunc,
clock: clockwork.NewRealClock(),
}
}
func (v *JWTVerifier) Verify(jwt jose.JWT) error {
// Verify claims before verifying the signature. This is an optimization to throw out
// tokens we know are invalid without undergoing an expensive signature check and
// possibly a re-sync event.
if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil {
return fmt.Errorf("oidc: JWT claims invalid: %v", err)
}
ok, err := VerifySignature(jwt, v.keysFunc())
if err != nil {
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
} else if ok {
return nil
}
if err = v.syncFunc(); err != nil {
return fmt.Errorf("oidc: failed syncing KeySet: %v", err)
}
ok, err = VerifySignature(jwt, v.keysFunc())
if err != nil {
return fmt.Errorf("oidc: JWT signature verification failed: %v", err)
} else if !ok {
return errors.New("oidc: unable to verify JWT signature: no matching keys")
}
return nil
}
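// Example (illustrative sketch; keys is assumed to be a []key.PublicKey and jwt
// a parsed jose.JWT obtained elsewhere): verifying tokens against a static key set.
//
//	v := NewJWTVerifier(
//		"https://issuer.example.com",
//		"client-id",
//		func() error { return nil }, // nothing to re-sync for a static key set
//		func() []key.PublicKey { return keys },
//	)
//	err := v.Verify(jwt)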


@@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).


@@ -1,127 +0,0 @@
package health
import (
"expvar"
"fmt"
"log"
"net/http"
"github.com/coreos/pkg/httputil"
)
// Checkables should return nil when the thing they are checking is healthy, and an error otherwise.
type Checkable interface {
Healthy() error
}
// Checker provides a way to make an endpoint which can be probed for system health.
type Checker struct {
// Checks are the Checkables to be checked when probing.
Checks []Checkable
// UnhealthyHandler is called when one or more of the checks are unhealthy.
// If not provided DefaultUnhealthyHandler is called.
UnhealthyHandler UnhealthyHandler
// HealthyHandler is called when all checks are healthy.
// If not provided, DefaultHealthyHandler is called.
HealthyHandler http.HandlerFunc
}
func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) {
unhealthyHandler := c.UnhealthyHandler
if unhealthyHandler == nil {
unhealthyHandler = DefaultUnhealthyHandler
}
successHandler := c.HealthyHandler
if successHandler == nil {
successHandler = DefaultHealthyHandler
}
if r.Method != "GET" {
w.Header().Set("Allow", "GET")
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
if err := Check(c.Checks); err != nil {
unhealthyHandler(w, r, err)
return
}
successHandler(w, r)
}
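// Example (illustrative sketch; the path and checked component are placeholders):
// Checker implements http.Handler, so it can be mounted directly.
//
//	http.Handle("/healthz", health.Checker{
//		Checks: []health.Checkable{someCheckable},
//	})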
type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error)
type StatusResponse struct {
Status string `json:"status"`
Details *StatusResponseDetails `json:"details,omitempty"`
}
type StatusResponseDetails struct {
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func Check(checks []Checkable) (err error) {
errs := []error{}
for _, c := range checks {
if e := c.Healthy(); e != nil {
errs = append(errs, e)
}
}
switch len(errs) {
case 0:
err = nil
case 1:
err = errs[0]
default:
err = fmt.Errorf("multiple health check failure: %v", errs)
}
return
}
func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) {
err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{
Status: "ok",
})
if err != nil {
// TODO(bobbyrullo): replace with logging from new logging pkg,
// once it lands.
log.Printf("Failed to write JSON response: %v", err)
}
}
func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) {
writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{
Status: "error",
Details: &StatusResponseDetails{
Code: http.StatusInternalServerError,
Message: err.Error(),
},
})
if writeErr != nil {
// TODO(bobbyrullo): replace with logging from new logging pkg,
// once it lands.
log.Printf("Failed to write JSON response: %v", err)
}
}
// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported.
func ExpvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}


@@ -1,21 +0,0 @@
package httputil
import (
"net/http"
"time"
)
// DeleteCookies effectively deletes all named cookies
// by wiping all data and setting to expire immediately.
func DeleteCookies(w http.ResponseWriter, cookieNames ...string) {
for _, n := range cookieNames {
c := &http.Cookie{
Name: n,
Value: "",
Path: "/",
MaxAge: -1,
Expires: time.Time{},
}
http.SetCookie(w, c)
}
}


@@ -1,27 +0,0 @@
package httputil
import (
"encoding/json"
"net/http"
)
const (
JSONContentType = "application/json"
)
func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error {
enc, err := json.Marshal(resp)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return err
}
w.Header().Set("Content-Type", JSONContentType)
w.WriteHeader(code)
_, err = w.Write(enc)
if err != nil {
return err
}
return nil
}


@@ -1,15 +0,0 @@
package timeutil
import (
"time"
)
func ExpBackoff(prev, max time.Duration) time.Duration {
if prev == 0 {
return time.Second
}
if prev > max/2 {
return max
}
return 2 * prev
}
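// For example, successive calls grow the delay 0, 1s, 2s, 4s, 8s and so on,
// doubling each time until prev exceeds half of max, at which point max itself
// is returned.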


@@ -1,64 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
List []ApiDeclaration
}
// At returns the ApiDeclaration by its path unless absent, then ok is false
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
for _, each := range l.List {
if each.ResourcePath == path {
return each, true
}
}
return a, false
}
// Put adds or replaces an ApiDeclaration with this name
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
// maybe replace existing
for i, each := range l.List {
if each.ResourcePath == path {
// replace
l.List[i] = a
return
}
}
// add
l.List = append(l.List, a)
}
// Do enumerates all the properties, each with its assigned name
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
for _, each := range l.List {
block(each.ResourcePath, each)
}
}
// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.ResourcePath)
buf.WriteString("\": ")
encoder.Encode(each)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}


@@ -1,38 +0,0 @@
package swagger
import (
"net/http"
"github.com/emicklei/go-restful"
)
// PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
type MapSchemaFormatFunc func(typeName string) string
type Config struct {
// url where the services are available, e.g. http://localhost:8080
// if left empty then the basePath of Swagger is taken from the actual request
WebServicesUrl string
// path where the JSON api is available, e.g. /apidocs
ApiPath string
// [optional] path where the swagger UI will be served, e.g. /swagger
SwaggerPath string
// [optional] location of folder containing Swagger HTML5 application index.html
SwaggerFilePath string
// api listing is constructed from this list of restful WebServices.
WebServices []*restful.WebService
// will serve all static content (scripts,pages,images)
StaticHandler http.Handler
// [optional] by default CORS (Cross-Origin-Resource-Sharing) is enabled.
DisableCORS bool
// Top-level API version. Is reflected in the resource listing.
ApiVersion string
// If set then call this handler after building the complete ApiDeclaration Map
PostBuildHandler PostBuildDeclarationMapFunc
// Swagger global info struct
Info Info
// [optional] If set, model builder should call this handler to get additional typename-to-swagger-format-field conversion.
SchemaFormatHandler MapSchemaFormatFunc
}


@@ -1,449 +0,0 @@
package swagger
import (
"encoding/json"
"reflect"
"strings"
)
// ModelBuildable is used for extending Structs that need more control over
// how the Model appears in the Swagger api declaration.
type ModelBuildable interface {
PostBuildModel(m *Model) *Model
}
type modelBuilder struct {
Models *ModelList
Config *Config
}
type documentable interface {
SwaggerDoc() map[string]string
}
// Check if this structure has a method with signature func (<theModel>) SwaggerDoc() map[string]string
// If it exists, retrieve the documentation and overwrite all struct tag descriptions
func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string {
if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok {
return docable.SwaggerDoc()
}
return make(map[string]string)
}
// addModelFrom creates and adds a Model to the builder and detects and calls
// the post build hook for customizations
func (b modelBuilder) addModelFrom(sample interface{}) {
if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil {
// allow customizations
if buildable, ok := sample.(ModelBuildable); ok {
modelOrNil = buildable.PostBuildModel(modelOrNil)
b.Models.Put(modelOrNil.Id, *modelOrNil)
}
}
}
func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
modelName := b.keyFrom(st)
if nameOverride != "" {
modelName = nameOverride
}
// no models needed for primitive types
if b.isPrimitiveType(modelName) {
return nil
}
// golang encoding/json package says array and slice values encode as
// JSON arrays, except that []byte encodes as a base64-encoded string.
// If we see a []byte here, treat it as a primitive type (string)
// and deal with it in buildArrayTypeProperty.
if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) &&
st.Elem().Kind() == reflect.Uint8 {
return nil
}
// see if we already have visited this model
if _, ok := b.Models.At(modelName); ok {
return nil
}
sm := Model{
Id: modelName,
Required: []string{},
Properties: ModelPropertyList{}}
// reference the model before further initializing (enables recursive structs)
b.Models.Put(modelName, sm)
// check for slice or array
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
b.addModel(st.Elem(), "")
return &sm
}
// check for structure or primitive type
if st.Kind() != reflect.Struct {
return &sm
}
fullDoc := getDocFromMethodSwaggerDoc2(st)
modelDescriptions := []string{}
for i := 0; i < st.NumField(); i++ {
field := st.Field(i)
jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName)
if len(modelDescription) > 0 {
modelDescriptions = append(modelDescriptions, modelDescription)
}
// add if not omitted
if len(jsonName) != 0 {
// update description
if fieldDoc, ok := fullDoc[jsonName]; ok {
prop.Description = fieldDoc
}
// update Required
if b.isPropertyRequired(field) {
sm.Required = append(sm.Required, jsonName)
}
sm.Properties.Put(jsonName, prop)
}
}
// We always overwrite documentation if SwaggerDoc method exists
// "" is special for documenting the struct itself
if modelDoc, ok := fullDoc[""]; ok {
sm.Description = modelDoc
} else if len(modelDescriptions) != 0 {
sm.Description = strings.Join(modelDescriptions, "\n")
}
// update model builder with completed model
b.Models.Put(modelName, sm)
return &sm
}
func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool {
required := true
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if len(s) > 1 && s[1] == "omitempty" {
return false
}
}
return required
}
func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) {
jsonName = b.jsonNameOfField(field)
if len(jsonName) == 0 {
// empty name signals skip property
return "", "", prop
}
if tag := field.Tag.Get("modelDescription"); tag != "" {
modelDescription = tag
}
prop.setPropertyMetadata(field)
if prop.Type != nil {
return jsonName, modelDescription, prop
}
fieldType := field.Type
// check if type is doing its own marshalling
marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem()
if fieldType.Implements(marshalerType) {
var pType = "string"
if prop.Type == nil {
prop.Type = &pType
}
if prop.Format == "" {
prop.Format = b.jsonSchemaFormat(fieldType.String())
}
return jsonName, modelDescription, prop
}
// check if annotation says it is a string
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if len(s) > 1 && s[1] == "string" {
stringt := "string"
prop.Type = &stringt
return jsonName, modelDescription, prop
}
}
fieldKind := fieldType.Kind()
switch {
case fieldKind == reflect.Struct:
jsonName, prop := b.buildStructTypeProperty(field, jsonName, model)
return jsonName, modelDescription, prop
case fieldKind == reflect.Slice || fieldKind == reflect.Array:
jsonName, prop := b.buildArrayTypeProperty(field, jsonName, modelName)
return jsonName, modelDescription, prop
case fieldKind == reflect.Ptr:
jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName)
return jsonName, modelDescription, prop
case fieldKind == reflect.String:
stringt := "string"
prop.Type = &stringt
return jsonName, modelDescription, prop
case fieldKind == reflect.Map:
// if it's a map, it's unstructured, and swagger 1.2 can't handle it
objectType := "object"
prop.Type = &objectType
return jsonName, modelDescription, prop
}
if b.isPrimitiveType(fieldType.String()) {
mapped := b.jsonSchemaType(fieldType.String())
prop.Type = &mapped
prop.Format = b.jsonSchemaFormat(fieldType.String())
return jsonName, modelDescription, prop
}
modelType := fieldType.String()
prop.Ref = &modelType
if fieldType.Name() == "" { // override type of anonymous structs
nestedTypeName := modelName + "." + jsonName
prop.Ref = &nestedTypeName
b.addModel(fieldType, nestedTypeName)
}
return jsonName, modelDescription, prop
}
func hasNamedJSONTag(field reflect.StructField) bool {
parts := strings.Split(field.Tag.Get("json"), ",")
if len(parts) == 0 {
return false
}
for _, s := range parts[1:] {
if s == "inline" {
return false
}
}
return len(parts[0]) > 0
}
func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) {
prop.setPropertyMetadata(field)
// Check for type override in tag
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
// check for anonymous
if len(fieldType.Name()) == 0 {
// anonymous
anonType := model.Id + "." + jsonName
b.addModel(fieldType, anonType)
prop.Ref = &anonType
return jsonName, prop
}
if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
// embedded struct
sub := modelBuilder{new(ModelList), b.Config}
sub.addModel(fieldType, "")
subKey := sub.keyFrom(fieldType)
// merge properties from sub
subModel, _ := sub.Models.At(subKey)
subModel.Properties.Do(func(k string, v ModelProperty) {
model.Properties.Put(k, v)
// if subModel says this property is required then include it
required := false
for _, each := range subModel.Required {
if k == each {
required = true
break
}
}
if required {
model.Required = append(model.Required, k)
}
})
// add all new referenced models
sub.Models.Do(func(key string, sub Model) {
if key != subKey {
if _, ok := b.Models.At(key); !ok {
b.Models.Put(key, sub)
}
}
})
// empty name signals skip property
return "", prop
}
// simple struct
b.addModel(fieldType, "")
var pType = fieldType.String()
prop.Ref = &pType
return jsonName, prop
}
func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
// check for type override in tags
prop.setPropertyMetadata(field)
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
if fieldType.Elem().Kind() == reflect.Uint8 {
stringt := "string"
prop.Type = &stringt
return jsonName, prop
}
var pType = "array"
prop.Type = &pType
isPrimitive := b.isPrimitiveType(fieldType.Elem().Name())
elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem())
prop.Items = new(Item)
if isPrimitive {
mapped := b.jsonSchemaType(elemTypeName)
prop.Items.Type = &mapped
} else {
prop.Items.Ref = &elemTypeName
}
// add|overwrite model for element type
if fieldType.Elem().Kind() == reflect.Ptr {
fieldType = fieldType.Elem()
}
if !isPrimitive {
b.addModel(fieldType.Elem(), elemTypeName)
}
return jsonName, prop
}
func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) {
prop.setPropertyMetadata(field)
// Check for type override in tags
if prop.Type != nil {
return jsonName, prop
}
fieldType := field.Type
// override type of pointer to list-likes
if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array {
var pType = "array"
prop.Type = &pType
isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name())
elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem())
if isPrimitive {
primName := b.jsonSchemaType(elemName)
prop.Items = &Item{Ref: &primName}
} else {
prop.Items = &Item{Ref: &elemName}
}
if !isPrimitive {
// add|overwrite model for element type
b.addModel(fieldType.Elem().Elem(), elemName)
}
} else {
// non-array, pointer type
var pType = b.jsonSchemaType(fieldType.String()[1:]) // no star, include pkg path
if b.isPrimitiveType(fieldType.String()[1:]) {
prop.Type = &pType
prop.Format = b.jsonSchemaFormat(fieldType.String()[1:])
return jsonName, prop
}
prop.Ref = &pType
elemName := ""
if fieldType.Elem().Name() == "" {
elemName = modelName + "." + jsonName
prop.Ref = &elemName
}
b.addModel(fieldType.Elem(), elemName)
}
return jsonName, prop
}
func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string {
if t.Kind() == reflect.Ptr {
return t.String()[1:]
}
if t.Name() == "" {
return modelName + "." + jsonName
}
return b.keyFrom(t)
}
func (b modelBuilder) keyFrom(st reflect.Type) string {
key := st.String()
if len(st.Name()) == 0 { // unnamed type
// Swagger UI has special meaning for [
key = strings.Replace(key, "[]", "||", -1)
}
return key
}
// see also https://golang.org/ref/spec#Numeric_types
func (b modelBuilder) isPrimitiveType(modelName string) bool {
if len(modelName) == 0 {
return false
}
return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName)
}
// jsonNameOfField returns the name of the field as it should appear in JSON format
// An empty string indicates that this field is not part of the JSON representation
func (b modelBuilder) jsonNameOfField(field reflect.StructField) string {
if jsonTag := field.Tag.Get("json"); jsonTag != "" {
s := strings.Split(jsonTag, ",")
if s[0] == "-" {
// empty name signals skip property
return ""
} else if s[0] != "" {
return s[0]
}
}
return field.Name
}
// see also http://json-schema.org/latest/json-schema-core.html#anchor8
func (b modelBuilder) jsonSchemaType(modelName string) string {
schemaMap := map[string]string{
"uint": "integer",
"uint8": "integer",
"uint16": "integer",
"uint32": "integer",
"uint64": "integer",
"int": "integer",
"int8": "integer",
"int16": "integer",
"int32": "integer",
"int64": "integer",
"byte": "integer",
"float64": "number",
"float32": "number",
"bool": "boolean",
"time.Time": "string",
}
mapped, ok := schemaMap[modelName]
if !ok {
return modelName // use as is (custom or struct)
}
return mapped
}
func (b modelBuilder) jsonSchemaFormat(modelName string) string {
if b.Config != nil && b.Config.SchemaFormatHandler != nil {
if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" {
return mapped
}
}
schemaMap := map[string]string{
"int": "int32",
"int32": "int32",
"int64": "int64",
"byte": "byte",
"uint": "integer",
"uint8": "byte",
"float64": "double",
"float32": "float",
"time.Time": "date-time",
"*time.Time": "date-time",
}
mapped, ok := schemaMap[modelName]
if !ok {
return "" // no format
}
return mapped
}
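
A minimal standalone sketch of the tag-resolution rules that jsonNameOfField applies above: the first element of the json tag overrides the field name, and "-" drops the field. The sample struct and the jsonName helper are illustrative only, not part of the vendored package.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type sample struct {
	ID      int    `json:"id"`
	Skipped string `json:"-"`
	Plain   string
}

// jsonName mirrors jsonNameOfField: the first json tag element wins,
// and "-" means the field is omitted from the generated model.
func jsonName(f reflect.StructField) string {
	if tag := f.Tag.Get("json"); tag != "" {
		parts := strings.Split(tag, ",")
		if parts[0] == "-" {
			return ""
		}
		if parts[0] != "" {
			return parts[0]
		}
	}
	return f.Name
}

func main() {
	t := reflect.TypeOf(sample{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s -> %q\n", f.Name, jsonName(f))
	}
}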

View file

@ -1,86 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModel associates a name with a Model (not using its Id)
type NamedModel struct {
Name string
Model Model
}
// ModelList encapsulates a list of NamedModel (association)
type ModelList struct {
List []NamedModel
}
// Put adds or replaces a Model by its name
func (l *ModelList) Put(name string, model Model) {
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModel{name, model}
return
}
}
// add
l.List = append(l.List, NamedModel{name, model})
}
// At returns a Model by its name, ok is false if absent
func (l *ModelList) At(name string) (m Model, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Model, true
}
}
return m, false
}
// Do enumerates all the models, each with its assigned name
func (l *ModelList) Do(block func(name string, value Model)) {
for _, each := range l.List {
block(each.Name, each.Model)
}
}
// MarshalJSON writes the ModelList as if it was a map[string]Model
func (l ModelList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Model)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
func (l *ModelList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m Model
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}
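
ModelList keeps models in a slice of name/model pairs so MarshalJSON can emit them in insertion order, which a plain map[string]Model would not guarantee. Below is a standalone sketch of the same ordered-map marshalling technique; the pair and orderedMap types are hypothetical, not the vendored ones.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type pair struct {
	Name  string
	Value interface{}
}

type orderedMap []pair

// MarshalJSON emits the entries as a JSON object in insertion order,
// mirroring what ModelList.MarshalJSON above does for models.
func (m orderedMap) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteString("{")
	for i, p := range m {
		if i > 0 {
			buf.WriteString(",")
		}
		key, err := json.Marshal(p.Name)
		if err != nil {
			return nil, err
		}
		buf.Write(key)
		buf.WriteString(":")
		val, err := json.Marshal(p.Value)
		if err != nil {
			return nil, err
		}
		buf.Write(val)
	}
	buf.WriteString("}")
	return buf.Bytes(), nil
}

func main() {
	m := orderedMap{{"zebra", 1}, {"apple", 2}}
	out, _ := json.Marshal(m)
	fmt.Println(string(out)) // {"zebra":1,"apple":2} -- insertion order preserved
}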

View file

@ -1,66 +0,0 @@
package swagger
import (
"reflect"
"strings"
)
func (prop *ModelProperty) setDescription(field reflect.StructField) {
if tag := field.Tag.Get("description"); tag != "" {
prop.Description = tag
}
}
func (prop *ModelProperty) setDefaultValue(field reflect.StructField) {
if tag := field.Tag.Get("default"); tag != "" {
prop.DefaultValue = Special(tag)
}
}
func (prop *ModelProperty) setEnumValues(field reflect.StructField) {
// We use | to separate the enum values. This value is chosen

// since it's unlikely to be useful in actual enumeration values.
if tag := field.Tag.Get("enum"); tag != "" {
prop.Enum = strings.Split(tag, "|")
}
}
func (prop *ModelProperty) setMaximum(field reflect.StructField) {
if tag := field.Tag.Get("maximum"); tag != "" {
prop.Maximum = tag
}
}
func (prop *ModelProperty) setType(field reflect.StructField) {
if tag := field.Tag.Get("type"); tag != "" {
prop.Type = &tag
}
}
func (prop *ModelProperty) setMinimum(field reflect.StructField) {
if tag := field.Tag.Get("minimum"); tag != "" {
prop.Minimum = tag
}
}
func (prop *ModelProperty) setUniqueItems(field reflect.StructField) {
tag := field.Tag.Get("unique")
switch tag {
case "true":
v := true
prop.UniqueItems = &v
case "false":
v := false
prop.UniqueItems = &v
}
}
func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) {
prop.setDescription(field)
prop.setEnumValues(field)
prop.setMinimum(field)
prop.setMaximum(field)
prop.setUniqueItems(field)
prop.setDefaultValue(field)
prop.setType(field)
}
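
setPropertyMetadata reads the description, enum, minimum, maximum, unique, default and type struct tags off each field. A small illustrative sketch (the widget type is hypothetical) showing how such tags are declared and read back via reflection:

package main

import (
	"fmt"
	"reflect"
)

// widget is a hypothetical resource annotated with the tags
// that setPropertyMetadata above understands.
type widget struct {
	Color string `description:"case color" enum:"red|green|blue" default:"red"`
	Count int    `minimum:"1" maximum:"10"`
}

func main() {
	t := reflect.TypeOf(widget{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s: description=%q enum=%q min=%q max=%q default=%q\n",
			f.Name,
			f.Tag.Get("description"),
			f.Tag.Get("enum"),
			f.Tag.Get("minimum"),
			f.Tag.Get("maximum"),
			f.Tag.Get("default"))
	}
}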

View file

@ -1,87 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModelProperty associates a name to a ModelProperty
type NamedModelProperty struct {
Name string
Property ModelProperty
}
// ModelPropertyList encapsulates a list of NamedModelProperty (association)
type ModelPropertyList struct {
List []NamedModelProperty
}
// At returns the ModelProperty by its name; ok is false if absent
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Property, true
}
}
return p, false
}
// Put adds or replaces a ModelProperty with this name
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
// maybe replace existing
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModelProperty{Name: name, Property: prop}
return
}
}
// add
l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
}
// Do enumerates all the properties, each with its assigned name
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
for _, each := range l.List {
block(each.Name, each.Property)
}
}
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Property)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m ModelProperty
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}

View file

@ -1,36 +0,0 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "github.com/emicklei/go-restful"
type orderedRouteMap struct {
elements map[string][]restful.Route
keys []string
}
func newOrderedRouteMap() *orderedRouteMap {
return &orderedRouteMap{
elements: map[string][]restful.Route{},
keys: []string{},
}
}
func (o *orderedRouteMap) Add(key string, route restful.Route) {
routes, ok := o.elements[key]
if ok {
routes = append(routes, route)
o.elements[key] = routes
return
}
o.elements[key] = []restful.Route{route}
o.keys = append(o.keys, key)
}
func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) {
for _, k := range o.keys {
block(k, o.elements[k])
}
}

View file

@ -1,185 +0,0 @@
// Package swagger implements the structures of the Swagger 1.2 specification
// (https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md)
package swagger
const swaggerVersion = "1.2"
// 4.3.3 Data Type Fields
type DataTypeFields struct {
Type *string `json:"type,omitempty"` // if Ref not used
Ref *string `json:"$ref,omitempty"` // if Type not used
Format string `json:"format,omitempty"`
DefaultValue Special `json:"defaultValue,omitempty"`
Enum []string `json:"enum,omitempty"`
Minimum string `json:"minimum,omitempty"`
Maximum string `json:"maximum,omitempty"`
Items *Item `json:"items,omitempty"`
UniqueItems *bool `json:"uniqueItems,omitempty"`
}
type Special string
// 4.3.4 Items Object
type Item struct {
Type *string `json:"type,omitempty"`
Ref *string `json:"$ref,omitempty"`
Format string `json:"format,omitempty"`
}
// 5.1 Resource Listing
type ResourceListing struct {
SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2
Apis []Resource `json:"apis"`
ApiVersion string `json:"apiVersion"`
Info Info `json:"info"`
Authorizations []Authorization `json:"authorizations,omitempty"`
}
// 5.1.2 Resource Object
type Resource struct {
Path string `json:"path"` // relative or absolute, must start with /
Description string `json:"description"`
}
// 5.1.3 Info Object
type Info struct {
Title string `json:"title"`
Description string `json:"description"`
TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"`
Contact string `json:"contact,omitempty"`
License string `json:"license,omitempty"`
LicenseUrl string `json:"licenseUrl,omitempty"`
}
// 5.1.5
type Authorization struct {
Type string `json:"type"`
PassAs string `json:"passAs"`
Keyname string `json:"keyname"`
Scopes []Scope `json:"scopes"`
GrantTypes []GrantType `json:"grandTypes"`
}
// 5.1.6, 5.2.11
type Scope struct {
// Required. The name of the scope.
Scope string `json:"scope"`
// Recommended. A short description of the scope.
Description string `json:"description"`
}
// 5.1.7
type GrantType struct {
Implicit Implicit `json:"implicit"`
AuthorizationCode AuthorizationCode `json:"authorization_code"`
}
// 5.1.8 Implicit Object
type Implicit struct {
// Required. The login endpoint definition.
loginEndpoint LoginEndpoint `json:"loginEndpoint"`
// An optional alternative name to standard "access_token" OAuth2 parameter.
TokenName string `json:"tokenName"`
}
// 5.1.9 Authorization Code Object
type AuthorizationCode struct {
TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"`
TokenEndpoint TokenEndpoint `json:"tokenEndpoint"`
}
// 5.1.10 Login Endpoint Object
type LoginEndpoint struct {
// Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
}
// 5.1.11 Token Request Endpoint Object
type TokenRequestEndpoint struct {
// Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
// An optional alternative name to standard "client_id" OAuth2 parameter.
ClientIdName string `json:"clientIdName"`
// An optional alternative name to the standard "client_secret" OAuth2 parameter.
ClientSecretName string `json:"clientSecretName"`
}
// 5.1.12 Token Endpoint Object
type TokenEndpoint struct {
// Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format.
Url string `json:"url"`
// An optional alternative name to standard "access_token" OAuth2 parameter.
TokenName string `json:"tokenName"`
}
// 5.2 API Declaration
type ApiDeclaration struct {
SwaggerVersion string `json:"swaggerVersion"`
ApiVersion string `json:"apiVersion"`
BasePath string `json:"basePath"`
ResourcePath string `json:"resourcePath"` // must start with /
Info Info `json:"info"`
Apis []Api `json:"apis,omitempty"`
Models ModelList `json:"models,omitempty"`
Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Authorizations []Authorization `json:"authorizations,omitempty"`
}
// 5.2.2 API Object
type Api struct {
Path string `json:"path"` // relative or absolute, must start with /
Description string `json:"description"`
Operations []Operation `json:"operations,omitempty"`
}
// 5.2.3 Operation Object
type Operation struct {
DataTypeFields
Method string `json:"method"`
Summary string `json:"summary,omitempty"`
Notes string `json:"notes,omitempty"`
Nickname string `json:"nickname"`
Authorizations []Authorization `json:"authorizations,omitempty"`
Parameters []Parameter `json:"parameters"`
ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional
Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"`
Deprecated string `json:"deprecated,omitempty"`
}
// 5.2.4 Parameter Object
type Parameter struct {
DataTypeFields
ParamType string `json:"paramType"` // path,query,body,header,form
Name string `json:"name"`
Description string `json:"description"`
Required bool `json:"required"`
AllowMultiple bool `json:"allowMultiple"`
}
// 5.2.5 Response Message Object
type ResponseMessage struct {
Code int `json:"code"`
Message string `json:"message"`
ResponseModel string `json:"responseModel,omitempty"`
}
// 5.2.6, 5.2.7 Models Object
type Model struct {
Id string `json:"id"`
Description string `json:"description,omitempty"`
Required []string `json:"required,omitempty"`
Properties ModelPropertyList `json:"properties"`
SubTypes []string `json:"subTypes,omitempty"`
Discriminator string `json:"discriminator,omitempty"`
}
// 5.2.8 Properties Object
type ModelProperty struct {
DataTypeFields
Description string `json:"description,omitempty"`
}
// 5.2.10
type Authorizations map[string]Authorization

View file

@ -1,21 +0,0 @@
package swagger
type SwaggerBuilder struct {
SwaggerService
}
func NewSwaggerBuilder(config Config) *SwaggerBuilder {
return &SwaggerBuilder{*newSwaggerService(config)}
}
func (sb SwaggerBuilder) ProduceListing() ResourceListing {
return sb.SwaggerService.produceListing()
}
func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration {
return sb.SwaggerService.produceAllDeclarations()
}
func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) {
return sb.SwaggerService.produceDeclarations(route)
}

View file

@ -1,440 +0,0 @@
package swagger
import (
"fmt"
"github.com/emicklei/go-restful"
// "github.com/emicklei/hopwatch"
"net/http"
"reflect"
"sort"
"strings"
"github.com/emicklei/go-restful/log"
)
type SwaggerService struct {
config Config
apiDeclarationMap *ApiDeclarationList
}
func newSwaggerService(config Config) *SwaggerService {
sws := &SwaggerService{
config: config,
apiDeclarationMap: new(ApiDeclarationList)}
// Build all ApiDeclarations
for _, each := range config.WebServices {
rootPath := each.RootPath()
// skip the api service itself
if rootPath != config.ApiPath {
if rootPath == "" || rootPath == "/" {
// use routes
for _, route := range each.Routes() {
entry := staticPathFromRoute(route)
_, exists := sws.apiDeclarationMap.At(entry)
if !exists {
sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
}
}
} else { // use root path
sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
}
}
}
// if specified then call the PostBuilderHandler
if config.PostBuildHandler != nil {
config.PostBuildHandler(sws.apiDeclarationMap)
}
return sws
}
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
var LogInfo = func(format string, v ...interface{}) {
// use the restful package-wide logger
log.Printf(format, v...)
}
// InstallSwaggerService adds the WebService that provides the API documentation of all services
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
func InstallSwaggerService(aSwaggerConfig Config) {
RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer)
}
// RegisterSwaggerService adds the WebService that provides the API documentation of all services
// conforming to the Swagger documentation specification (https://github.com/wordnik/swagger-core/wiki).
func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
sws := newSwaggerService(config)
ws := new(restful.WebService)
ws.Path(config.ApiPath)
ws.Produces(restful.MIME_JSON)
if config.DisableCORS {
ws.Filter(enableCORS)
}
ws.Route(ws.GET("/").To(sws.getListing))
ws.Route(ws.GET("/{a}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations))
ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations))
LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath)
wsContainer.Add(ws)
// Check paths for UI serving
if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" {
swaggerPathSlash := config.SwaggerPath
// path must end with slash /
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)")
swaggerPathSlash += "/"
}
LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath)
wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath))))
//if we define a custom static handler use it
} else if config.StaticHandler != nil && config.SwaggerPath != "" {
swaggerPathSlash := config.SwaggerPath
// path must end with slash /
if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] {
LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)")
swaggerPathSlash += "/"
}
LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler)
wsContainer.Handle(swaggerPathSlash, config.StaticHandler)
} else {
LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served")
}
}
func staticPathFromRoute(r restful.Route) string {
static := r.Path
bracket := strings.Index(static, "{")
if bracket <= 1 { // result cannot be empty
return static
}
if bracket != -1 {
static = r.Path[:bracket]
}
if strings.HasSuffix(static, "/") {
return static[:len(static)-1]
} else {
return static
}
}
func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" {
// prevent duplicate header
if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 {
resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin)
}
}
chain.ProcessFilter(req, resp)
}
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
listing := sws.produceListing()
resp.WriteAsJson(listing)
}
func (sws SwaggerService) produceListing() ResourceListing {
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info}
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
ref := Resource{Path: k}
if len(v.Apis) > 0 { // use description of first (could still be empty)
ref.Description = v.Apis[0].Description
}
listing.Apis = append(listing.Apis, ref)
})
return listing
}
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
decl, ok := sws.produceDeclarations(composeRootPath(req))
if !ok {
resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
return
}
// unless WebServicesUrl is given
if len(sws.config.WebServicesUrl) == 0 {
// update base path from the actual request
// TODO how to detect https? assume http for now
var host string
// X-Forwarded-Host or Host or Request.Host
hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific?
if !ok || len(hostvalues) == 0 {
forwarded, ok := req.Request.Header["Host"] // without reverse-proxy
if !ok || len(forwarded) == 0 {
// fallback to Host field
host = req.Request.Host
} else {
host = forwarded[0]
}
} else {
host = hostvalues[0]
}
// inspect Referer for the scheme (http vs https)
scheme := "http"
if referer := req.Request.Header["Referer"]; len(referer) > 0 {
if strings.HasPrefix(referer[0], "https") {
scheme = "https"
}
}
decl.BasePath = fmt.Sprintf("%s://%s", scheme, host)
}
resp.WriteAsJson(decl)
}
func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration {
decls := map[string]ApiDeclaration{}
sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
decls[k] = v
})
return decls
}
func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) {
decl, ok := sws.apiDeclarationMap.At(route)
if !ok {
return nil, false
}
decl.BasePath = sws.config.WebServicesUrl
return &decl, true
}
// composeDeclaration uses all routes and parameters to create an ApiDeclaration
func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration {
decl := ApiDeclaration{
SwaggerVersion: swaggerVersion,
BasePath: sws.config.WebServicesUrl,
ResourcePath: pathPrefix,
Models: ModelList{},
ApiVersion: ws.Version()}
// collect any path parameters
rootParams := []Parameter{}
for _, param := range ws.PathParameters() {
rootParams = append(rootParams, asSwaggerParameter(param.Data()))
}
// aggregate by path
pathToRoutes := newOrderedRouteMap()
for _, other := range ws.Routes() {
if strings.HasPrefix(other.Path, pathPrefix) {
pathToRoutes.Add(other.Path, other)
}
}
pathToRoutes.Do(func(path string, routes []restful.Route) {
api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()}
voidString := "void"
for _, route := range routes {
operation := Operation{
Method: route.Method,
Summary: route.Doc,
Notes: route.Notes,
// Type gets overwritten if there is a write sample
DataTypeFields: DataTypeFields{Type: &voidString},
Parameters: []Parameter{},
Nickname: route.Operation,
ResponseMessages: composeResponseMessages(route, &decl, &sws.config)}
operation.Consumes = route.Consumes
operation.Produces = route.Produces
// share root params if any
for _, swparam := range rootParams {
operation.Parameters = append(operation.Parameters, swparam)
}
// route specific params
for _, param := range route.ParameterDocs {
operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
}
sws.addModelsFromRouteTo(&operation, route, &decl)
api.Operations = append(api.Operations, operation)
}
decl.Apis = append(decl.Apis, api)
})
return decl
}
func withoutWildcard(path string) string {
if strings.HasSuffix(path, ":*}") {
return path[0:len(path)-3] + "}"
}
return path
}
// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them.
func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) {
if route.ResponseErrors == nil {
return messages
}
// sort by code
codes := sort.IntSlice{}
for code := range route.ResponseErrors {
codes = append(codes, code)
}
codes.Sort()
for _, code := range codes {
each := route.ResponseErrors[code]
message := ResponseMessage{
Code: code,
Message: each.Message,
}
if each.Model != nil {
st := reflect.TypeOf(each.Model)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
if isCollection {
modelName = "array[" + modelName + "]"
}
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
// reference the model
message.ResponseModel = modelName
}
messages = append(messages, message)
}
return
}
// addModelsFromRouteTo takes any read or write sample from the Route and creates a Swagger model from it.
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
if route.ReadSample != nil {
sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
}
if route.WriteSample != nil {
sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
}
}
func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
isCollection := false
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
st = st.Elem()
isCollection = true
} else {
if st.Kind() == reflect.Ptr {
if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array {
st = st.Elem().Elem()
isCollection = true
}
}
}
return isCollection, st
}
// addModelFromSampleTo creates and adds (or overwrites) a Model from a sample resource
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
if isResponse {
type_, items := asDataType(sample, &sws.config)
operation.Type = type_
operation.Items = items
}
modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
}
func asSwaggerParameter(param restful.ParameterData) Parameter {
return Parameter{
DataTypeFields: DataTypeFields{
Type: &param.DataType,
Format: asFormat(param.DataType, param.DataFormat),
DefaultValue: Special(param.DefaultValue),
},
Name: param.Name,
Description: param.Description,
ParamType: asParamType(param.Kind),
Required: param.Required}
}
// Between 1 and 7 path parameters are supported
func composeRootPath(req *restful.Request) string {
path := "/" + req.PathParameter("a")
b := req.PathParameter("b")
if b == "" {
return path
}
path = path + "/" + b
c := req.PathParameter("c")
if c == "" {
return path
}
path = path + "/" + c
d := req.PathParameter("d")
if d == "" {
return path
}
path = path + "/" + d
e := req.PathParameter("e")
if e == "" {
return path
}
path = path + "/" + e
f := req.PathParameter("f")
if f == "" {
return path
}
path = path + "/" + f
g := req.PathParameter("g")
if g == "" {
return path
}
return path + "/" + g
}
func asFormat(dataType string, dataFormat string) string {
if dataFormat != "" {
return dataFormat
}
return "" // TODO
}
func asParamType(kind int) string {
switch {
case kind == restful.PathParameterKind:
return "path"
case kind == restful.QueryParameterKind:
return "query"
case kind == restful.BodyParameterKind:
return "body"
case kind == restful.HeaderParameterKind:
return "header"
case kind == restful.FormParameterKind:
return "form"
}
return ""
}
func asDataType(any interface{}, config *Config) (*string, *Item) {
// If it's not a collection, return the suggested model name
st := reflect.TypeOf(any)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
// if it's not a collection we are done
if !isCollection {
return &modelName, nil
}
// XXX: This is not very elegant
// We create an Item object referring to the given model
models := ModelList{}
mb := modelBuilder{Models: &models, Config: config}
mb.addModelFrom(any)
elemTypeName := mb.getElementTypeName(modelName, "", st)
item := new(Item)
if mb.isPrimitiveType(elemTypeName) {
mapped := mb.jsonSchemaType(elemTypeName)
item.Type = &mapped
} else {
item.Ref = &elemTypeName
}
tmp := "array"
return &tmp, item
}
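
composeRootPath rebuilds the request path from up to seven positional parameters ({a} through {g}), which is why RegisterSwaggerService registers seven GET routes. The unrolled chain could equally be written as a loop; a self-contained, hypothetical sketch where lookup stands in for req.PathParameter:

package main

import "fmt"

// composeRootPathLoop is a loop-based equivalent of composeRootPath above;
// the lookup argument is purely illustrative and replaces req.PathParameter.
func composeRootPathLoop(lookup func(string) string) string {
	path := "/" + lookup("a")
	for _, name := range []string{"b", "c", "d", "e", "f", "g"} {
		p := lookup(name)
		if p == "" {
			break
		}
		path += "/" + p
	}
	return path
}

func main() {
	params := map[string]string{"a": "apis", "b": "v1", "c": "pods"}
	fmt.Println(composeRootPathLoop(func(k string) string { return params[k] }))
	// Output: /apis/v1/pods
}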

136
vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file
View file

@ -0,0 +1,136 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
value, err := proto.Marshal(pb)
if err != nil {
return nil, err
}
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
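
A short usage sketch of the Any helpers defined above, wrapping a Duration message (any registered proto message would do) and unpacking it again; error handling is kept minimal:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Wrap a concrete message into a google.protobuf.Any.
	src := &durpb.Duration{Seconds: 90}
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Check its type, then unpack it back into a concrete message.
	dst := &durpb.Duration{}
	if !ptypes.Is(a, dst) {
		log.Fatal("unexpected message type")
	}
	if err := ptypes.UnmarshalAny(a, dst); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.Seconds) // 90
}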

35
vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored Normal file
View file

@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes

102
vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored Normal file
View file

@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290 years).
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durpb.Duration{
Seconds: secs,
Nanos: int32(nanos),
}
}
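
A usage sketch for the two conversions above, round-tripping a time.Duration through its protobuf representation:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration and back.
	p := ptypes.DurationProto(90 * time.Second)
	fmt.Println(p.Seconds, p.Nanos) // 90 0

	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err) // e.g. value out of time.Duration range
	}
	fmt.Println(d) // 1m30s
}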

View file

@ -0,0 +1,114 @@
// Code generated by protoc-gen-go.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
// DO NOT EDIT!
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/duration/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 189 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13,
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00,
}

125
vendor/github.com/golang/protobuf/ptypes/timestamp.go generated vendored Normal file
View file

@ -0,0 +1,125 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
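
A usage sketch for the Timestamp conversions above; TimestampProto only fails for times outside the representable range, and TimestampString falls back to an error message in parentheses for invalid values:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp and back.
	ts, err := ptypes.TimestampProto(time.Date(2016, 1, 2, 15, 4, 5, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2016-01-02T15:04:05Z

	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t) // 2016-01-02 15:04:05 +0000 UTC
}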

View file

@ -0,0 +1,127 @@
// Code generated by protoc-gen-go.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
// DO NOT EDIT!
/*
Package timestamp is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
It has these top-level messages:
Timestamp
*/
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// now = time.time()
// seconds = int(now)
// nanos = int((now - seconds) * 10**9)
// timestamp = Timestamp(seconds=seconds, nanos=nanos)
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 194 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27,
0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1,
0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03,
0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92,
0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01,
0x00, 0x00,
}

View file

@ -1,4 +1,5 @@
Apache License
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@ -178,7 +179,7 @@ Apache License
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@ -186,7 +187,7 @@ Apache License
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -199,4 +200,3 @@ Apache License
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

881
vendor/github.com/google/btree/btree.go generated vendored Normal file
View file

@ -0,0 +1,881 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
// * Due to the overhead of storing values as interfaces (each
// value needs to be stored as the value itself, then 2 words for the
// interface pointing to that value and its type), resulting in higher
// memory use.
// * Since interfaces can point to values anywhere in memory, values are
// most likely not stored in contiguous blocks, resulting in a higher
// number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement for gollrb.LLRB
// trees (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values.
package btree
import (
"fmt"
"io"
"sort"
"strings"
"sync"
)
// Item represents a single object in the tree.
type Item interface {
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
Less(than Item) bool
}
const (
DefaultFreeListSize = 32
)
var (
nilItems = make(items, 16)
nilChildren = make(children, 16)
)
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are safe for concurrent write access.
type FreeList struct {
mu sync.Mutex
freelist []*node
}
// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
return &FreeList{freelist: make([]*node, 0, size)}
}
func (f *FreeList) newNode() (n *node) {
f.mu.Lock()
index := len(f.freelist) - 1
if index < 0 {
f.mu.Unlock()
return new(node)
}
n = f.freelist[index]
f.freelist[index] = nil
f.freelist = f.freelist[:index]
f.mu.Unlock()
return
}
// freeNode adds the given node to the list, returning true if it was added
// and false if it was discarded.
func (f *FreeList) freeNode(n *node) (out bool) {
f.mu.Lock()
if len(f.freelist) < cap(f.freelist) {
f.freelist = append(f.freelist, n)
out = true
}
f.mu.Unlock()
return
}
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
if degree <= 1 {
panic("bad degree")
}
return &BTree{
degree: degree,
cow: &copyOnWriteContext{freelist: f},
}
}
// items stores items in a node.
type items []Item
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = item
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
item := (*s)[index]
copy((*s)[index:], (*s)[index+1:])
(*s)[len(*s)-1] = nil
*s = (*s)[:len(*s)-1]
return item
}
// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// truncate truncates this instance at index so that it contains only the
// first index items. index must be less than or equal to length.
func (s *items) truncate(index int) {
var toClear items
*s, toClear = (*s)[:index], (*s)[index:]
for len(toClear) > 0 {
toClear = toClear[copy(toClear, nilItems):]
}
}
// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
i := sort.Search(len(s), func(i int) bool {
return item.Less(s[i])
})
if i > 0 && !s[i-1].Less(item) {
return i - 1, true
}
return i, false
}
// children stores child nodes in a node.
type children []*node
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = n
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
n := (*s)[index]
copy((*s)[index:], (*s)[index+1:])
(*s)[len(*s)-1] = nil
*s = (*s)[:len(*s)-1]
return n
}
// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// truncate truncates this instance at index so that it contains only the
// first index children. index must be less than or equal to length.
func (s *children) truncate(index int) {
var toClear children
*s, toClear = (*s)[:index], (*s)[index:]
for len(toClear) > 0 {
toClear = toClear[copy(toClear, nilChildren):]
}
}
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
// * len(children) == 0, len(items) unconstrained
// * len(children) == len(items) + 1
type node struct {
items items
children children
cow *copyOnWriteContext
}
func (n *node) mutableFor(cow *copyOnWriteContext) *node {
if n.cow == cow {
return n
}
out := cow.newNode()
if cap(out.items) >= len(n.items) {
out.items = out.items[:len(n.items)]
} else {
out.items = make(items, len(n.items), cap(n.items))
}
copy(out.items, n.items)
// Copy children
if cap(out.children) >= len(n.children) {
out.children = out.children[:len(n.children)]
} else {
out.children = make(children, len(n.children), cap(n.children))
}
copy(out.children, n.children)
return out
}
func (n *node) mutableChild(i int) *node {
c := n.children[i].mutableFor(n.cow)
n.children[i] = c
return c
}
// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
item := n.items[i]
next := n.cow.newNode()
next.items = append(next.items, n.items[i+1:]...)
n.items.truncate(i)
if len(n.children) > 0 {
next.children = append(next.children, n.children[i+1:]...)
n.children.truncate(i + 1)
}
return item, next
}
// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
if len(n.children[i].items) < maxItems {
return false
}
first := n.mutableChild(i)
item, second := first.split(maxItems / 2)
n.items.insertAt(i, item)
n.children.insertAt(i+1, second)
return true
}
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
// found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
i, found := n.items.find(item)
if found {
out := n.items[i]
n.items[i] = item
return out
}
if len(n.children) == 0 {
n.items.insertAt(i, item)
return nil
}
if n.maybeSplitChild(i, maxItems) {
inTree := n.items[i]
switch {
case item.Less(inTree):
// no change, we want first split node
case inTree.Less(item):
i++ // we want second split node
default:
out := n.items[i]
n.items[i] = item
return out
}
}
return n.mutableChild(i).insert(item, maxItems)
}
// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
i, found := n.items.find(key)
if found {
return n.items[i]
} else if len(n.children) > 0 {
return n.children[i].get(key)
}
return nil
}
// min returns the first item in the subtree.
func min(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[0]
}
if len(n.items) == 0 {
return nil
}
return n.items[0]
}
// max returns the last item in the subtree.
func max(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[len(n.children)-1]
}
if len(n.items) == 0 {
return nil
}
return n.items[len(n.items)-1]
}
// toRemove details what item to remove in a node.remove call.
type toRemove int
const (
removeItem toRemove = iota // removes the given item
removeMin // removes smallest item in the subtree
removeMax // removes largest item in the subtree
)
// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
var i int
var found bool
switch typ {
case removeMax:
if len(n.children) == 0 {
return n.items.pop()
}
i = len(n.items)
case removeMin:
if len(n.children) == 0 {
return n.items.removeAt(0)
}
i = 0
case removeItem:
i, found = n.items.find(item)
if len(n.children) == 0 {
if found {
return n.items.removeAt(i)
}
return nil
}
default:
panic("invalid type")
}
// If we get to here, we have children.
if len(n.children[i].items) <= minItems {
return n.growChildAndRemove(i, item, minItems, typ)
}
child := n.mutableChild(i)
// Either we had enough items to begin with, or we've done some
// merging/stealing, because we've got enough now and we're ready to return
// stuff.
if found {
// The item exists at index 'i', and the child we've selected can give us a
// predecessor, since if we've gotten here it's got > minItems items in it.
out := n.items[i]
// We use our special-case 'remove' call with typ=maxItem to pull the
// predecessor of item i (the rightmost leaf of our immediate left child)
// and set it into where we pulled the item from.
n.items[i] = child.remove(nil, minItems, removeMax)
return out
}
// Final recursive call. Once we're here, we know that the item isn't in this
// node and that the child is big enough to remove from.
return child.remove(item, minItems, typ)
}
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
// 1) item is in this node
// 2) item is in child
// In both cases, we need to handle the two subcases:
// A) node has enough values that it can spare one
// B) node doesn't have enough values
// For the latter, we have to check:
// a) left sibling has node to spare
// b) right sibling has node to spare
// c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
if i > 0 && len(n.children[i-1].items) > minItems {
// Steal from left child
child := n.mutableChild(i)
stealFrom := n.mutableChild(i - 1)
stolenItem := stealFrom.items.pop()
child.items.insertAt(0, n.items[i-1])
n.items[i-1] = stolenItem
if len(stealFrom.children) > 0 {
child.children.insertAt(0, stealFrom.children.pop())
}
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
// steal from right child
child := n.mutableChild(i)
stealFrom := n.mutableChild(i + 1)
stolenItem := stealFrom.items.removeAt(0)
child.items = append(child.items, n.items[i])
n.items[i] = stolenItem
if len(stealFrom.children) > 0 {
child.children = append(child.children, stealFrom.children.removeAt(0))
}
} else {
if i >= len(n.items) {
i--
}
child := n.mutableChild(i)
// merge with right child
mergeItem := n.items.removeAt(i)
mergeChild := n.children.removeAt(i + 1)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
n.cow.freeNode(mergeChild)
}
return n.remove(item, minItems, typ)
}
type direction int
const (
descend = direction(-1)
ascend = direction(+1)
)
// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just a
// "greaterThan" or "lessThan" query.
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
var ok bool
switch dir {
case ascend:
for i := 0; i < len(n.items); i++ {
if start != nil && n.items[i].Less(start) {
continue
}
if len(n.children) > 0 {
if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
hit = true
continue
}
hit = true
if stop != nil && !n.items[i].Less(stop) {
return hit, false
}
if !iter(n.items[i]) {
return hit, false
}
}
if len(n.children) > 0 {
if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
case descend:
for i := len(n.items) - 1; i >= 0; i-- {
if start != nil && !n.items[i].Less(start) {
if !includeStart || hit || start.Less(n.items[i]) {
continue
}
}
if len(n.children) > 0 {
if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
if stop != nil && !stop.Less(n.items[i]) {
return hit, false // continue
}
hit = true
if !iter(n.items[i]) {
return hit, false
}
}
if len(n.children) > 0 {
if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
}
return hit, true
}
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
for _, c := range n.children {
c.print(w, level+1)
}
}
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
degree int
length int
root *node
cow *copyOnWriteContext
}
// copyOnWriteContext pointers determine node ownership... a tree with a write
// context equivalent to a node's write context is allowed to modify that node.
// A tree whose write context does not match a node's is not allowed to modify
// it, and must create a new, writable copy (i.e., it's a Clone).
//
// When doing any write operation, we maintain the invariant that the current
// node's context is equal to the context of the tree that requested the write.
// We do this by, before we descend into any node, creating a copy with the
// correct context if the contexts don't match.
//
// Since the node we're currently visiting on any write has the requesting
// tree's context, that node is modifiable in place. Children of that node may
// not share context, but before we descend into them, we'll make a mutable
// copy.
type copyOnWriteContext struct {
freelist *FreeList
}
// Clone clones the btree, lazily. Clone should not be called concurrently,
// but the original tree (t) and the new tree (t2) can be used concurrently
// once the Clone call completes.
//
// The internal tree structure of t is marked read-only and shared between t and
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
// whenever one of the shared nodes would have been modified. Read operations
// should have no performance degradation. Write operations for both t and t2
// will initially experience minor slow-downs caused by additional allocs and
// copies due to the aforementioned copy-on-write logic, but should converge to
// the original performance characteristics of the original tree.
func (t *BTree) Clone() (t2 *BTree) {
// Create two entirely new copy-on-write contexts.
// This operation effectively creates three trees:
// the original, shared nodes (old b.cow)
// the new b.cow nodes
// the new out.cow nodes
cow1, cow2 := *t.cow, *t.cow
out := *t
t.cow = &cow1
out.cow = &cow2
return &out
}
// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
return t.degree*2 - 1
}
// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
return t.degree - 1
}
func (c *copyOnWriteContext) newNode() (n *node) {
n = c.freelist.newNode()
n.cow = c
return
}
type freeType int
const (
ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
ftStored // node was stored in the freelist for later use
ftNotOwned // node was ignored by COW, since it's owned by another one
)
// freeNode frees a node within a given COW context, if it's owned by that
// context. It returns what happened to the node (see freeType const
// documentation).
func (c *copyOnWriteContext) freeNode(n *node) freeType {
if n.cow == c {
// clear to allow GC
n.items.truncate(0)
n.children.truncate(0)
n.cow = nil
if c.freelist.freeNode(n) {
return ftStored
} else {
return ftFreelistFull
}
} else {
return ftNotOwned
}
}
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
if item == nil {
panic("nil item being added to BTree")
}
if t.root == nil {
t.root = t.cow.newNode()
t.root.items = append(t.root.items, item)
t.length++
return nil
} else {
t.root = t.root.mutableFor(t.cow)
if len(t.root.items) >= t.maxItems() {
item2, second := t.root.split(t.maxItems() / 2)
oldroot := t.root
t.root = t.cow.newNode()
t.root.items = append(t.root.items, item2)
t.root.children = append(t.root.children, oldroot, second)
}
}
out := t.root.insert(item, t.maxItems())
if out == nil {
t.length++
}
return out
}
// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
return t.deleteItem(item, removeItem)
}
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
return t.deleteItem(nil, removeMin)
}
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
return t.deleteItem(nil, removeMax)
}
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
if t.root == nil || len(t.root.items) == 0 {
return nil
}
t.root = t.root.mutableFor(t.cow)
out := t.root.remove(item, t.minItems(), typ)
if len(t.root.items) == 0 && len(t.root.children) > 0 {
oldroot := t.root
t.root = t.root.children[0]
t.cow.freeNode(oldroot)
}
if out != nil {
t.length--
}
return out
}
// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
}
// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, nil, pivot, false, false, iterator)
}
// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, pivot, nil, true, false, iterator)
}
// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, nil, nil, false, false, iterator)
}
// DescendRange calls the iterator for every value in the tree within the range
// [lessOrEqual, greaterThan), until iterator returns false.
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
}
// DescendLessOrEqual calls the iterator for every value in the tree within the range
// [pivot, first], until iterator returns false.
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, pivot, nil, true, false, iterator)
}
// DescendGreaterThan calls the iterator for every value in the tree within
// the range (pivot, last], until iterator returns false.
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, nil, pivot, false, false, iterator)
}
// Descend calls the iterator for every value in the tree within the range
// [last, first], until iterator returns false.
func (t *BTree) Descend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, nil, nil, false, false, iterator)
}
// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
if t.root == nil {
return nil
}
return t.root.get(key)
}
// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
return min(t.root)
}
// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
return max(t.root)
}
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
return t.Get(key) != nil
}
// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
return t.length
}
// Clear removes all items from the btree. If addNodesToFreelist is true,
// t's nodes are added to its freelist as part of this call, until the freelist
// is full. Otherwise, the root node is simply dereferenced and the subtree
// left to Go's normal GC processes.
//
// This can be much faster
// than calling Delete on all elements, because that requires finding/removing
// each element in the tree and updating the tree accordingly. It also is
// somewhat faster than creating a new tree to replace the old one, because
// nodes from the old tree are reclaimed into the freelist for use by the new
// one, instead of being lost to the garbage collector.
//
// This call takes:
// O(1): when addNodesToFreelist is false, this is a single operation.
// O(1): when the freelist is already full, it breaks out immediately
// O(freelist size): when the freelist is empty and the nodes are all owned
// by this tree, nodes are added to the freelist until full.
// O(tree size): when all nodes are owned by another tree, all nodes are
// iterated over looking for nodes to add to the freelist, and due to
// ownership, none are.
func (t *BTree) Clear(addNodesToFreelist bool) {
if t.root != nil && addNodesToFreelist {
t.root.reset(t.cow)
}
t.root, t.length = nil, 0
}
// reset returns a subtree to the freelist. It breaks out immediately if the
// freelist is full, since the only benefit of iterating is to fill that
// freelist up. Returns true if parent reset call should continue.
func (n *node) reset(c *copyOnWriteContext) bool {
for _, child := range n.children {
if !child.reset(c) {
return false
}
}
return c.freeNode(n) != ftFreelistFull
}
// Int implements the Item interface for integers.
type Int int
// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
return a < b.(Int)
}
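
A minimal usage sketch of the API above (New, ReplaceOrInsert, Get, AscendRange, Clone, and a shared FreeList); the inserted values and the printed results are illustrative only:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// Build a 2-3-4 tree (degree 2) and insert a few ints; btree.Int implements Item.
	tr := btree.New(2)
	for _, v := range []int{5, 1, 4, 2, 3} {
		tr.ReplaceOrInsert(btree.Int(v))
	}
	fmt.Println(tr.Len())             // 5
	fmt.Println(tr.Has(btree.Int(3))) // true
	fmt.Println(tr.Get(btree.Int(9))) // <nil>

	// Ascend over the half-open range [2, 4): visits 2, then 3.
	tr.AscendRange(btree.Int(2), btree.Int(4), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})

	// Clone is a lazy copy-on-write copy; deleting from the clone leaves tr intact.
	cp := tr.Clone()
	cp.Delete(btree.Int(5))
	fmt.Println(tr.Len(), cp.Len()) // 5 4

	// Two trees sharing one FreeList recycle freed nodes between them and are
	// safe for concurrent writes, per the FreeList documentation above.
	fl := btree.NewFreeList(btree.DefaultFreeListSize)
	a, b := btree.NewWithFreeList(2, fl), btree.NewWithFreeList(2, fl)
	a.ReplaceOrInsert(btree.Int(1))
	b.ReplaceOrInsert(btree.Int(2))
}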

76
vendor/github.com/google/btree/btree_mem.go generated vendored Normal file
View file

@ -0,0 +1,76 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
// This binary compares memory usage between btree and gollrb.
package main
import (
"flag"
"fmt"
"math/rand"
"runtime"
"time"
"github.com/google/btree"
"github.com/petar/GoLLRB/llrb"
)
var (
size = flag.Int("size", 1000000, "size of the tree to build")
degree = flag.Int("degree", 8, "degree of btree")
gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
)
func main() {
flag.Parse()
vals := rand.Perm(*size)
var t, v interface{}
v = vals
var stats runtime.MemStats
for i := 0; i < 10; i++ {
runtime.GC()
}
fmt.Println("-------- BEFORE ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
start := time.Now()
if *gollrb {
tr := llrb.New()
for _, v := range vals {
tr.ReplaceOrInsert(llrb.Int(v))
}
t = tr // keep it around
} else {
tr := btree.New(*degree)
for _, v := range vals {
tr.ReplaceOrInsert(btree.Int(v))
}
t = tr // keep it around
}
fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
fmt.Println("-------- AFTER ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
for i := 0; i < 10; i++ {
runtime.GC()
}
fmt.Println("-------- AFTER GC ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
if t == v {
fmt.Println("to make sure vals and tree aren't GC'd")
}
}

203
vendor/github.com/googleapis/gnostic/LICENSE generated vendored Normal file
View file

@ -0,0 +1,203 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,43 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
// Context contains state of the compiler as it traverses a document.
type Context struct {
Parent *Context
Name string
ExtensionHandlers *[]ExtensionHandler
}
// NewContextWithExtensions returns a new object representing the compiler state
func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
}
// NewContext returns a new object representing the compiler state
func NewContext(name string, parent *Context) *Context {
if parent != nil {
return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
}
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
}
// Description returns a text description of the compiler state
func (context *Context) Description() string {
if context.Parent != nil {
return context.Parent.Description() + "." + context.Name
}
return context.Name
}
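
A small sketch of how these contexts nest as the compiler descends into a document; the segment names used here are made up for illustration:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Each descent pushes a new named Context whose parent chain records
	// where the compiler currently is; Description joins the names with dots.
	root := compiler.NewContext("document", nil)
	paths := compiler.NewContext("paths", root)
	op := compiler.NewContext("get", paths)
	fmt.Println(op.Description()) // document.paths.get
}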

61
vendor/github.com/googleapis/gnostic/compiler/error.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
// Error represents compiler errors and their location in the document.
type Error struct {
Context *Context
Message string
}
// NewError creates an Error.
func NewError(context *Context, message string) *Error {
return &Error{Context: context, Message: message}
}
// Error returns the string value of an Error.
func (err *Error) Error() string {
if err.Context == nil {
return "ERROR " + err.Message
}
return "ERROR " + err.Context.Description() + " " + err.Message
}
// ErrorGroup is a container for groups of Error values.
type ErrorGroup struct {
Errors []error
}
// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty.
func NewErrorGroupOrNil(errors []error) error {
if len(errors) == 0 {
return nil
} else if len(errors) == 1 {
return errors[0]
} else {
return &ErrorGroup{Errors: errors}
}
}
func (group *ErrorGroup) Error() string {
result := ""
for i, err := range group.Errors {
if i > 0 {
result += "\n"
}
result += err.Error()
}
return result
}
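
For illustration, a sketch of how Error and ErrorGroup combine; the field name in the message is hypothetical:

package main

import (
	"errors"
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	ctx := compiler.NewContext("info", nil)

	// A single Error carries its Context description in the message.
	err := compiler.NewError(ctx, "missing required field 'title'")
	fmt.Println(err) // ERROR info missing required field 'title'

	// NewErrorGroupOrNil collapses zero errors to nil, one error to itself,
	// and several errors into an ErrorGroup joined with newlines.
	fmt.Println(compiler.NewErrorGroupOrNil(nil) == nil) // true
	fmt.Println(compiler.NewErrorGroupOrNil([]error{err, errors.New("second problem")}))
}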

View file

@ -0,0 +1,101 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"errors"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
ext_plugin "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v2"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// HandleExtension calls a binary extension handler.
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
handled := false
var errFromPlugin error
var outFromPlugin *any.Any
if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
if outFromPlugin == nil {
continue
} else {
handled = true
break
}
}
}
return handled, outFromPlugin, errFromPlugin
}
func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
binary, _ := yaml.Marshal(in)
request := &ext_plugin.ExtensionHandlerRequest{}
version := &ext_plugin.Version{}
version.Major = 0
version.Minor = 1
version.Patch = 0
request.CompilerVersion = version
request.Wrapper = &ext_plugin.Wrapper{}
request.Wrapper.Version = "v2"
request.Wrapper.Yaml = string(binary)
request.Wrapper.ExtensionName = extensionName
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
fmt.Printf("Error: %+v\n", err)
return nil, err
}
response := &ext_plugin.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil {
fmt.Printf("Error: %+v\n", err)
fmt.Printf("%s\n", string(output))
return nil, err
}
if !response.Handled {
return nil, nil
}
if len(response.Error) != 0 {
message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
return nil, errors.New(message)
}
return response.Value, nil
}
return nil, nil
}
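
A hedged sketch of how a caller might wire these handlers up; the binary name "gnostic-x-sample" and the extension value are placeholders, not something this package ships:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Each handler is an external binary resolved by name when an extension
	// needs processing; "gnostic-x-sample" is a placeholder.
	handlers := []compiler.ExtensionHandler{{Name: "gnostic-x-sample"}}
	ctx := compiler.NewContextWithExtensions("document", nil, &handlers)

	// The extension value is serialized to YAML and piped to the handler,
	// which replies with a protobuf-encoded ExtensionHandlerResponse.
	value := yaml.MapSlice{{Key: "x-sample", Value: "hello"}}
	handled, out, err := compiler.HandleExtension(ctx, value, "x-sample")
	fmt.Println(handled, out, err)
}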

View file

@ -0,0 +1,197 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"fmt"
"gopkg.in/yaml.v2"
"regexp"
"sort"
"strconv"
)
// compiler helper functions, usually called from generated code
// UnpackMap gets a yaml.MapSlice if possible.
func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
m, ok := in.(yaml.MapSlice)
if ok {
return m, true
}
// do we have an empty array?
a, ok := in.([]interface{})
if ok && len(a) == 0 {
// if so, return an empty map
return yaml.MapSlice{}, true
}
return nil, false
}
// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
func SortedKeysForMap(m yaml.MapSlice) []string {
keys := make([]string, 0)
for _, item := range m {
keys = append(keys, item.Key.(string))
}
sort.Strings(keys)
return keys
}
// MapHasKey returns true if a yaml.MapSlice contains a specified key.
func MapHasKey(m yaml.MapSlice, key string) bool {
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok && key == itemKey {
return true
}
}
return false
}
// MapValueForKey gets the value of a map value for a specified key.
func MapValueForKey(m yaml.MapSlice, key string) interface{} {
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok && key == itemKey {
return item.Value
}
}
return nil
}
// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible.
func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
stringArray := make([]string, 0)
for _, item := range interfaceArray {
v, ok := item.(string)
if ok {
stringArray = append(stringArray, v)
}
}
return stringArray
}
// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
missingKeys := make([]string, 0)
for _, k := range requiredKeys {
if !MapHasKey(m, k) {
missingKeys = append(missingKeys, k)
}
}
return missingKeys
}
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
invalidKeys := make([]string, 0)
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok {
key := itemKey
found := false
// does the key match an allowed key?
for _, allowedKey := range allowedKeys {
if key == allowedKey {
found = true
break
}
}
if !found {
// does the key match an allowed pattern?
for _, allowedPattern := range allowedPatterns {
if allowedPattern.MatchString(key) {
found = true
break
}
}
if !found {
invalidKeys = append(invalidKeys, key)
}
}
}
}
return invalidKeys
}
// DescribeMap describes a map (for debugging purposes).
func DescribeMap(in interface{}, indent string) string {
description := ""
m, ok := in.(map[string]interface{})
if ok {
keys := make([]string, 0)
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := m[k]
description += fmt.Sprintf("%s%s:\n", indent, k)
description += DescribeMap(v, indent+" ")
}
return description
}
a, ok := in.([]interface{})
if ok {
for i, v := range a {
description += fmt.Sprintf("%s%d:\n", indent, i)
description += DescribeMap(v, indent+" ")
}
return description
}
description += fmt.Sprintf("%s%+v\n", indent, in)
return description
}
// PluralProperties returns the string "properties" pluralized.
func PluralProperties(count int) string {
if count == 1 {
return "property"
}
return "properties"
}
// StringArrayContainsValue returns true if a string array contains a specified value.
func StringArrayContainsValue(array []string, value string) bool {
for _, item := range array {
if item == value {
return true
}
}
return false
}
// StringArrayContainsValues returns true if a string array contains all of a list of specified values.
func StringArrayContainsValues(array []string, values []string) bool {
for _, value := range values {
if !StringArrayContainsValue(array, value) {
return false
}
}
return true
}
// StringValue returns the string value of an item.
func StringValue(item interface{}) (value string, ok bool) {
value, ok = item.(string)
if ok {
return value, ok
}
intValue, ok := item.(int)
if ok {
return strconv.Itoa(intValue), true
}
return "", false
}
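
A short sketch of the map helpers on a hand-built document; the YAML snippet is only an example input:

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Unmarshal into a yaml.MapSlice, the ordered map type the helpers expect.
	var doc yaml.MapSlice
	input := []byte("swagger: \"2.0\"\ninfo:\n  title: demo\n")
	if err := yaml.Unmarshal(input, &doc); err != nil {
		panic(err)
	}

	m, ok := compiler.UnpackMap(doc)
	fmt.Println(ok)                               // true
	fmt.Println(compiler.SortedKeysForMap(m))     // [info swagger]
	fmt.Println(compiler.MapHasKey(m, "swagger")) // true
	fmt.Println(compiler.MapValueForKey(m, "swagger"))
	fmt.Println(compiler.MissingKeysInMap(m, []string{"swagger", "paths"})) // [paths]
}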

16
vendor/github.com/googleapis/gnostic/compiler/main.go generated vendored Normal file
View file

@ -0,0 +1,16 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compiler provides support functions to generated compiler code.
package compiler

173
vendor/github.com/googleapis/gnostic/compiler/reader.go generated vendored Normal file
View file

@ -0,0 +1,173 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"errors"
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"net/http"
"net/url"
"path/filepath"
"strings"
)
var fileCache map[string][]byte
var infoCache map[string]interface{}
var count int64
var verboseReader = false
func initializeFileCache() {
if fileCache == nil {
fileCache = make(map[string][]byte, 0)
}
}
func initializeInfoCache() {
if infoCache == nil {
infoCache = make(map[string]interface{}, 0)
}
}
// FetchFile gets a specified file from the local filesystem or a remote location.
func FetchFile(fileurl string) ([]byte, error) {
initializeFileCache()
bytes, ok := fileCache[fileurl]
if ok {
if verboseReader {
log.Printf("Cache hit %s", fileurl)
}
return bytes, nil
}
if verboseReader {
log.Printf("Fetching %s", fileurl)
}
response, err := http.Get(fileurl)
if err != nil {
return nil, err
}
if response.StatusCode != 200 {
return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status))
}
defer response.Body.Close()
bytes, err = ioutil.ReadAll(response.Body)
if err == nil {
fileCache[fileurl] = bytes
}
return bytes, err
}
// ReadBytesForFile reads the bytes of a file.
func ReadBytesForFile(filename string) ([]byte, error) {
// is the filename a url?
fileurl, _ := url.Parse(filename)
if fileurl.Scheme != "" {
// yes, fetch it
bytes, err := FetchFile(filename)
if err != nil {
return nil, err
}
return bytes, nil
}
// no, it's a local filename
bytes, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return bytes, nil
}
// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
initializeInfoCache()
cachedInfo, ok := infoCache[filename]
if ok {
if verboseReader {
log.Printf("Cache hit info for file %s", filename)
}
return cachedInfo, nil
}
if verboseReader {
log.Printf("Reading info for file %s", filename)
}
var info yaml.MapSlice
err := yaml.Unmarshal(bytes, &info)
if err != nil {
return nil, err
}
infoCache[filename] = info
return info, nil
}
// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
initializeInfoCache()
{
info, ok := infoCache[ref]
if ok {
if verboseReader {
log.Printf("Cache hit for ref %s#%s", basefile, ref)
}
return info, nil
}
}
if verboseReader {
log.Printf("Reading info for ref %s#%s", basefile, ref)
}
count = count + 1
basedir, _ := filepath.Split(basefile)
parts := strings.Split(ref, "#")
var filename string
if parts[0] != "" {
filename = basedir + parts[0]
} else {
filename = basefile
}
bytes, err := ReadBytesForFile(filename)
if err != nil {
return nil, err
}
info, err := ReadInfoFromBytes(filename, bytes)
if err != nil {
log.Printf("File error: %v\n", err)
} else {
if len(parts) > 1 {
path := strings.Split(parts[1], "/")
for i, key := range path {
if i > 0 {
m, ok := info.(yaml.MapSlice)
if ok {
found := false
for _, section := range m {
if section.Key == key {
info = section.Value
found = true
}
}
if !found {
infoCache[ref] = nil
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
}
}
}
}
}
}
infoCache[ref] = info
return info, nil
}
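
A usage sketch for the readers above; "openapi.yaml" and the "#/definitions" fragment are placeholder inputs:

package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// ReadBytesForFile accepts a local path or a URL; remote fetches are
	// memoized in the package-level file cache.
	data, err := compiler.ReadBytesForFile("openapi.yaml")
	if err != nil {
		log.Fatal(err)
	}

	// Parsed documents are memoized in the info cache keyed by filename.
	info, err := compiler.ReadInfoFromBytes("openapi.yaml", data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", info)

	// Resolve a "#/..."-style fragment relative to the base file.
	defs, err := compiler.ReadInfoForRef("openapi.yaml", "#/definitions")
	fmt.Println(defs, err)
}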

View file

@ -0,0 +1,219 @@
// Code generated by protoc-gen-go.
// source: extension.proto
// DO NOT EDIT!
/*
Package openapiextension_v1 is a generated protocol buffer package.
It is generated from these files:
extension.proto
It has these top-level messages:
Version
ExtensionHandlerRequest
ExtensionHandlerResponse
Wrapper
*/
package openapiextension_v1
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The version number of the OpenAPI compiler.
type Version struct {
Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
}
func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Version) GetMajor() int32 {
if m != nil {
return m.Major
}
return 0
}
func (m *Version) GetMinor() int32 {
if m != nil {
return m.Minor
}
return 0
}
func (m *Version) GetPatch() int32 {
if m != nil {
return m.Patch
}
return 0
}
func (m *Version) GetSuffix() string {
if m != nil {
return m.Suffix
}
return ""
}
// An encoded Request is written to the ExtensionHandler's stdin.
type ExtensionHandlerRequest struct {
// The OpenAPI descriptions that were explicitly listed on the command line.
// The specifications will appear in the order they are specified to openapic.
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"`
// The version number of the OpenAPI compiler.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
}
func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerRequest) ProtoMessage() {}
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
if m != nil {
return m.Wrapper
}
return nil
}
func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
if m != nil {
return m.CompilerVersion
}
return nil
}
// The extension writes an encoded ExtensionHandlerResponse to stdout.
type ExtensionHandlerResponse struct {
// true if the extension is handled by the extension handler; false otherwise
Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"`
// Error message. If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
// This should be used to indicate errors which prevent the extension from
// operating as intended. Errors which indicate a problem in gnostic
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"`
// text output
Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerResponse) ProtoMessage() {}
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *ExtensionHandlerResponse) GetHandled() bool {
if m != nil {
return m.Handled
}
return false
}
func (m *ExtensionHandlerResponse) GetError() []string {
if m != nil {
return m.Error
}
return nil
}
func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any {
if m != nil {
return m.Value
}
return nil
}
type Wrapper struct {
// version of the OpenAPI specification in which this extension was written.
Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
// Name of the extension
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"`
// Must be valid YAML for the proto
Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"`
}
func (m *Wrapper) Reset() { *m = Wrapper{} }
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
func (*Wrapper) ProtoMessage() {}
func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Wrapper) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *Wrapper) GetExtensionName() string {
if m != nil {
return m.ExtensionName
}
return ""
}
func (m *Wrapper) GetYaml() string {
if m != nil {
return m.Yaml
}
return ""
}
func init() {
proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
}
func init() { proto.RegisterFile("extension.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 355 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40,
0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22,
0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb,
0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50,
0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4,
0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4,
0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0,
0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34,
0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a,
0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a,
0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66,
0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2,
0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d,
0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3,
0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c,
0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa,
0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab,
0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3,
0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62,
0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23,
0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1,
0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52,
0x02, 0x00, 0x00,
}
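// Illustrative sketch, not part of the vendored sources: the generated types
// above are ordinary protobuf messages, so they can be built and round-tripped
// with the proto package. The gnostic extensions import path is assumed here.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	v := &openapiextension_v1.Version{Major: 2, Minor: 0, Suffix: "rc1"}
	data, err := proto.Marshal(v)
	if err != nil {
		panic(err)
	}
	var out openapiextension_v1.Version
	if err := proto.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	// The generated getters are nil-safe accessors over the decoded message.
	fmt.Println(out.GetMajor(), out.GetMinor(), out.GetSuffix()) // 2 0 rc1
}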


@ -0,0 +1,82 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openapiextension_v1
import (
"fmt"
"io/ioutil"
"os"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
)
type documentHandler func(version string, extensionName string, document string)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
func forInputYamlFromOpenapic(handler documentHandler) {
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Println("File error:", err.Error())
os.Exit(1)
}
if len(data) == 0 {
fmt.Println("No input data.")
os.Exit(1)
}
request := &ExtensionHandlerRequest{}
err = proto.Unmarshal(data, request)
if err != nil {
fmt.Println("Input error:", err.Error())
os.Exit(1)
}
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
}
// ProcessExtension calls the handler for a specified extension.
func ProcessExtension(handleExtension extensionHandler) {
response := &ExtensionHandlerResponse{}
forInputYamlFromOpenapic(
func(version string, extensionName string, yamlInput string) {
var newObject proto.Message
var err error
handled, newObject, err := handleExtension(extensionName, yamlInput)
if !handled {
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
// If we reach here, then the extension is handled
response.Handled = true
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
response.Value, err = ptypes.MarshalAny(newObject)
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
})
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
}
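// Illustrative sketch, not part of the vendored sources: a minimal openapic
// extension plugin built on ProcessExtension. The "x-sample" extension name
// and the use of any.Any as the returned message are assumptions made purely
// for illustration; a real plugin would return its own generated proto type.
package main

import (
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
)

// handleSampleExtension reports whether it recognizes the extension and, if
// so, returns a proto message for ProcessExtension to wrap and write to stdout.
func handleSampleExtension(name string, yamlInput string) (bool, proto.Message, error) {
	if name != "x-sample" {
		return false, nil, nil // not handled by this plugin
	}
	return true, &any.Any{TypeUrl: "x-sample", Value: []byte(yamlInput)}, nil
}

func main() {
	openapiextension_v1.ProcessExtension(handleSampleExtension)
}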

7
vendor/github.com/gregjones/httpcache/LICENSE.txt generated vendored Normal file

@ -0,0 +1,7 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -0,0 +1,61 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache
import (
"bytes"
"crypto/md5"
"encoding/hex"
"github.com/peterbourgon/diskv"
"io"
)
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
d *diskv.Diskv
}
// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
key = keyToFilename(key)
resp, err := c.d.Read(key)
if err != nil {
return []byte{}, false
}
return resp, true
}
// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
key = keyToFilename(key)
c.d.WriteStream(key, bytes.NewReader(resp), true)
}
// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
key = keyToFilename(key)
c.d.Erase(key)
}
func keyToFilename(key string) string {
h := md5.New()
io.WriteString(h, key)
return hex.EncodeToString(h.Sum(nil))
}
// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
return &Cache{
d: diskv.New(diskv.Options{
BasePath: basePath,
CacheSizeMax: 100 * 1024 * 1024, // 100MB
}),
}
}
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
return &Cache{d}
}
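// Illustrative sketch, not part of the vendored sources: wiring the disk-backed
// cache into an httpcache Transport. The "/tmp/httpcache" path and example URL
// are arbitrary placeholder values.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	cache := diskcache.New("/tmp/httpcache") // responses are persisted under this directory
	client := &http.Client{Transport: httpcache.NewTransport(cache)}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	io.Copy(ioutil.Discard, resp.Body) // the body must be read to EOF before the response is stored
	resp.Body.Close()
	// Repeated GETs for the same URL may then be answered from the disk cache;
	// such responses carry the X-From-Cache header when MarkCachedResponses is set.
	fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
}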

551
vendor/github.com/gregjones/httpcache/httpcache.go generated vendored Normal file

@ -0,0 +1,551 @@
// Package httpcache provides a http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache
import (
"bufio"
"bytes"
"errors"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"strings"
"sync"
"time"
)
const (
stale = iota
fresh
transparent
// XFromCache is the header added to responses that are returned from the cache
XFromCache = "X-From-Cache"
)
// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
// Get returns the []byte representation of a cached response and a bool
// set to true if the value isn't empty
Get(key string) (responseBytes []byte, ok bool)
// Set stores the []byte representation of a response against a key
Set(key string, responseBytes []byte)
// Delete removes the value associated with the key
Delete(key string)
}
// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
if req.Method == http.MethodGet {
return req.URL.String()
} else {
return req.Method + " " + req.URL.String()
}
}
// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
cachedVal, ok := c.Get(cacheKey(req))
if !ok {
return
}
b := bytes.NewBuffer(cachedVal)
return http.ReadResponse(bufio.NewReader(b), req)
}
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
mu sync.RWMutex
items map[string][]byte
}
// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
c.mu.RLock()
resp, ok = c.items[key]
c.mu.RUnlock()
return resp, ok
}
// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
c.mu.Lock()
c.items[key] = resp
c.mu.Unlock()
}
// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
c.mu.Lock()
delete(c.items, key)
c.mu.Unlock()
}
// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
c := &MemoryCache{items: map[string][]byte{}}
return c
}
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
// The RoundTripper interface actually used to make requests
// If nil, http.DefaultTransport is used
Transport http.RoundTripper
Cache Cache
// If true, responses returned from the cache will be given an extra header, X-From-Cache
MarkCachedResponses bool
}
// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
return &Transport{Cache: c, MarkCachedResponses: true}
}
// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
header = http.CanonicalHeaderKey(header)
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
return false
}
}
return true
}
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
cacheKey := cacheKey(req)
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
var cachedResp *http.Response
if cacheable {
cachedResp, err = CachedResponse(t.Cache, req)
} else {
// Need to invalidate an existing value
t.Cache.Delete(cacheKey)
}
transport := t.Transport
if transport == nil {
transport = http.DefaultTransport
}
if cacheable && cachedResp != nil && err == nil {
if t.MarkCachedResponses {
cachedResp.Header.Set(XFromCache, "1")
}
if varyMatches(cachedResp, req) {
// Can only use cached value if the new request doesn't Vary significantly
freshness := getFreshness(cachedResp.Header, req.Header)
if freshness == fresh {
return cachedResp, nil
}
if freshness == stale {
var req2 *http.Request
// Add validators if caller hasn't already done so
etag := cachedResp.Header.Get("etag")
if etag != "" && req.Header.Get("etag") == "" {
req2 = cloneRequest(req)
req2.Header.Set("if-none-match", etag)
}
lastModified := cachedResp.Header.Get("last-modified")
if lastModified != "" && req.Header.Get("last-modified") == "" {
if req2 == nil {
req2 = cloneRequest(req)
}
req2.Header.Set("if-modified-since", lastModified)
}
if req2 != nil {
req = req2
}
}
}
resp, err = transport.RoundTrip(req)
if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
// Replace the 304 response with the one from cache, but update with some new headers
endToEndHeaders := getEndToEndHeaders(resp.Header)
for _, header := range endToEndHeaders {
cachedResp.Header[header] = resp.Header[header]
}
resp = cachedResp
} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
// In case of transport failure and stale-if-error activated, returns cached content
// when available
return cachedResp, nil
} else {
if err != nil || resp.StatusCode != http.StatusOK {
t.Cache.Delete(cacheKey)
}
if err != nil {
return nil, err
}
}
} else {
reqCacheControl := parseCacheControl(req.Header)
if _, ok := reqCacheControl["only-if-cached"]; ok {
resp = newGatewayTimeoutResponse(req)
} else {
resp, err = transport.RoundTrip(req)
if err != nil {
return nil, err
}
}
}
if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
varyKey = http.CanonicalHeaderKey(varyKey)
fakeHeader := "X-Varied-" + varyKey
reqValue := req.Header.Get(varyKey)
if reqValue != "" {
resp.Header.Set(fakeHeader, reqValue)
}
}
switch req.Method {
case "GET":
// Delay caching until EOF is reached.
resp.Body = &cachingReadCloser{
R: resp.Body,
OnEOF: func(r io.Reader) {
resp := *resp
resp.Body = ioutil.NopCloser(r)
respBytes, err := httputil.DumpResponse(&resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
},
}
default:
respBytes, err := httputil.DumpResponse(resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
}
} else {
t.Cache.Delete(cacheKey)
}
return resp, nil
}
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")
// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
dateHeader := respHeaders.Get("date")
if dateHeader == "" {
err = ErrNoDateHeader
return
}
return time.Parse(time.RFC1123, dateHeader)
}
type realClock struct{}
func (c *realClock) since(d time.Time) time.Duration {
return time.Since(d)
}
type timer interface {
since(d time.Time) time.Duration
}
var clock timer = &realClock{}
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, s-maxage isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
if _, ok := reqCacheControl["no-cache"]; ok {
return transparent
}
if _, ok := respCacheControl["no-cache"]; ok {
return stale
}
if _, ok := reqCacheControl["only-if-cached"]; ok {
return fresh
}
date, err := Date(respHeaders)
if err != nil {
return stale
}
currentAge := clock.since(date)
var lifetime time.Duration
var zeroDuration time.Duration
// If a response includes both an Expires header and a max-age directive,
// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
if maxAge, ok := respCacheControl["max-age"]; ok {
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
} else {
expiresHeader := respHeaders.Get("Expires")
if expiresHeader != "" {
expires, err := time.Parse(time.RFC1123, expiresHeader)
if err != nil {
lifetime = zeroDuration
} else {
lifetime = expires.Sub(date)
}
}
}
if maxAge, ok := reqCacheControl["max-age"]; ok {
// the client is willing to accept a response whose age is no greater than the specified time in seconds
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
}
if minfresh, ok := reqCacheControl["min-fresh"]; ok {
// the client wants a response that will still be fresh for at least the specified number of seconds.
minfreshDuration, err := time.ParseDuration(minfresh + "s")
if err == nil {
currentAge = time.Duration(currentAge + minfreshDuration)
}
}
if maxstale, ok := reqCacheControl["max-stale"]; ok {
// Indicates that the client is willing to accept a response that has exceeded its expiration time.
// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
// its expiration time by no more than the specified number of seconds.
// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
//
// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
// return-value available here.
if maxstale == "" {
return fresh
}
maxstaleDuration, err := time.ParseDuration(maxstale + "s")
if err == nil {
currentAge = time.Duration(currentAge - maxstaleDuration)
}
}
if lifetime > currentAge {
return fresh
}
return stale
}
// canStaleOnError returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
var err error
lifetime := time.Duration(-1)
if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if lifetime >= 0 {
date, err := Date(respHeaders)
if err != nil {
return false
}
currentAge := clock.since(date)
if lifetime > currentAge {
return true
}
}
return false
}
func getEndToEndHeaders(respHeaders http.Header) []string {
// These headers are always hop-by-hop
hopByHopHeaders := map[string]struct{}{
"Connection": struct{}{},
"Keep-Alive": struct{}{},
"Proxy-Authenticate": struct{}{},
"Proxy-Authorization": struct{}{},
"Te": struct{}{},
"Trailers": struct{}{},
"Transfer-Encoding": struct{}{},
"Upgrade": struct{}{},
}
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
// any header listed in connection, if present, is also considered hop-by-hop
if strings.Trim(extra, " ") != "" {
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
}
}
endToEndHeaders := []string{}
for respHeader := range respHeaders {
if _, ok := hopByHopHeaders[respHeader]; !ok {
endToEndHeaders = append(endToEndHeaders, respHeader)
}
}
return endToEndHeaders
}
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
if _, ok := respCacheControl["no-store"]; ok {
return false
}
if _, ok := reqCacheControl["no-store"]; ok {
return false
}
return true
}
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
var braw bytes.Buffer
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
if err != nil {
panic(err)
}
return resp
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
type cacheControl map[string]string
func parseCacheControl(headers http.Header) cacheControl {
cc := cacheControl{}
ccHeader := headers.Get("Cache-Control")
for _, part := range strings.Split(ccHeader, ",") {
part = strings.Trim(part, " ")
if part == "" {
continue
}
if strings.ContainsRune(part, '=') {
keyval := strings.Split(part, "=")
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
} else {
cc[part] = ""
}
}
return cc
}
// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
var vals []string
for _, val := range headers[http.CanonicalHeaderKey(name)] {
fields := strings.Split(val, ",")
for i, f := range fields {
fields[i] = strings.TrimSpace(f)
}
vals = append(vals, fields...)
}
return vals
}
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
// Underlying ReadCloser.
R io.ReadCloser
// OnEOF is called with a copy of the content of R when EOF is reached.
OnEOF func(io.Reader)
buf bytes.Buffer // buf stores a copy of the content of R.
}
// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
n, err = r.R.Read(p)
r.buf.Write(p[:n])
if err == io.EOF {
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
}
return n, err
}
func (r *cachingReadCloser) Close() error {
return r.R.Close()
}
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
c := NewMemoryCache()
t := NewTransport(c)
return t
}
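// Illustrative sketch, not part of the vendored sources: the in-memory variant.
// Note that a GET response is only written to the cache once its body has been
// read to EOF (see cachingReadCloser above). The example URL is arbitrary.
package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	client := httpcache.NewMemoryCacheTransport().Client()
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		io.Copy(ioutil.Discard, resp.Body) // drain the body so the response can be cached
		resp.Body.Close()
		// Whether the second iteration hits the cache depends on the server's
		// cache-control, Expires, ETag and Last-Modified headers.
		fmt.Println(i, resp.Header.Get(httpcache.XFromCache))
	}
}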

223
vendor/github.com/hashicorp/golang-lru/2q.go generated vendored Normal file

@ -0,0 +1,223 @@
package lru
import (
"fmt"
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
// to recently added entries that have only been accessed once.
Default2QRecentRatio = 0.25
// Default2QGhostEntries is the default ratio of ghost
// entries kept to track entries recently evicted
Default2QGhostEntries = 0.50
)
// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, is
// computationally about 2x the cost, and adds some metadata
// overhead. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache struct {
size int
recentSize int
recent simplelru.LRUCache
frequent simplelru.LRUCache
recentEvict simplelru.LRUCache
lock sync.RWMutex
}
// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q(size int) (*TwoQueueCache, error) {
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
}
// New2QParams creates a new TwoQueueCache using the provided
// parameter values.
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
if size <= 0 {
return nil, fmt.Errorf("invalid size")
}
if recentRatio < 0.0 || recentRatio > 1.0 {
return nil, fmt.Errorf("invalid recent ratio")
}
if ghostRatio < 0.0 || ghostRatio > 1.0 {
return nil, fmt.Errorf("invalid ghost ratio")
}
// Determine the sub-sizes
recentSize := int(float64(size) * recentRatio)
evictSize := int(float64(size) * ghostRatio)
// Allocate the LRUs
recent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
frequent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
recentEvict, err := simplelru.NewLRU(evictSize, nil)
if err != nil {
return nil, err
}
// Initialize the cache
c := &TwoQueueCache{
size: size,
recentSize: recentSize,
recent: recent,
frequent: frequent,
recentEvict: recentEvict,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if this is a frequent value
if val, ok := c.frequent.Get(key); ok {
return val, ok
}
// If the value is contained in recent, then we
// promote it to frequent
if val, ok := c.recent.Peek(key); ok {
c.recent.Remove(key)
c.frequent.Add(key, val)
return val, ok
}
// No hit
return nil, false
}
// Add adds a value to the cache.
func (c *TwoQueueCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is frequently used already,
// and just update the value
if c.frequent.Contains(key) {
c.frequent.Add(key, value)
return
}
// Check if the value is recently used, and promote
// the value into the frequent list
if c.recent.Contains(key) {
c.recent.Remove(key)
c.frequent.Add(key, value)
return
}
// If the value was recently evicted, add it to the
// frequently used list
if c.recentEvict.Contains(key) {
c.ensureSpace(true)
c.recentEvict.Remove(key)
c.frequent.Add(key, value)
return
}
// Add to the recently seen list
c.ensureSpace(false)
c.recent.Add(key, value)
return
}
// ensureSpace is used to ensure we have space in the cache
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
// If we have space, nothing to do
recentLen := c.recent.Len()
freqLen := c.frequent.Len()
if recentLen+freqLen < c.size {
return
}
// If the recent buffer is larger than
// the target, evict from there
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
k, _, _ := c.recent.RemoveOldest()
c.recentEvict.Add(k, nil)
return
}
// Remove from the frequent list otherwise
c.frequent.RemoveOldest()
}
// Len returns the number of items in the cache.
func (c *TwoQueueCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.recent.Len() + c.frequent.Len()
}
// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.frequent.Keys()
k2 := c.recent.Keys()
return append(k1, k2...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
if c.frequent.Remove(key) {
return
}
if c.recent.Remove(key) {
return
}
if c.recentEvict.Remove(key) {
return
}
}
// Purge is used to completely clear the cache.
func (c *TwoQueueCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.recent.Purge()
c.frequent.Purge()
c.recentEvict.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.frequent.Contains(key) || c.recent.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.frequent.Peek(key); ok {
return val, ok
}
return c.recent.Peek(key)
}
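// Illustrative sketch, not part of the vendored sources: basic TwoQueueCache
// use. The cache size of 128 is an arbitrary example value.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	if v, ok := cache.Get("a"); ok {
		// Get promotes "a" from the recent list to the frequent list.
		fmt.Println("a =", v)
	}
	fmt.Println("len:", cache.Len())
}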

362
vendor/github.com/hashicorp/golang-lru/LICENSE generated vendored Normal file

@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

257
vendor/github.com/hashicorp/golang-lru/arc.go generated vendored Normal file

@ -0,0 +1,257 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
// ARC is an enhancement over the standard LRU cache in that it tracks both
// frequency and recency of use. This avoids a burst in access to new
// entries from evicting the frequently used older entries. It adds some
// additional tracking overhead to a standard LRU cache; computationally
// it is roughly 2x the cost, and the extra memory overhead is linear
// with the size of the cache. ARC has been patented by IBM, but is
// similar to the TwoQueueCache (2Q), which requires setting parameters.
type ARCCache struct {
size int // Size is the total capacity of the cache
p int // P is the dynamic preference towards T1 or T2
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
lock sync.RWMutex
}
// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
// Create the sub LRUs
b1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
b2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
// Initialize the ARC
c := &ARCCache{
size: size,
p: 0,
t1: t1,
b1: b1,
t2: t2,
b2: b2,
}
return c, nil
}
// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// If the value is contained in T1 (recent), then
// promote it to T2 (frequent)
if val, ok := c.t1.Peek(key); ok {
c.t1.Remove(key)
c.t2.Add(key, val)
return val, ok
}
// Check if the value is contained in T2 (frequent)
if val, ok := c.t2.Get(key); ok {
return val, ok
}
// No hit
return nil, false
}
// Add adds a value to the cache.
func (c *ARCCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
// Check if the value is contained in T1 (recent), and potentially
// promote it to frequent T2
if c.t1.Contains(key) {
c.t1.Remove(key)
c.t2.Add(key, value)
return
}
// Check if the value is already in T2 (frequent) and update it
if c.t2.Contains(key) {
c.t2.Add(key, value)
return
}
// Check if this value was recently evicted as part of the
// recently used list
if c.b1.Contains(key) {
// T1 set is too small, increase P appropriately
delta := 1
b1Len := c.b1.Len()
b2Len := c.b2.Len()
if b2Len > b1Len {
delta = b2Len / b1Len
}
if c.p+delta >= c.size {
c.p = c.size
} else {
c.p += delta
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(false)
}
// Remove from B1
c.b1.Remove(key)
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
// Check if this value was recently evicted as part of the
// frequently used list
if c.b2.Contains(key) {
// T2 set is too small, decrease P appropriately
delta := 1
b1Len := c.b1.Len()
b2Len := c.b2.Len()
if b1Len > b2Len {
delta = b1Len / b2Len
}
if delta >= c.p {
c.p = 0
} else {
c.p -= delta
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(true)
}
// Remove from B2
c.b2.Remove(key)
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
// Potentially need to make room in the cache
if c.t1.Len()+c.t2.Len() >= c.size {
c.replace(false)
}
// Keep the size of the ghost buffers trim
if c.b1.Len() > c.size-c.p {
c.b1.RemoveOldest()
}
if c.b2.Len() > c.p {
c.b2.RemoveOldest()
}
// Add to the recently seen list
c.t1.Add(key, value)
return
}
// replace is used to adaptively evict from either T1 or T2
// based on the current learned value of P
func (c *ARCCache) replace(b2ContainsKey bool) {
t1Len := c.t1.Len()
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
k, _, ok := c.t1.RemoveOldest()
if ok {
c.b1.Add(k, nil)
}
} else {
k, _, ok := c.t2.RemoveOldest()
if ok {
c.b2.Add(k, nil)
}
}
}
// Len returns the number of cached entries
func (c *ARCCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.t1.Len() + c.t2.Len()
}
// Keys returns all the cached keys
func (c *ARCCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
k1 := c.t1.Keys()
k2 := c.t2.Keys()
return append(k1, k2...)
}
// Remove is used to purge a key from the cache
func (c *ARCCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
if c.t1.Remove(key) {
return
}
if c.t2.Remove(key) {
return
}
if c.b1.Remove(key) {
return
}
if c.b2.Remove(key) {
return
}
}
// Purge is used to clear the cache
func (c *ARCCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
c.t1.Purge()
c.t2.Purge()
c.b1.Purge()
c.b2.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *ARCCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.t1.Contains(key) || c.t2.Contains(key)
}
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.t1.Peek(key); ok {
return val, ok
}
return c.t2.Peek(key)
}
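// Illustrative sketch, not part of the vendored sources: ARCCache is used like
// the other caches in this package and needs no tuning beyond its size; 128
// entries and the squared values are arbitrary example data.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	arc, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 256; i++ {
		arc.Add(i, i*i) // once capacity is reached, older entries are evicted adaptively
	}
	_, ok := arc.Get(0)
	fmt.Println("0 still cached:", ok, "len:", arc.Len())
}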

21
vendor/github.com/hashicorp/golang-lru/doc.go generated vendored Normal file

@ -0,0 +1,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru

110
vendor/github.com/hashicorp/golang-lru/lru.go generated vendored Normal file

@ -0,0 +1,110 @@
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/simplelru"
)
// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
lru simplelru.LRUCache
lock sync.RWMutex
}
// New creates an LRU of the given size.
func New(size int) (*Cache, error) {
return NewWithEvict(size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
if err != nil {
return nil, err
}
c := &Cache{
lru: lru,
}
return c, nil
}
// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
c.lock.Lock()
c.lru.Purge()
c.lock.Unlock()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) (evicted bool) {
c.lock.Lock()
defer c.lock.Unlock()
return c.lru.Add(key, value)
}
// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
return c.lru.Get(key)
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Contains(key)
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Peek(key)
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
c.lock.Lock()
defer c.lock.Unlock()
if c.lru.Contains(key) {
return true, false
}
evicted = c.lru.Add(key, value)
return false, evicted
}
// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
c.lock.Lock()
c.lru.Remove(key)
c.lock.Unlock()
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
c.lock.Lock()
c.lru.RemoveOldest()
c.lock.Unlock()
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Keys()
}
// Len returns the number of items in the cache.
func (c *Cache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Len()
}
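// Illustrative sketch, not part of the vendored sources: the plain thread-safe
// LRU cache with an eviction callback. A size of 2 is chosen only to force an
// eviction in the example.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Println("evicted:", key, value)
	})
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3)         // evicts "a", the least recently used entry
	fmt.Println(cache.Keys()) // oldest to newest: [b c]
}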

161
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go generated vendored Normal file

@ -0,0 +1,161 @@
package simplelru
import (
"container/list"
"errors"
)
// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})
// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
size int
evictList *list.List
items map[interface{}]*list.Element
onEvict EvictCallback
}
// entry is used to hold a value in the evictList
type entry struct {
key interface{}
value interface{}
}
// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
if size <= 0 {
return nil, errors.New("Must provide a positive size")
}
c := &LRU{
size: size,
evictList: list.New(),
items: make(map[interface{}]*list.Element),
onEvict: onEvict,
}
return c, nil
}
// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
for k, v := range c.items {
if c.onEvict != nil {
c.onEvict(k, v.Value.(*entry).value)
}
delete(c.items, k)
}
c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
// Check for existing item
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
ent.Value.(*entry).value = value
return false
}
// Add new item
ent := &entry{key, value}
entry := c.evictList.PushFront(ent)
c.items[key] = entry
evict := c.evictList.Len() > c.size
// Verify size not exceeded
if evict {
c.removeOldest()
}
return evict
}
// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
return ent.Value.(*entry).value, true
}
return
}
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
_, ok = c.items[key]
return ok
}
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
var ent *list.Element
if ent, ok = c.items[key]; ok {
return ent.Value.(*entry).value, true
}
return nil, ok
}
// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) (present bool) {
if ent, ok := c.items[key]; ok {
c.removeElement(ent)
return true
}
return false
}
// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
kv := ent.Value.(*entry)
return kv.key, kv.value, true
}
return nil, nil, false
}
// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
kv := ent.Value.(*entry)
return kv.key, kv.value, true
}
return nil, nil, false
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
keys := make([]interface{}, len(c.items))
i := 0
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
keys[i] = ent.Value.(*entry).key
i++
}
return keys
}
// Len returns the number of items in the cache.
func (c *LRU) Len() int {
return c.evictList.Len()
}
// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
}
}
// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
c.evictList.Remove(e)
kv := e.Value.(*entry)
delete(c.items, kv.key)
if c.onEvict != nil {
c.onEvict(kv.key, kv.value)
}
}
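For orientation, a minimal usage sketch of the simplelru API added above (illustrative, not part of the vendored sources); it assumes the package is imported from its vendored path github.com/hashicorp/golang-lru/simplelru.

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires whenever an entry is dropped to stay under size.
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}
	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used key
	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}
	fmt.Println("len =", cache.Len()) // len = 2
}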

View file

@ -0,0 +1,37 @@
package simplelru
// LRUCache is the interface for a simple LRU cache.
type LRUCache interface {
// Adds a value to the cache, returns true if an eviction occurred and
// updates the "recently used"-ness of the key.
Add(key, value interface{}) bool
// Returns key's value from the cache and
// updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool)
// Checks if a key exists in the cache without updating the recent-ness.
Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key.
Peek(key interface{}) (value interface{}, ok bool)
// Removes a key from the cache.
Remove(key interface{}) bool
// Removes the oldest entry from cache.
RemoveOldest() (interface{}, interface{}, bool)
// Returns the oldest entry from the cache. #key, value, isFound
GetOldest() (interface{}, interface{}, bool)
// Returns a slice of the keys in the cache, from oldest to newest.
Keys() []interface{}
// Returns the number of items in the cache.
Len() int
// Clears all cache entries.
Purge()
}
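The *LRU type from lru.go above satisfies this interface; a compile-time assertion (a sketch, not part of the diff) makes that explicit:

// Illustrative compile-time check that *LRU satisfies LRUCache;
// assumes: import "github.com/hashicorp/golang-lru/simplelru".
var _ simplelru.LRUCache = (*simplelru.LRU)(nil)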

21
vendor/github.com/json-iterator/go/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2016 json-iterator
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

138
vendor/github.com/json-iterator/go/feature_adapter.go generated vendored Normal file
View file

@ -0,0 +1,138 @@
package jsoniter
import (
"bytes"
"io"
)
// RawMessage is provided so jsoniter can be used as a drop-in replacement for encoding/json
type RawMessage []byte
// Unmarshal adapts to the encoding/json Unmarshal API
//
// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
// Refer to https://godoc.org/encoding/json#Unmarshal for more information
func Unmarshal(data []byte, v interface{}) error {
return ConfigDefault.Unmarshal(data, v)
}
func lastNotSpacePos(data []byte) int {
for i := len(data) - 1; i >= 0; i-- {
if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' {
return i + 1
}
}
return 0
}
// UnmarshalFromString is a convenience method that reads from a string instead of a []byte
func UnmarshalFromString(str string, v interface{}) error {
return ConfigDefault.UnmarshalFromString(str, v)
}
// Get is a quick method to fetch a value from a deeply nested JSON structure
func Get(data []byte, path ...interface{}) Any {
return ConfigDefault.Get(data, path...)
}
// Marshal adapts to the encoding/json Marshal API
//
// Marshal returns the JSON encoding of v.
// Refer to https://godoc.org/encoding/json#Marshal for more information
func Marshal(v interface{}) ([]byte, error) {
return ConfigDefault.Marshal(v)
}
// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
return ConfigDefault.MarshalIndent(v, prefix, indent)
}
// MarshalToString is a convenience method that returns a string instead of a []byte
func MarshalToString(v interface{}) (string, error) {
return ConfigDefault.MarshalToString(v)
}
// NewDecoder adapts to the encoding/json NewDecoder API.
//
// NewDecoder returns a new decoder that reads from r.
//
// Instead of an encoding/json Decoder, a jsoniter Decoder is returned
// Refer to https://godoc.org/encoding/json#NewDecoder for more information
func NewDecoder(reader io.Reader) *Decoder {
return ConfigDefault.NewDecoder(reader)
}
// Decoder reads and decodes JSON values from an input stream.
// Decoder provides an API identical to encoding/json's Decoder (Token() and UseNumber() are in progress)
type Decoder struct {
iter *Iterator
}
// Decode decodes the next JSON value from the input and stores it in obj
func (adapter *Decoder) Decode(obj interface{}) error {
if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
if !adapter.iter.loadMore() {
return io.EOF
}
}
adapter.iter.ReadVal(obj)
err := adapter.iter.Error
if err == io.EOF {
return nil
}
return adapter.iter.Error
}
// More reports whether there is more unread data in the buffer
func (adapter *Decoder) More() bool {
return adapter.iter.head != adapter.iter.tail
}
// Buffered returns a reader over the data remaining in the Decoder's buffer
func (adapter *Decoder) Buffered() io.Reader {
remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
return bytes.NewReader(remaining)
}
// UseNumber causes JSON numbers to be decoded as json.Number (an alias of string) instead of float64
func (adapter *Decoder) UseNumber() {
origCfg := adapter.iter.cfg.configBeforeFrozen
origCfg.UseNumber = true
adapter.iter.cfg = origCfg.Froze().(*frozenConfig)
}
// NewEncoder same as json.NewEncoder
func NewEncoder(writer io.Writer) *Encoder {
return ConfigDefault.NewEncoder(writer)
}
// Encoder same as json.Encoder
type Encoder struct {
stream *Stream
}
// Encode writes the JSON encoding of val to the underlying io.Writer, followed by a newline
func (adapter *Encoder) Encode(val interface{}) error {
adapter.stream.WriteVal(val)
adapter.stream.WriteRaw("\n")
adapter.stream.Flush()
return adapter.stream.Error
}
// SetIndent sets the indentation. Prefix is not supported
func (adapter *Encoder) SetIndent(prefix, indent string) {
adapter.stream.cfg.indentionStep = len(indent)
}
// SetEscapeHTML controls HTML escaping in JSON strings; it is enabled by default, set to false to disable
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
config := adapter.stream.cfg.configBeforeFrozen
config.EscapeHTML = escapeHTML
adapter.stream.cfg = config.Froze().(*frozenConfig)
}
// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
return ConfigDefault.Valid(data)
}
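A short sketch of the adapter API above, showing jsoniter used as a drop-in for encoding/json; the user type and its fields are illustrative.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type user struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	// Marshal/Unmarshal mirror the encoding/json API.
	data, err := jsoniter.Marshal(user{Name: "alice", Age: 30})
	if err != nil {
		panic(err)
	}
	var u user
	if err := jsoniter.Unmarshal(data, &u); err != nil {
		panic(err)
	}
	fmt.Println(u.Name, u.Age)        // alice 30
	fmt.Println(jsoniter.Valid(data)) // true
}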

245
vendor/github.com/json-iterator/go/feature_any.go generated vendored Normal file
View file

@ -0,0 +1,245 @@
package jsoniter
import (
"errors"
"fmt"
"io"
"reflect"
)
// Any is a generic JSON value representation.
// The lazy implementation holds the raw []byte and parses it lazily.
type Any interface {
LastError() error
ValueType() ValueType
MustBeValid() Any
ToBool() bool
ToInt() int
ToInt32() int32
ToInt64() int64
ToUint() uint
ToUint32() uint32
ToUint64() uint64
ToFloat32() float32
ToFloat64() float64
ToString() string
ToVal(val interface{})
Get(path ...interface{}) Any
// TODO: add Set
Size() int
Keys() []string
GetInterface() interface{}
WriteTo(stream *Stream)
}
type baseAny struct{}
func (any *baseAny) Get(path ...interface{}) Any {
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
}
func (any *baseAny) Size() int {
return 0
}
func (any *baseAny) Keys() []string {
return []string{}
}
func (any *baseAny) ToVal(obj interface{}) {
panic("not implemented")
}
// WrapInt32 turns an int32 into an Any interface
func WrapInt32(val int32) Any {
return &int32Any{baseAny{}, val}
}
// WrapInt64 turns an int64 into an Any interface
func WrapInt64(val int64) Any {
return &int64Any{baseAny{}, val}
}
// WrapUint32 turns a uint32 into an Any interface
func WrapUint32(val uint32) Any {
return &uint32Any{baseAny{}, val}
}
// WrapUint64 turns a uint64 into an Any interface
func WrapUint64(val uint64) Any {
return &uint64Any{baseAny{}, val}
}
// WrapFloat64 turns a float64 into an Any interface
func WrapFloat64(val float64) Any {
return &floatAny{baseAny{}, val}
}
// WrapString turns a string into an Any interface
func WrapString(val string) Any {
return &stringAny{baseAny{}, val}
}
// Wrap turns a Go object into an Any interface
func Wrap(val interface{}) Any {
if val == nil {
return &nilAny{}
}
asAny, isAny := val.(Any)
if isAny {
return asAny
}
typ := reflect.TypeOf(val)
switch typ.Kind() {
case reflect.Slice:
return wrapArray(val)
case reflect.Struct:
return wrapStruct(val)
case reflect.Map:
return wrapMap(val)
case reflect.String:
return WrapString(val.(string))
case reflect.Int:
return WrapInt64(int64(val.(int)))
case reflect.Int8:
return WrapInt32(int32(val.(int8)))
case reflect.Int16:
return WrapInt32(int32(val.(int16)))
case reflect.Int32:
return WrapInt32(val.(int32))
case reflect.Int64:
return WrapInt64(val.(int64))
case reflect.Uint:
return WrapUint64(uint64(val.(uint)))
case reflect.Uint8:
return WrapUint32(uint32(val.(uint8)))
case reflect.Uint16:
return WrapUint32(uint32(val.(uint16)))
case reflect.Uint32:
return WrapUint32(uint32(val.(uint32)))
case reflect.Uint64:
return WrapUint64(val.(uint64))
case reflect.Float32:
return WrapFloat64(float64(val.(float32)))
case reflect.Float64:
return WrapFloat64(val.(float64))
case reflect.Bool:
if val.(bool) {
return &trueAny{}
}
return &falseAny{}
}
return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
}
// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
func (iter *Iterator) ReadAny() Any {
return iter.readAny()
}
func (iter *Iterator) readAny() Any {
c := iter.nextToken()
switch c {
case '"':
iter.unreadByte()
return &stringAny{baseAny{}, iter.ReadString()}
case 'n':
iter.skipThreeBytes('u', 'l', 'l') // null
return &nilAny{}
case 't':
iter.skipThreeBytes('r', 'u', 'e') // true
return &trueAny{}
case 'f':
iter.skipFourBytes('a', 'l', 's', 'e') // false
return &falseAny{}
case '{':
return iter.readObjectAny()
case '[':
return iter.readArrayAny()
case '-':
return iter.readNumberAny(false)
case 0:
return &invalidAny{baseAny{}, errors.New("input is empty")}
default:
return iter.readNumberAny(true)
}
}
func (iter *Iterator) readNumberAny(positive bool) Any {
iter.startCapture(iter.head - 1)
iter.skipNumber()
lazyBuf := iter.stopCapture()
return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
}
func (iter *Iterator) readObjectAny() Any {
iter.startCapture(iter.head - 1)
iter.skipObject()
lazyBuf := iter.stopCapture()
return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
}
func (iter *Iterator) readArrayAny() Any {
iter.startCapture(iter.head - 1)
iter.skipArray()
lazyBuf := iter.stopCapture()
return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
}
func locateObjectField(iter *Iterator, target string) []byte {
var found []byte
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
if field == target {
found = iter.SkipAndReturnBytes()
return false
}
iter.Skip()
return true
})
return found
}
func locateArrayElement(iter *Iterator, target int) []byte {
var found []byte
n := 0
iter.ReadArrayCB(func(iter *Iterator) bool {
if n == target {
found = iter.SkipAndReturnBytes()
return false
}
iter.Skip()
n++
return true
})
return found
}
func locatePath(iter *Iterator, path []interface{}) Any {
for i, pathKeyObj := range path {
switch pathKey := pathKeyObj.(type) {
case string:
valueBytes := locateObjectField(iter, pathKey)
if valueBytes == nil {
return newInvalidAny(path[i:])
}
iter.ResetBytes(valueBytes)
case int:
valueBytes := locateArrayElement(iter, pathKey)
if valueBytes == nil {
return newInvalidAny(path[i:])
}
iter.ResetBytes(valueBytes)
case int32:
if '*' == pathKey {
return iter.readAny().Get(path[i:]...)
}
return newInvalidAny(path[i:])
default:
return newInvalidAny(path[i:])
}
}
if iter.Error != nil && iter.Error != io.EOF {
return &invalidAny{baseAny{}, iter.Error}
}
return iter.readAny()
}
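A sketch of the path-lookup API defined above (Get plus the Any conversions); the JSON document is illustrative.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`{"users":[{"name":"alice"},{"name":"bob"}]}`)

	// Object fields are addressed by string, array elements by int index.
	fmt.Println(jsoniter.Get(doc, "users", 1, "name").ToString()) // bob

	// The '*' wildcard (an int32 rune) maps the rest of the path over every element.
	names := jsoniter.Get(doc, "users", '*', "name")
	fmt.Println(names.Size(), names.Get(0).ToString()) // 2 alice
}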

278
vendor/github.com/json-iterator/go/feature_any_array.go generated vendored Normal file
View file

@ -0,0 +1,278 @@
package jsoniter
import (
"reflect"
"unsafe"
)
type arrayLazyAny struct {
baseAny
cfg *frozenConfig
buf []byte
err error
}
func (any *arrayLazyAny) ValueType() ValueType {
return ArrayValue
}
func (any *arrayLazyAny) MustBeValid() Any {
return any
}
func (any *arrayLazyAny) LastError() error {
return any.err
}
func (any *arrayLazyAny) ToBool() bool {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.ReadArray()
}
func (any *arrayLazyAny) ToInt() int {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToInt32() int32 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToInt64() int64 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToUint() uint {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToUint32() uint32 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToUint64() uint64 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToFloat32() float32 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToFloat64() float64 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToString() string {
return *(*string)(unsafe.Pointer(&any.buf))
}
func (any *arrayLazyAny) ToVal(val interface{}) {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadVal(val)
}
func (any *arrayLazyAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case int:
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
valueBytes := locateArrayElement(iter, firstPath)
if valueBytes == nil {
return newInvalidAny(path)
}
iter.ResetBytes(valueBytes)
return locatePath(iter, path[1:])
case int32:
if '*' == firstPath {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
arr := make([]Any, 0)
iter.ReadArrayCB(func(iter *Iterator) bool {
found := iter.readAny().Get(path[1:]...)
if found.ValueType() != InvalidValue {
arr = append(arr, found)
}
return true
})
return wrapArray(arr)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *arrayLazyAny) Size() int {
size := 0
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadArrayCB(func(iter *Iterator) bool {
size++
iter.Skip()
return true
})
return size
}
func (any *arrayLazyAny) WriteTo(stream *Stream) {
stream.Write(any.buf)
}
func (any *arrayLazyAny) GetInterface() interface{} {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.Read()
}
type arrayAny struct {
baseAny
val reflect.Value
}
func wrapArray(val interface{}) *arrayAny {
return &arrayAny{baseAny{}, reflect.ValueOf(val)}
}
func (any *arrayAny) ValueType() ValueType {
return ArrayValue
}
func (any *arrayAny) MustBeValid() Any {
return any
}
func (any *arrayAny) LastError() error {
return nil
}
func (any *arrayAny) ToBool() bool {
return any.val.Len() != 0
}
func (any *arrayAny) ToInt() int {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToInt32() int32 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToInt64() int64 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToUint() uint {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToUint32() uint32 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToUint64() uint64 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToFloat32() float32 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToFloat64() float64 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToString() string {
str, _ := MarshalToString(any.val.Interface())
return str
}
func (any *arrayAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case int:
if firstPath < 0 || firstPath >= any.val.Len() {
return newInvalidAny(path)
}
return Wrap(any.val.Index(firstPath).Interface())
case int32:
if '*' == firstPath {
mappedAll := make([]Any, 0)
for i := 0; i < any.val.Len(); i++ {
mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
if mapped.ValueType() != InvalidValue {
mappedAll = append(mappedAll, mapped)
}
}
return wrapArray(mappedAll)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *arrayAny) Size() int {
return any.val.Len()
}
func (any *arrayAny) WriteTo(stream *Stream) {
stream.WriteVal(any.val)
}
func (any *arrayAny) GetInterface() interface{} {
return any.val.Interface()
}

137
vendor/github.com/json-iterator/go/feature_any_bool.go generated vendored Normal file
View file

@ -0,0 +1,137 @@
package jsoniter
type trueAny struct {
baseAny
}
func (any *trueAny) LastError() error {
return nil
}
func (any *trueAny) ToBool() bool {
return true
}
func (any *trueAny) ToInt() int {
return 1
}
func (any *trueAny) ToInt32() int32 {
return 1
}
func (any *trueAny) ToInt64() int64 {
return 1
}
func (any *trueAny) ToUint() uint {
return 1
}
func (any *trueAny) ToUint32() uint32 {
return 1
}
func (any *trueAny) ToUint64() uint64 {
return 1
}
func (any *trueAny) ToFloat32() float32 {
return 1
}
func (any *trueAny) ToFloat64() float64 {
return 1
}
func (any *trueAny) ToString() string {
return "true"
}
func (any *trueAny) WriteTo(stream *Stream) {
stream.WriteTrue()
}
func (any *trueAny) Parse() *Iterator {
return nil
}
func (any *trueAny) GetInterface() interface{} {
return true
}
func (any *trueAny) ValueType() ValueType {
return BoolValue
}
func (any *trueAny) MustBeValid() Any {
return any
}
type falseAny struct {
baseAny
}
func (any *falseAny) LastError() error {
return nil
}
func (any *falseAny) ToBool() bool {
return false
}
func (any *falseAny) ToInt() int {
return 0
}
func (any *falseAny) ToInt32() int32 {
return 0
}
func (any *falseAny) ToInt64() int64 {
return 0
}
func (any *falseAny) ToUint() uint {
return 0
}
func (any *falseAny) ToUint32() uint32 {
return 0
}
func (any *falseAny) ToUint64() uint64 {
return 0
}
func (any *falseAny) ToFloat32() float32 {
return 0
}
func (any *falseAny) ToFloat64() float64 {
return 0
}
func (any *falseAny) ToString() string {
return "false"
}
func (any *falseAny) WriteTo(stream *Stream) {
stream.WriteFalse()
}
func (any *falseAny) Parse() *Iterator {
return nil
}
func (any *falseAny) GetInterface() interface{} {
return false
}
func (any *falseAny) ValueType() ValueType {
return BoolValue
}
func (any *falseAny) MustBeValid() Any {
return any
}

View file

@ -0,0 +1,83 @@
package jsoniter
import (
"strconv"
)
type floatAny struct {
baseAny
val float64
}
func (any *floatAny) Parse() *Iterator {
return nil
}
func (any *floatAny) ValueType() ValueType {
return NumberValue
}
func (any *floatAny) MustBeValid() Any {
return any
}
func (any *floatAny) LastError() error {
return nil
}
func (any *floatAny) ToBool() bool {
return any.ToFloat64() != 0
}
func (any *floatAny) ToInt() int {
return int(any.val)
}
func (any *floatAny) ToInt32() int32 {
return int32(any.val)
}
func (any *floatAny) ToInt64() int64 {
return int64(any.val)
}
func (any *floatAny) ToUint() uint {
if any.val > 0 {
return uint(any.val)
}
return 0
}
func (any *floatAny) ToUint32() uint32 {
if any.val > 0 {
return uint32(any.val)
}
return 0
}
func (any *floatAny) ToUint64() uint64 {
if any.val > 0 {
return uint64(any.val)
}
return 0
}
func (any *floatAny) ToFloat32() float32 {
return float32(any.val)
}
func (any *floatAny) ToFloat64() float64 {
return any.val
}
func (any *floatAny) ToString() string {
return strconv.FormatFloat(any.val, 'E', -1, 64)
}
func (any *floatAny) WriteTo(stream *Stream) {
stream.WriteFloat64(any.val)
}
func (any *floatAny) GetInterface() interface{} {
return any.val
}

View file

@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type int32Any struct {
baseAny
val int32
}
func (any *int32Any) LastError() error {
return nil
}
func (any *int32Any) ValueType() ValueType {
return NumberValue
}
func (any *int32Any) MustBeValid() Any {
return any
}
func (any *int32Any) ToBool() bool {
return any.val != 0
}
func (any *int32Any) ToInt() int {
return int(any.val)
}
func (any *int32Any) ToInt32() int32 {
return any.val
}
func (any *int32Any) ToInt64() int64 {
return int64(any.val)
}
func (any *int32Any) ToUint() uint {
return uint(any.val)
}
func (any *int32Any) ToUint32() uint32 {
return uint32(any.val)
}
func (any *int32Any) ToUint64() uint64 {
return uint64(any.val)
}
func (any *int32Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *int32Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *int32Any) ToString() string {
return strconv.FormatInt(int64(any.val), 10)
}
func (any *int32Any) WriteTo(stream *Stream) {
stream.WriteInt32(any.val)
}
func (any *int32Any) Parse() *Iterator {
return nil
}
func (any *int32Any) GetInterface() interface{} {
return any.val
}

View file

@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type int64Any struct {
baseAny
val int64
}
func (any *int64Any) LastError() error {
return nil
}
func (any *int64Any) ValueType() ValueType {
return NumberValue
}
func (any *int64Any) MustBeValid() Any {
return any
}
func (any *int64Any) ToBool() bool {
return any.val != 0
}
func (any *int64Any) ToInt() int {
return int(any.val)
}
func (any *int64Any) ToInt32() int32 {
return int32(any.val)
}
func (any *int64Any) ToInt64() int64 {
return any.val
}
func (any *int64Any) ToUint() uint {
return uint(any.val)
}
func (any *int64Any) ToUint32() uint32 {
return uint32(any.val)
}
func (any *int64Any) ToUint64() uint64 {
return uint64(any.val)
}
func (any *int64Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *int64Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *int64Any) ToString() string {
return strconv.FormatInt(any.val, 10)
}
func (any *int64Any) WriteTo(stream *Stream) {
stream.WriteInt64(any.val)
}
func (any *int64Any) Parse() *Iterator {
return nil
}
func (any *int64Any) GetInterface() interface{} {
return any.val
}

View file

@ -0,0 +1,82 @@
package jsoniter
import "fmt"
type invalidAny struct {
baseAny
err error
}
func newInvalidAny(path []interface{}) *invalidAny {
return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
}
func (any *invalidAny) LastError() error {
return any.err
}
func (any *invalidAny) ValueType() ValueType {
return InvalidValue
}
func (any *invalidAny) MustBeValid() Any {
panic(any.err)
}
func (any *invalidAny) ToBool() bool {
return false
}
func (any *invalidAny) ToInt() int {
return 0
}
func (any *invalidAny) ToInt32() int32 {
return 0
}
func (any *invalidAny) ToInt64() int64 {
return 0
}
func (any *invalidAny) ToUint() uint {
return 0
}
func (any *invalidAny) ToUint32() uint32 {
return 0
}
func (any *invalidAny) ToUint64() uint64 {
return 0
}
func (any *invalidAny) ToFloat32() float32 {
return 0
}
func (any *invalidAny) ToFloat64() float64 {
return 0
}
func (any *invalidAny) ToString() string {
return ""
}
func (any *invalidAny) WriteTo(stream *Stream) {
}
func (any *invalidAny) Get(path ...interface{}) Any {
if any.err == nil {
return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
}
return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
}
func (any *invalidAny) Parse() *Iterator {
return nil
}
func (any *invalidAny) GetInterface() interface{} {
return nil
}
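Failed lookups surface as the invalidAny above: LastError reports the unresolved path, while MustBeValid panics with it. A small sketch with an illustrative document:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`{"users":[{"name":"alice"}]}`)
	missing := jsoniter.Get(doc, "users", 5, "name")
	if missing.ValueType() == jsoniter.InvalidValue {
		fmt.Println(missing.LastError()) // reports the path that could not be resolved
	}
	// missing.MustBeValid() would panic with that same error.
}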

69
vendor/github.com/json-iterator/go/feature_any_nil.go generated vendored Normal file
View file

@ -0,0 +1,69 @@
package jsoniter
type nilAny struct {
baseAny
}
func (any *nilAny) LastError() error {
return nil
}
func (any *nilAny) ValueType() ValueType {
return NilValue
}
func (any *nilAny) MustBeValid() Any {
return any
}
func (any *nilAny) ToBool() bool {
return false
}
func (any *nilAny) ToInt() int {
return 0
}
func (any *nilAny) ToInt32() int32 {
return 0
}
func (any *nilAny) ToInt64() int64 {
return 0
}
func (any *nilAny) ToUint() uint {
return 0
}
func (any *nilAny) ToUint32() uint32 {
return 0
}
func (any *nilAny) ToUint64() uint64 {
return 0
}
func (any *nilAny) ToFloat32() float32 {
return 0
}
func (any *nilAny) ToFloat64() float64 {
return 0
}
func (any *nilAny) ToString() string {
return ""
}
func (any *nilAny) WriteTo(stream *Stream) {
stream.WriteNil()
}
func (any *nilAny) Parse() *Iterator {
return nil
}
func (any *nilAny) GetInterface() interface{} {
return nil
}

View file

@ -0,0 +1,123 @@
package jsoniter
import (
"unsafe"
"io"
)
type numberLazyAny struct {
baseAny
cfg *frozenConfig
buf []byte
err error
}
func (any *numberLazyAny) ValueType() ValueType {
return NumberValue
}
func (any *numberLazyAny) MustBeValid() Any {
return any
}
func (any *numberLazyAny) LastError() error {
return any.err
}
func (any *numberLazyAny) ToBool() bool {
return any.ToFloat64() != 0
}
func (any *numberLazyAny) ToInt() int {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToInt32() int32 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt32()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToInt64() int64 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt64()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToUint() uint {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToUint32() uint32 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint32()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToUint64() uint64 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint64()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToFloat32() float32 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadFloat32()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToFloat64() float64 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadFloat64()
if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val
}
func (any *numberLazyAny) ToString() string {
return *(*string)(unsafe.Pointer(&any.buf))
}
func (any *numberLazyAny) WriteTo(stream *Stream) {
stream.Write(any.buf)
}
func (any *numberLazyAny) GetInterface() interface{} {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.Read()
}

View file

@ -0,0 +1,374 @@
package jsoniter
import (
"reflect"
"unsafe"
)
type objectLazyAny struct {
baseAny
cfg *frozenConfig
buf []byte
err error
}
func (any *objectLazyAny) ValueType() ValueType {
return ObjectValue
}
func (any *objectLazyAny) MustBeValid() Any {
return any
}
func (any *objectLazyAny) LastError() error {
return any.err
}
func (any *objectLazyAny) ToBool() bool {
return true
}
func (any *objectLazyAny) ToInt() int {
return 0
}
func (any *objectLazyAny) ToInt32() int32 {
return 0
}
func (any *objectLazyAny) ToInt64() int64 {
return 0
}
func (any *objectLazyAny) ToUint() uint {
return 0
}
func (any *objectLazyAny) ToUint32() uint32 {
return 0
}
func (any *objectLazyAny) ToUint64() uint64 {
return 0
}
func (any *objectLazyAny) ToFloat32() float32 {
return 0
}
func (any *objectLazyAny) ToFloat64() float64 {
return 0
}
func (any *objectLazyAny) ToString() string {
return *(*string)(unsafe.Pointer(&any.buf))
}
func (any *objectLazyAny) ToVal(obj interface{}) {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadVal(obj)
}
func (any *objectLazyAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case string:
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
valueBytes := locateObjectField(iter, firstPath)
if valueBytes == nil {
return newInvalidAny(path)
}
iter.ResetBytes(valueBytes)
return locatePath(iter, path[1:])
case int32:
if '*' == firstPath {
mappedAll := map[string]Any{}
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadMapCB(func(iter *Iterator, field string) bool {
mapped := locatePath(iter, path[1:])
if mapped.ValueType() != InvalidValue {
mappedAll[field] = mapped
}
return true
})
return wrapMap(mappedAll)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *objectLazyAny) Keys() []string {
keys := []string{}
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadMapCB(func(iter *Iterator, field string) bool {
iter.Skip()
keys = append(keys, field)
return true
})
return keys
}
func (any *objectLazyAny) Size() int {
size := 0
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
iter.Skip()
size++
return true
})
return size
}
func (any *objectLazyAny) WriteTo(stream *Stream) {
stream.Write(any.buf)
}
func (any *objectLazyAny) GetInterface() interface{} {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.Read()
}
type objectAny struct {
baseAny
err error
val reflect.Value
}
func wrapStruct(val interface{}) *objectAny {
return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
}
func (any *objectAny) ValueType() ValueType {
return ObjectValue
}
func (any *objectAny) MustBeValid() Any {
return any
}
func (any *objectAny) Parse() *Iterator {
return nil
}
func (any *objectAny) LastError() error {
return any.err
}
func (any *objectAny) ToBool() bool {
return any.val.NumField() != 0
}
func (any *objectAny) ToInt() int {
return 0
}
func (any *objectAny) ToInt32() int32 {
return 0
}
func (any *objectAny) ToInt64() int64 {
return 0
}
func (any *objectAny) ToUint() uint {
return 0
}
func (any *objectAny) ToUint32() uint32 {
return 0
}
func (any *objectAny) ToUint64() uint64 {
return 0
}
func (any *objectAny) ToFloat32() float32 {
return 0
}
func (any *objectAny) ToFloat64() float64 {
return 0
}
func (any *objectAny) ToString() string {
str, err := MarshalToString(any.val.Interface())
any.err = err
return str
}
func (any *objectAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case string:
field := any.val.FieldByName(firstPath)
if !field.IsValid() {
return newInvalidAny(path)
}
return Wrap(field.Interface())
case int32:
if '*' == firstPath {
mappedAll := map[string]Any{}
for i := 0; i < any.val.NumField(); i++ {
field := any.val.Field(i)
if field.CanInterface() {
mapped := Wrap(field.Interface()).Get(path[1:]...)
if mapped.ValueType() != InvalidValue {
mappedAll[any.val.Type().Field(i).Name] = mapped
}
}
}
return wrapMap(mappedAll)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *objectAny) Keys() []string {
keys := make([]string, 0, any.val.NumField())
for i := 0; i < any.val.NumField(); i++ {
keys = append(keys, any.val.Type().Field(i).Name)
}
return keys
}
func (any *objectAny) Size() int {
return any.val.NumField()
}
func (any *objectAny) WriteTo(stream *Stream) {
stream.WriteVal(any.val)
}
func (any *objectAny) GetInterface() interface{} {
return any.val.Interface()
}
type mapAny struct {
baseAny
err error
val reflect.Value
}
func wrapMap(val interface{}) *mapAny {
return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
}
func (any *mapAny) ValueType() ValueType {
return ObjectValue
}
func (any *mapAny) MustBeValid() Any {
return any
}
func (any *mapAny) Parse() *Iterator {
return nil
}
func (any *mapAny) LastError() error {
return any.err
}
func (any *mapAny) ToBool() bool {
return true
}
func (any *mapAny) ToInt() int {
return 0
}
func (any *mapAny) ToInt32() int32 {
return 0
}
func (any *mapAny) ToInt64() int64 {
return 0
}
func (any *mapAny) ToUint() uint {
return 0
}
func (any *mapAny) ToUint32() uint32 {
return 0
}
func (any *mapAny) ToUint64() uint64 {
return 0
}
func (any *mapAny) ToFloat32() float32 {
return 0
}
func (any *mapAny) ToFloat64() float64 {
return 0
}
func (any *mapAny) ToString() string {
str, err := MarshalToString(any.val.Interface())
any.err = err
return str
}
func (any *mapAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case int32:
if '*' == firstPath {
mappedAll := map[string]Any{}
for _, key := range any.val.MapKeys() {
keyAsStr := key.String()
element := Wrap(any.val.MapIndex(key).Interface())
mapped := element.Get(path[1:]...)
if mapped.ValueType() != InvalidValue {
mappedAll[keyAsStr] = mapped
}
}
return wrapMap(mappedAll)
}
return newInvalidAny(path)
default:
value := any.val.MapIndex(reflect.ValueOf(firstPath))
if !value.IsValid() {
return newInvalidAny(path)
}
return Wrap(value.Interface())
}
}
func (any *mapAny) Keys() []string {
keys := make([]string, 0, any.val.Len())
for _, key := range any.val.MapKeys() {
keys = append(keys, key.String())
}
return keys
}
func (any *mapAny) Size() int {
return any.val.Len()
}
func (any *mapAny) WriteTo(stream *Stream) {
stream.WriteVal(any.val)
}
func (any *mapAny) GetInterface() interface{} {
return any.val.Interface()
}
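Wrap (defined in feature_any.go above) turns in-memory Go values into the objectAny/mapAny forms shown here, navigable with the same Get syntax; the point struct is illustrative.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type point struct {
	X int
	Y int
}

func main() {
	// A struct wraps to objectAny: Keys lists the struct's fields, Get looks them up by name.
	obj := jsoniter.Wrap(point{X: 3, Y: 4})
	fmt.Println(obj.Keys())           // [X Y]
	fmt.Println(obj.Get("X").ToInt()) // 3

	// A map wraps to mapAny: Get indexes by key.
	m := jsoniter.Wrap(map[string]int{"a": 1})
	fmt.Println(m.Get("a").ToInt()) // 1
}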

View file

@ -0,0 +1,166 @@
package jsoniter
import (
"fmt"
"strconv"
)
type stringAny struct {
baseAny
val string
}
func (any *stringAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
}
func (any *stringAny) Parse() *Iterator {
return nil
}
func (any *stringAny) ValueType() ValueType {
return StringValue
}
func (any *stringAny) MustBeValid() Any {
return any
}
func (any *stringAny) LastError() error {
return nil
}
func (any *stringAny) ToBool() bool {
str := any.ToString()
if str == "0" {
return false
}
for _, c := range str {
switch c {
case ' ', '\n', '\r', '\t':
default:
return true
}
}
return false
}
func (any *stringAny) ToInt() int {
return int(any.ToInt64())
}
func (any *stringAny) ToInt32() int32 {
return int32(any.ToInt64())
}
func (any *stringAny) ToInt64() int64 {
if any.val == "" {
return 0
}
flag := 1
startPos := 0
endPos := 0
if any.val[0] == '+' || any.val[0] == '-' {
startPos = 1
}
if any.val[0] == '-' {
flag = -1
}
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
} else {
break
}
}
parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
return int64(flag) * parsed
}
func (any *stringAny) ToUint() uint {
return uint(any.ToUint64())
}
func (any *stringAny) ToUint32() uint32 {
return uint32(any.ToUint64())
}
func (any *stringAny) ToUint64() uint64 {
if any.val == "" {
return 0
}
startPos := 0
endPos := 0
if any.val[0] == '-' {
return 0
}
if any.val[0] == '+' {
startPos = 1
}
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
} else {
break
}
}
parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
return parsed
}
func (any *stringAny) ToFloat32() float32 {
return float32(any.ToFloat64())
}
func (any *stringAny) ToFloat64() float64 {
if len(any.val) == 0 {
return 0
}
// first char invalid
if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
return 0
}
// extract the valid numeric prefix from the string
// e.g. 123true => 123, -12.12xxa => -12.12
endPos := 1
for i := 1; i < len(any.val); i++ {
if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
endPos = i + 1
continue
}
// end position is the first char which is not digit
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
} else {
endPos = i
break
}
}
parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
return parsed
}
func (any *stringAny) ToString() string {
return any.val
}
func (any *stringAny) WriteTo(stream *Stream) {
stream.WriteString(any.val)
}
func (any *stringAny) GetInterface() interface{} {
return any.val
}
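The conversions above parse only the leading numeric prefix of the string; a brief sketch with illustrative values:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// stringAny conversions read the numeric prefix and ignore trailing junk.
	fmt.Println(jsoniter.WrapString("123true").ToInt())       // 123
	fmt.Println(jsoniter.WrapString("-12.12xxa").ToFloat64()) // -12.12
	fmt.Println(jsoniter.WrapString("0").ToBool())            // false ("0" is falsy)
}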

View file

@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type uint32Any struct {
baseAny
val uint32
}
func (any *uint32Any) LastError() error {
return nil
}
func (any *uint32Any) ValueType() ValueType {
return NumberValue
}
func (any *uint32Any) MustBeValid() Any {
return any
}
func (any *uint32Any) ToBool() bool {
return any.val != 0
}
func (any *uint32Any) ToInt() int {
return int(any.val)
}
func (any *uint32Any) ToInt32() int32 {
return int32(any.val)
}
func (any *uint32Any) ToInt64() int64 {
return int64(any.val)
}
func (any *uint32Any) ToUint() uint {
return uint(any.val)
}
func (any *uint32Any) ToUint32() uint32 {
return any.val
}
func (any *uint32Any) ToUint64() uint64 {
return uint64(any.val)
}
func (any *uint32Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *uint32Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *uint32Any) ToString() string {
return strconv.FormatInt(int64(any.val), 10)
}
func (any *uint32Any) WriteTo(stream *Stream) {
stream.WriteUint32(any.val)
}
func (any *uint32Any) Parse() *Iterator {
return nil
}
func (any *uint32Any) GetInterface() interface{} {
return any.val
}

View file

@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type uint64Any struct {
baseAny
val uint64
}
func (any *uint64Any) LastError() error {
return nil
}
func (any *uint64Any) ValueType() ValueType {
return NumberValue
}
func (any *uint64Any) MustBeValid() Any {
return any
}
func (any *uint64Any) ToBool() bool {
return any.val != 0
}
func (any *uint64Any) ToInt() int {
return int(any.val)
}
func (any *uint64Any) ToInt32() int32 {
return int32(any.val)
}
func (any *uint64Any) ToInt64() int64 {
return int64(any.val)
}
func (any *uint64Any) ToUint() uint {
return uint(any.val)
}
func (any *uint64Any) ToUint32() uint32 {
return uint32(any.val)
}
func (any *uint64Any) ToUint64() uint64 {
return any.val
}
func (any *uint64Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *uint64Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *uint64Any) ToString() string {
return strconv.FormatUint(any.val, 10)
}
func (any *uint64Any) WriteTo(stream *Stream) {
stream.WriteUint64(any.val)
}
func (any *uint64Any) Parse() *Iterator {
return nil
}
func (any *uint64Any) GetInterface() interface{} {
return any.val
}

Some files were not shown because too many files have changed in this diff.