Fix integration test
Signed-off-by: Emile Vauge <emile@vauge.com>
parent 720912e880
commit d82e1342fb

6 changed files with 194 additions and 79 deletions
@@ -8,6 +8,7 @@ import (
     "fmt"
     "github.com/go-check/check"

+    "bytes"
     checker "github.com/vdemeester/shakers"
 )

@@ -16,24 +17,44 @@ type SimpleSuite struct{ BaseSuite }

 func (s *SimpleSuite) TestNoOrInexistentConfigShouldFail(c *check.C) {
     cmd := exec.Command(traefikBinary)
-    output, err := cmd.CombinedOutput()
-
-    c.Assert(err, checker.NotNil)
+    var b bytes.Buffer
+    cmd.Stdout = &b
+    cmd.Stderr = &b
+
+    cmd.Start()
+    time.Sleep(500 * time.Millisecond)
+    output := b.Bytes()
+
     c.Assert(string(output), checker.Contains, "No configuration file found")
+    cmd.Process.Kill()

     nonExistentFile := "non/existent/file.toml"
     cmd = exec.Command(traefikBinary, "--configFile="+nonExistentFile)
-    output, err = cmd.CombinedOutput()
-
-    c.Assert(err, checker.NotNil)
+    cmd.Stdout = &b
+    cmd.Stderr = &b
+
+    cmd.Start()
+    time.Sleep(500 * time.Millisecond)
+    output = b.Bytes()
+
     c.Assert(string(output), checker.Contains, fmt.Sprintf("Error reading configuration file: open %s: no such file or directory", nonExistentFile))
+    cmd.Process.Kill()
 }

 func (s *SimpleSuite) TestInvalidConfigShouldFail(c *check.C) {
     cmd := exec.Command(traefikBinary, "--configFile=fixtures/invalid_configuration.toml")
-    output, err := cmd.CombinedOutput()
-
-    c.Assert(err, checker.NotNil)
+    var b bytes.Buffer
+    cmd.Stdout = &b
+    cmd.Stderr = &b
+
+    cmd.Start()
+    time.Sleep(500 * time.Millisecond)
+    defer cmd.Process.Kill()
+    output := b.Bytes()
+
     c.Assert(string(output), checker.Contains, "While parsing config: Near line 0 (last key parsed ''): Bare keys cannot contain '{'")
 }
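The rewritten tests stop relying on cmd.CombinedOutput() (which only returns once the process exits) and instead wire stdout and stderr into a shared bytes.Buffer, start the binary, wait half a second, inspect the captured output, and kill the process. A minimal standalone sketch of that pattern outside the go-check suite; the binary path and expected message below are placeholders, not taken from this commit:

    package main

    import (
        "bytes"
        "fmt"
        "os/exec"
        "strings"
        "time"
    )

    func main() {
        // Placeholder binary path; the real tests run the traefik binary under test.
        cmd := exec.Command("./traefik")

        // Capture stdout and stderr in a single buffer, as the updated tests do.
        var b bytes.Buffer
        cmd.Stdout = &b
        cmd.Stderr = &b

        if err := cmd.Start(); err != nil {
            fmt.Println("start failed:", err)
            return
        }
        // Give the process time to print its startup errors, then stop it.
        time.Sleep(500 * time.Millisecond)
        defer cmd.Process.Kill()

        if strings.Contains(b.String(), "No configuration file found") {
            fmt.Println("got the expected startup message")
        }
    }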
@@ -23,9 +23,7 @@ type Kubernetes struct {
     Endpoint string
 }

-// Provide allows the provider to provide configurations to traefik
-// using the given configuration channel.
-func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
+func (provider *Kubernetes) createClient() (*k8s.Client, error) {
     var token string
     tokenBytes, err := ioutil.ReadFile(serviceAccountToken)
     if err == nil {
@@ -46,7 +44,13 @@ func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage
         provider.Endpoint = "https://" + kubernetesHost + ":" + kubernetesPort
     }
     log.Debugf("Kubernetes endpoint: %s", provider.Endpoint)
-    k8sClient, err := k8s.NewClient(provider.Endpoint, caCert, token)
+    return k8s.NewClient(provider.Endpoint, caCert, token)
+}
+
+// Provide allows the provider to provide configurations to traefik
+// using the given configuration channel.
+func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error {
+    k8sClient, err := provider.createClient()
     if err != nil {
         return err
     }
@@ -67,10 +71,6 @@ func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage
         return err
     }
     for {
-        templateObjects := types.Configuration{
-            map[string]*types.Backend{},
-            map[string]*types.Frontend{},
-        }
         select {
         case <-stop:
             stopWatch <- true
@@ -79,64 +79,13 @@ func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage
             return err
         case event := <-ingressesChan:
             log.Debugf("Received event from kubenetes %+v", event)
-            ingresses, err := k8sClient.GetIngresses(func(ingress k8s.Ingress) bool {
-                return true
-            })
+            templateObjects, err := provider.loadIngresses(k8sClient)
             if err != nil {
-                log.Errorf("Error retrieving ingresses: %+v", err)
-                continue
+                return err
             }
-            for _, i := range ingresses {
-                for _, r := range i.Spec.Rules {
-                    for _, pa := range r.HTTP.Paths {
-                        if _, exists := templateObjects.Backends[r.Host+pa.Path]; !exists {
-                            templateObjects.Backends[r.Host+pa.Path] = &types.Backend{
-                                Servers: make(map[string]types.Server),
-                            }
-                        }
-                        if _, exists := templateObjects.Frontends[r.Host+pa.Path]; !exists {
-                            templateObjects.Frontends[r.Host+pa.Path] = &types.Frontend{
-                                Backend: r.Host + pa.Path,
-                                Routes:  make(map[string]types.Route),
-                            }
-                        }
-                        if _, exists := templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host]; !exists {
-                            templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host] = types.Route{
-                                Rule: "Host:" + r.Host,
-                            }
-                        }
-                        if len(pa.Path) > 0 {
-                            templateObjects.Frontends[r.Host+pa.Path].Routes[pa.Path] = types.Route{
-                                Rule: "Path:" + pa.Path,
-                            }
-                        }
-                        services, err := k8sClient.GetServices(func(service k8s.Service) bool {
-                            return service.Name == pa.Backend.ServiceName
-                        })
-                        if err != nil {
-                            log.Errorf("Error retrieving services: %v", err)
-                            continue
-                        }
-                        for _, service := range services {
-                            var protocol string
-                            for _, port := range service.Spec.Ports {
-                                if port.Port == pa.Backend.ServicePort.IntValue() {
-                                    protocol = port.Name
-                                    break
-                                }
-                            }
-                            templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
-                                URL:    protocol + "://" + service.Spec.ClusterIP + ":" + pa.Backend.ServicePort.String(),
-                                Weight: 1,
-                            }
-                        }
-                    }
-                }
-            }

             configurationChan <- types.ConfigMessage{
                 ProviderName:  "kubernetes",
-                Configuration: provider.loadConfig(templateObjects),
+                Configuration: provider.loadConfig(*templateObjects),
             }
         }
     }
@@ -154,6 +103,68 @@ func (provider *Kubernetes) Provide(configurationChan chan<- types.ConfigMessage
     return nil
 }

+func (provider *Kubernetes) loadIngresses(k8sClient *k8s.Client) (*types.Configuration, error) {
+    ingresses, err := k8sClient.GetIngresses(func(ingress k8s.Ingress) bool {
+        return true
+    })
+    if err != nil {
+        log.Errorf("Error retrieving ingresses: %+v", err)
+        return nil, err
+    }
+    templateObjects := types.Configuration{
+        map[string]*types.Backend{},
+        map[string]*types.Frontend{},
+    }
+    for _, i := range ingresses {
+        for _, r := range i.Spec.Rules {
+            for _, pa := range r.HTTP.Paths {
+                if _, exists := templateObjects.Backends[r.Host+pa.Path]; !exists {
+                    templateObjects.Backends[r.Host+pa.Path] = &types.Backend{
+                        Servers: make(map[string]types.Server),
+                    }
+                }
+                if _, exists := templateObjects.Frontends[r.Host+pa.Path]; !exists {
+                    templateObjects.Frontends[r.Host+pa.Path] = &types.Frontend{
+                        Backend: r.Host + pa.Path,
+                        Routes:  make(map[string]types.Route),
+                    }
+                }
+                if _, exists := templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host]; !exists {
+                    templateObjects.Frontends[r.Host+pa.Path].Routes[r.Host] = types.Route{
+                        Rule: "Host:" + r.Host,
+                    }
+                }
+                if len(pa.Path) > 0 {
+                    templateObjects.Frontends[r.Host+pa.Path].Routes[pa.Path] = types.Route{
+                        Rule: pa.Path,
+                    }
+                }
+                services, err := k8sClient.GetServices(func(service k8s.Service) bool {
+                    return service.Name == pa.Backend.ServiceName
+                })
+                if err != nil {
+                    log.Errorf("Error retrieving services: %v", err)
+                    continue
+                }
+                for _, service := range services {
+                    var protocol string
+                    for _, port := range service.Spec.Ports {
+                        if port.Port == pa.Backend.ServicePort.IntValue() {
+                            protocol = port.Name
+                            break
+                        }
+                    }
+                    templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
+                        URL:    protocol + "://" + service.Spec.ClusterIP + ":" + pa.Backend.ServicePort.String(),
+                        Weight: 1,
+                    }
+                }
+            }
+        }
+    }
+    return &templateObjects, nil
+}
+
 func (provider *Kubernetes) loadConfig(templateObjects types.Configuration) *types.Configuration {
     var FuncMap = template.FuncMap{}
     configuration, err := provider.getConfiguration("templates/kubernetes.tmpl", FuncMap, templateObjects)
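The extracted loadIngresses keys both backends and frontends by host concatenated with path, adds a "Host:" rule per host, and adds the raw path as a second route when a path is present. A small sketch of just that key scheme, with made-up host and path values (the real function also resolves service addresses through the Kubernetes client):

    package main

    import "fmt"

    // route mirrors the rules loadIngresses emits: a "Host:" rule keyed by the
    // host, plus the raw path as an extra route when the path is non-empty.
    type route struct {
        Rule string
    }

    func main() {
        // Made-up values for illustration only.
        host, path := "example.local", "/web"

        // Backends and frontends share the same key: host concatenated with path.
        key := host + path
        routes := map[string]route{
            host: {Rule: "Host:" + host},
        }
        if len(path) > 0 {
            routes[path] = route{Rule: path}
        }

        fmt.Println("backend/frontend key:", key)
        fmt.Println("routes:", routes)
    }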
provider/kubernetes_test.go (new file, 1 line)
@@ -0,0 +1 @@
+package provider
@@ -7,7 +7,6 @@ import (
     "testing"
     "time"

-    "github.com/containous/traefik/safe"
     "github.com/docker/libkv/store"
     "reflect"
     "sort"
@@ -81,7 +80,7 @@ func TestKvList(t *testing.T) {
             },
         },
         keys:     []string{"foo", "/baz/"},
-        expected: []string{"foo/baz/biz", "foo/baz/1", "foo/baz/2"},
+        expected: []string{"foo/baz/1", "foo/baz/2"},
     },
 }

@@ -257,9 +256,9 @@ func TestKvWatchTree(t *testing.T) {
     }

     configChan := make(chan types.ConfigMessage)
-    safe.Go(func() {
+    go func() {
         provider.watchKv(configChan, "prefix", make(chan bool, 1))
-    })
+    }()

     select {
     case c1 := <-returnedChans:
@@ -339,7 +338,7 @@ func (s *Mock) List(prefix string) ([]*store.KVPair, error) {
     }
     kv := []*store.KVPair{}
     for _, kvPair := range s.KVPairs {
-        if strings.HasPrefix(kvPair.Key, prefix) {
+        if strings.HasPrefix(kvPair.Key, prefix) && !strings.ContainsAny(strings.TrimPrefix(kvPair.Key, prefix), "/") {
             kv = append(kv, kvPair)
         }
     }
@@ -365,3 +364,86 @@ func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
 func (s *Mock) Close() {
     return
 }
+
+func TestKVLoadConfig(t *testing.T) {
+    provider := &Kv{
+        Prefix: "traefik",
+        kvclient: &Mock{
+            KVPairs: []*store.KVPair{
+                {
+                    Key:   "traefik/frontends/frontend.with.dot",
+                    Value: []byte(""),
+                },
+                {
+                    Key:   "traefik/frontends/frontend.with.dot/backend",
+                    Value: []byte("backend.with.dot.too"),
+                },
+                {
+                    Key:   "traefik/frontends/frontend.with.dot/routes",
+                    Value: []byte(""),
+                },
+                {
+                    Key:   "traefik/frontends/frontend.with.dot/routes/route.with.dot",
+                    Value: []byte(""),
+                },
+                {
+                    Key:   "traefik/frontends/frontend.with.dot/routes/route.with.dot/rule",
+                    Value: []byte("Host:test.localhost"),
+                },
+                {
+                    Key:   "traefik/backends/backend.with.dot.too",
+                    Value: []byte(""),
+                },
+                {
+                    Key:   "traefik/backends/backend.with.dot.too/servers",
+                    Value: []byte(""),
+                },
+                {
+                    Key:   "traefik/backends/backend.with.dot.too/servers/server.with.dot",
+                    Value: []byte(""),
+                },
+                {
+                    Key:   "traefik/backends/backend.with.dot.too/servers/server.with.dot/url",
+                    Value: []byte("http://172.17.0.2:80"),
+                },
+                {
+                    Key:   "traefik/backends/backend.with.dot.too/servers/server.with.dot/weight",
+                    Value: []byte("1"),
+                },
+            },
+        },
+    }
+    actual := provider.loadConfig()
+    expected := &types.Configuration{
+        Backends: map[string]*types.Backend{
+            "backend.with.dot.too": {
+                Servers: map[string]types.Server{
+                    "server.with.dot": {
+                        URL:    "http://172.17.0.2:80",
+                        Weight: 1,
+                    },
+                },
+                CircuitBreaker: nil,
+                LoadBalancer:   nil,
+            },
+        },
+        Frontends: map[string]*types.Frontend{
+            "frontend.with.dot": {
+                Backend:        "backend.with.dot.too",
+                PassHostHeader: false,
+                EntryPoints:    []string{},
+                Routes: map[string]types.Route{
+                    "route.with.dot": {
+                        Rule: "Host:test.localhost",
+                    },
+                },
+            },
+        },
+    }
+    if !reflect.DeepEqual(actual.Backends, expected.Backends) {
+        t.Fatalf("expected %+v, got %+v", expected.Backends, actual.Backends)
+    }
+    if !reflect.DeepEqual(actual.Frontends, expected.Frontends) {
+        t.Fatalf("expected %+v, got %+v", expected.Frontends, actual.Frontends)
+    }
+}
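The mock store's List now filters to direct children: a key must start with the prefix and contain no further "/" once the prefix is trimmed, which is consistent with the narrower TestKvList expectation above. A small sketch of that filter on its own; the sample keys are made up and are not the test's fixture data:

    package main

    import (
        "fmt"
        "strings"
    )

    // directChildren keeps only the keys sitting immediately under prefix,
    // mirroring the check added to the mock store's List.
    func directChildren(keys []string, prefix string) []string {
        var out []string
        for _, key := range keys {
            if strings.HasPrefix(key, prefix) && !strings.ContainsAny(strings.TrimPrefix(key, prefix), "/") {
                out = append(out, key)
            }
        }
        return out
    }

    func main() {
        // Made-up sample keys: the last one is dropped because "biz/1" still
        // contains a "/" after the prefix is trimmed.
        keys := []string{"foo/baz/1", "foo/baz/2", "foo/baz/biz/1"}
        fmt.Println(directChildren(keys, "foo/baz/"))
    }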
@@ -11,6 +11,6 @@
     backend = "{{$frontend.Backend}}"
     {{range $routeName, $route := $frontend.Routes}}
     [frontends."{{$frontendName}}".routes."{{$routeName}}"]
-    rule = "{{$route.Rule}}"
+    rule = "PathStrip:{{$route.Rule}}"
     {{end}}
 {{end}}
@@ -1,19 +1,19 @@
 {{$frontends := List .Prefix "/frontends/" }}
 {{$backends := List .Prefix "/backends/"}}

-{{range $backends}}
+[backends]{{range $backends}}
 {{$backend := .}}
 {{$servers := List $backend "/servers/" }}

 {{$circuitBreaker := Get "" . "/circuitbreaker/" "expression"}}
 {{with $circuitBreaker}}
-[backends.{{Last $backend}}.circuitBreaker]
+[backends."{{Last $backend}}".circuitBreaker]
     expression = "{{$circuitBreaker}}"
 {{end}}

 {{$loadBalancer := Get "" . "/loadbalancer/" "method"}}
 {{with $loadBalancer}}
-[backends.{{Last $backend}}.loadBalancer]
+[backends."{{Last $backend}}".loadBalancer]
     method = "{{$loadBalancer}}"
 {{end}}

@@ -21,14 +21,14 @@
 {{$maxConnExtractorFunc := Get "" . "/maxconn/" "extractorfunc"}}
 {{with $maxConnAmt}}
 {{with $maxConnExtractorFunc}}
-[backends.{{Last $backend}}.maxConn]
+[backends."{{Last $backend}}".maxConn]
     amount = {{$maxConnAmt}}
     extractorFunc = "{{$maxConnExtractorFunc}}"
 {{end}}
 {{end}}

 {{range $servers}}
-[backends.{{Last $backend}}.servers.{{Last .}}]
+[backends."{{Last $backend}}".servers."{{Last .}}"]
     url = "{{Get "" . "/url"}}"
     weight = {{Get "" . "/weight"}}
 {{end}}
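The kv template now quotes backend and server names in the TOML table headers, so names containing dots (like backend.with.dot.too in TestKVLoadConfig) stay single keys instead of being split into nested tables. A small sketch of the effect, assuming a BurntSushi-style TOML parser (the same family whose error message the integration test asserts on):

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        // With quoted table headers the dotted name is one key; unquoted, TOML
        // would split backends.backend.with.dot.too into nested tables instead.
        quoted := `
    [backends."backend.with.dot.too".servers."server.with.dot"]
    url = "http://172.17.0.2:80"
    weight = 1
    `
        var out map[string]interface{}
        if _, err := toml.Decode(quoted, &out); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        for name := range out["backends"].(map[string]interface{}) {
            fmt.Println(name) // prints the single key: backend.with.dot.too
        }
    }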