fix concurrent provider config reloads

Marco Jantke 2017-11-17 10:26:03 +01:00 committed by Traefiker
parent 722f299306
commit cdab6b1796
6 changed files with 223 additions and 353 deletions

glide.lock (generated)

@@ -1,4 +1,4 @@
-hash: 5aef5628a880e04fac9cd9db2a33f1f4716680b0c338f3aa803d9786f253405a
+hash: 6e35766dd261f1eb0c2dfaadb8e0bf235eb4f3f942de776f9b48faf8a868a2d0
 updated: 2017-11-15T18:39:20.364720581+02:00
 imports:
 - name: cloud.google.com/go
@@ -465,8 +465,6 @@ imports:
   version: 10f801ebc38b33738c9d17d50860f484a0988ff5
 - name: github.com/spf13/pflag
   version: cb88ea77998c3f024757528e3305022ab50b43be
-- name: github.com/streamrail/concurrent-map
-  version: 8bf1e9bacbf65b10c81d0f4314cf2b1ebef728b5
 - name: github.com/stretchr/objx
   version: cbeaeb16a013161a98496fad62933b1d21786672
 - name: github.com/stretchr/testify

glide.yaml

@@ -48,7 +48,6 @@ import:
 - package: github.com/hashicorp/consul
   subpackages:
   - api
-- package: github.com/streamrail/concurrent-map
 - package: github.com/thoas/stats
   version: 152b5d051953fdb6e45f14b6826962aadc032324
 - package: github.com/unrolled/render
@@ -231,4 +230,4 @@ testImport:
 - package: github.com/mattn/go-shellwords
 - package: github.com/vdemeester/shakers
 - package: github.com/docker/cli
-  version: d95fd2f38cfc23e077530c6181330727d561b6a0
+  version: d95fd2f38cfc23e077530c6181330727d561b6a0

server/server.go

@@ -36,7 +36,7 @@ import (
 	traefikTls "github.com/containous/traefik/tls"
 	"github.com/containous/traefik/types"
 	"github.com/containous/traefik/whitelist"
-	"github.com/streamrail/concurrent-map"
+	"github.com/eapache/channels"
 	thoas_stats "github.com/thoas/stats"
 	"github.com/urfave/negroni"
 	"github.com/vulcand/oxy/cbreaker"
@@ -67,8 +65,6 @@ type Server struct {
 	leadership                    *cluster.Leadership
 	defaultForwardingRoundTripper http.RoundTripper
 	metricsRegistry               metrics.Registry
-	lastReceivedConfiguration     *safe.Safe
-	lastConfigs                   cmap.ConcurrentMap
 }
 type serverEntryPoints map[string]*serverEntryPoint
@@ -109,8 +107,6 @@ func NewServer(globalConfiguration configuration.GlobalConfiguration) *Server {
 	server.routinesPool = safe.NewPool(context.Background())
 	server.defaultForwardingRoundTripper = createHTTPTransport(globalConfiguration)
-	server.lastReceivedConfiguration = safe.New(time.Unix(0, 0))
-	server.lastConfigs = cmap.New()
 	server.metricsRegistry = metrics.NewVoidRegistry()
 	if globalConfiguration.Metrics != nil {
@@ -345,6 +341,8 @@ func (server *Server) listenProviders(stop chan bool) {
 }
 func (server *Server) preLoadConfiguration(configMsg types.ConfigMessage) {
+	providerConfigUpdateMap := map[string]chan types.ConfigMessage{}
+	providersThrottleDuration := time.Duration(server.globalConfiguration.ProvidersThrottleDuration)
 	server.defaultConfigurationValues(configMsg.Configuration)
 	currentConfigurations := server.currentConfigurations.Get().(types.Configurations)
 	jsonConf, _ := json.Marshal(configMsg.Configuration)
@@ -354,28 +352,43 @@
 	} else if reflect.DeepEqual(currentConfigurations[configMsg.ProviderName], configMsg.Configuration) {
 		log.Infof("Skipping same configuration for provider %s", configMsg.ProviderName)
 	} else {
-		server.lastConfigs.Set(configMsg.ProviderName, &configMsg)
-		lastReceivedConfigurationValue := server.lastReceivedConfiguration.Get().(time.Time)
-		providersThrottleDuration := time.Duration(server.globalConfiguration.ProvidersThrottleDuration)
-		if time.Now().After(lastReceivedConfigurationValue.Add(providersThrottleDuration)) {
-			log.Debugf("Last %s configuration received more than %s, OK", configMsg.ProviderName, server.globalConfiguration.ProvidersThrottleDuration.String())
-			// last config received more than n seconds ago
-			server.configurationValidatedChan <- configMsg
-		} else {
-			log.Debugf("Last %s configuration received less than %s, waiting...", configMsg.ProviderName, server.globalConfiguration.ProvidersThrottleDuration.String())
-			safe.Go(func() {
-				<-time.After(providersThrottleDuration)
-				lastReceivedConfigurationValue := server.lastReceivedConfiguration.Get().(time.Time)
-				if time.Now().After(lastReceivedConfigurationValue.Add(time.Duration(providersThrottleDuration))) {
-					log.Debugf("Waited for %s configuration, OK", configMsg.ProviderName)
-					if lastConfig, ok := server.lastConfigs.Get(configMsg.ProviderName); ok {
-						server.configurationValidatedChan <- *lastConfig.(*types.ConfigMessage)
-					}
-				}
+		if _, ok := providerConfigUpdateMap[configMsg.ProviderName]; !ok {
+			providerConfigUpdate := make(chan types.ConfigMessage)
+			providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdate
+			server.routinesPool.Go(func(stop chan bool) {
+				throttleProviderConfigReload(providersThrottleDuration, server.configurationValidatedChan, providerConfigUpdate, stop)
 			})
 		}
-		// Update the last configuration loading time
-		server.lastReceivedConfiguration.Set(time.Now())
+		providerConfigUpdateMap[configMsg.ProviderName] <- configMsg
 	}
 }
+// throttleProviderConfigReload throttles the configuration reload speed for a single provider.
+// It will immediately publish a new configuration and then only publish the next configuration after the throttle duration.
+// Note that in the case it receives N new configs in the timeframe of the throttle duration after publishing,
+// it will publish the last of the newly received configurations.
+func throttleProviderConfigReload(throttle time.Duration, publish chan<- types.ConfigMessage, in <-chan types.ConfigMessage, stop chan bool) {
+	ring := channels.NewRingChannel(1)
+	safe.Go(func() {
+		for {
+			select {
+			case <-stop:
+				return
+			case nextConfig := <-ring.Out():
+				publish <- nextConfig.(types.ConfigMessage)
+				time.Sleep(throttle)
+			}
+		}
+	})
+	for {
+		select {
+		case <-stop:
+			return
+		case nextConfig := <-in:
+			ring.In() <- nextConfig
+		}
+	}
+}
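
A minimal, self-contained sketch of the ring-buffer throttle pattern introduced above (hypothetical names and timings, not part of this commit; it assumes only github.com/eapache/channels, the library the commit switches to). A capacity-1 RingChannel never blocks the sender and keeps only the newest element, so a burst of configurations collapses into "publish the first immediately, then the newest one per throttle period":

```go
package main

import (
	"fmt"
	"time"

	"github.com/eapache/channels"
)

// throttle mirrors throttleProviderConfigReload, reduced to plain strings.
// The capacity-1 RingChannel drops all but the newest pending value, so the
// producer is never blocked while the publisher goroutine sleeps.
func throttle(d time.Duration, publish chan<- string, in <-chan string, stop chan bool) {
	ring := channels.NewRingChannel(1)
	go func() {
		for {
			select {
			case <-stop:
				return
			case next := <-ring.Out():
				publish <- next.(string)
				time.Sleep(d) // anything arriving during this nap piles up in the ring
			}
		}
	}()
	for {
		select {
		case <-stop:
			return
		case next := <-in:
			ring.In() <- next
		}
	}
}

func main() {
	in, out, stop := make(chan string), make(chan string), make(chan bool)
	go throttle(300*time.Millisecond, out, in, stop)
	go func() {
		for i := 1; i <= 5; i++ {
			in <- fmt.Sprintf("config-%d", i) // burst: 5 configs, 50ms apart
			time.Sleep(50 * time.Millisecond)
		}
	}()
	fmt.Println(<-out) // config-1, published immediately
	fmt.Println(<-out) // config-5 at ~300ms; 2-4 were overwritten in the ring
	close(stop)
}
```

The commit runs one such throttling goroutine per provider, created lazily in preLoadConfiguration, so a chatty provider can no longer delay or swallow another provider's configuration updates.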

server/server_test.go

@@ -160,6 +160,189 @@ func TestPrepareServerTimeouts(t *testing.T) {
	}
}
func TestListenProvidersSkipsEmptyConfigs(t *testing.T) {
	server, stop, invokeStopChan := setupListenProvider(10 * time.Millisecond)
	defer invokeStopChan()
	go func() {
		for {
			select {
			case <-stop:
				return
			case <-server.configurationValidatedChan:
				t.Error("An empty configuration was published but it should not have been")
			}
		}
	}()
	server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes"}
	// give some time so that the configuration can be processed
	time.Sleep(100 * time.Millisecond)
}
func TestListenProvidersSkipsSameConfigurationForProvider(t *testing.T) {
	server, stop, invokeStopChan := setupListenProvider(10 * time.Millisecond)
	defer invokeStopChan()
	publishedConfigCount := 0
	go func() {
		for {
			select {
			case <-stop:
				return
			case config := <-server.configurationValidatedChan:
				// set the current configuration
				// this is usually done in the processing part of the published configuration
				// so we have to emulate the behaviour here
				currentConfigurations := server.currentConfigurations.Get().(types.Configurations)
				currentConfigurations[config.ProviderName] = config.Configuration
				server.currentConfigurations.Set(currentConfigurations)
				publishedConfigCount++
				if publishedConfigCount > 1 {
					t.Error("Same configuration should not be published multiple times")
				}
			}
		}
	}()
	config := buildDynamicConfig(
		withFrontend("frontend", buildFrontend()),
		withBackend("backend", buildBackend()),
	)
	// provide a configuration
	server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes", Configuration: config}
	// give some time so that the configuration can be processed
	time.Sleep(20 * time.Millisecond)
	// provide the same configuration a second time
	server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes", Configuration: config}
	// give some time so that the configuration can be processed
	time.Sleep(100 * time.Millisecond)
}
func TestListenProvidersPublishesConfigForEachProvider(t *testing.T) {
	server, stop, invokeStopChan := setupListenProvider(10 * time.Millisecond)
	defer invokeStopChan()
	publishedProviderConfigCount := map[string]int{}
	publishedConfigCount := 0
	consumePublishedConfigsDone := make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				return
			case newConfig := <-server.configurationValidatedChan:
				publishedProviderConfigCount[newConfig.ProviderName]++
				publishedConfigCount++
				if publishedConfigCount == 2 {
					consumePublishedConfigsDone <- true
					return
				}
			}
		}
	}()
	config := buildDynamicConfig(
		withFrontend("frontend", buildFrontend()),
		withBackend("backend", buildBackend()),
	)
	server.configurationChan <- types.ConfigMessage{ProviderName: "kubernetes", Configuration: config}
	server.configurationChan <- types.ConfigMessage{ProviderName: "marathon", Configuration: config}
	select {
	case <-consumePublishedConfigsDone:
		if val := publishedProviderConfigCount["kubernetes"]; val != 1 {
			t.Errorf("Got %d configuration publication(s) for provider %q, want 1", val, "kubernetes")
		}
		if val := publishedProviderConfigCount["marathon"]; val != 1 {
			t.Errorf("Got %d configuration publication(s) for provider %q, want 1", val, "marathon")
		}
	case <-time.After(100 * time.Millisecond):
		t.Errorf("Published configurations were not consumed in time")
	}
}
// setupListenProvider configures the Server and starts listenProviders
func setupListenProvider(throttleDuration time.Duration) (server *Server, stop chan bool, invokeStopChan func()) {
	stop = make(chan bool)
	invokeStopChan = func() {
		stop <- true
	}
	globalConfig := configuration.GlobalConfiguration{
		EntryPoints: configuration.EntryPoints{
			"http": &configuration.EntryPoint{},
		},
		ProvidersThrottleDuration: flaeg.Duration(throttleDuration),
	}
	server = NewServer(globalConfig)
	go server.listenProviders(stop)
	return server, stop, invokeStopChan
}
func TestThrottleProviderConfigReload(t *testing.T) {
	throttleDuration := 30 * time.Millisecond
	publishConfig := make(chan types.ConfigMessage)
	providerConfig := make(chan types.ConfigMessage)
	stop := make(chan bool)
	defer func() {
		stop <- true
	}()
	go throttleProviderConfigReload(throttleDuration, publishConfig, providerConfig, stop)
	publishedConfigCount := 0
	stopConsumeConfigs := make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				return
			case <-stopConsumeConfigs:
				return
			case <-publishConfig:
				publishedConfigCount++
			}
		}
	}()
	// publish 5 new configs, one new config every 10 milliseconds
	for i := 0; i < 5; i++ {
		providerConfig <- types.ConfigMessage{}
		time.Sleep(10 * time.Millisecond)
	}
	// after 50 milliseconds 5 new configs were published;
	// with a throttle duration of 30 milliseconds the throttler publishes at ~0ms and ~30ms,
	// so we should have received 2 new configs by now (the newest one follows at ~60ms and is checked below)
	wantPublishedConfigCount := 2
	if publishedConfigCount != wantPublishedConfigCount {
		t.Errorf("%d times configs were published, want %d times", publishedConfigCount, wantPublishedConfigCount)
	}
	stopConsumeConfigs <- true
	select {
	case <-publishConfig:
		// There should be exactly one more message, received ~60 milliseconds after the start of the test.
		select {
		case <-publishConfig:
			t.Error("extra config publication found")
		case <-time.After(100 * time.Millisecond):
			return
		}
	case <-time.After(100 * time.Millisecond):
		t.Error("Last config was not published in time")
	}
}
func TestServerMultipleFrontendRules(t *testing.T) {
	cases := []struct {
		expression string

vendor/github.com/streamrail/concurrent-map/LICENSE

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 streamrail
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/streamrail/concurrent-map/concurrent_map.go

@@ -1,301 +0,0 @@
package cmap
import (
"encoding/json"
"sync"
)
var SHARD_COUNT = 32
// A "thread" safe map of type string:Anything.
// To avoid lock bottlenecks this map is dived to several (SHARD_COUNT) map shards.
type ConcurrentMap []*ConcurrentMapShared
// A "thread" safe string to anything map.
type ConcurrentMapShared struct {
items map[string]interface{}
sync.RWMutex // Read Write mutex, guards access to internal map.
}
// Creates a new concurrent map.
func New() ConcurrentMap {
m := make(ConcurrentMap, SHARD_COUNT)
for i := 0; i < SHARD_COUNT; i++ {
m[i] = &ConcurrentMapShared{items: make(map[string]interface{})}
}
return m
}
// Returns shard under given key
func (m ConcurrentMap) GetShard(key string) *ConcurrentMapShared {
return m[uint(fnv32(key))%uint(SHARD_COUNT)]
}
func (m ConcurrentMap) MSet(data map[string]interface{}) {
for key, value := range data {
shard := m.GetShard(key)
shard.Lock()
shard.items[key] = value
shard.Unlock()
}
}
// Sets the given value under the specified key.
func (m *ConcurrentMap) Set(key string, value interface{}) {
// Get map shard.
shard := m.GetShard(key)
shard.Lock()
shard.items[key] = value
shard.Unlock()
}
// Callback to return new element to be inserted into the map
// It is called while lock is held, therefore it MUST NOT
// try to access other keys in same map, as it can lead to deadlock since
// Go sync.RWLock is not reentrant
type UpsertCb func(exist bool, valueInMap interface{}, newValue interface{}) interface{}
// Insert or Update - updates existing element or inserts a new one using UpsertCb
func (m *ConcurrentMap) Upsert(key string, value interface{}, cb UpsertCb) (res interface{}) {
shard := m.GetShard(key)
shard.Lock()
v, ok := shard.items[key]
res = cb(ok, v, value)
shard.items[key] = res
shard.Unlock()
return res
}
// Sets the given value under the specified key if no value was associated with it.
func (m *ConcurrentMap) SetIfAbsent(key string, value interface{}) bool {
// Get map shard.
shard := m.GetShard(key)
shard.Lock()
_, ok := shard.items[key]
if !ok {
shard.items[key] = value
}
shard.Unlock()
return !ok
}
// Retrieves an element from map under given key.
func (m ConcurrentMap) Get(key string) (interface{}, bool) {
// Get shard
shard := m.GetShard(key)
shard.RLock()
// Get item from shard.
val, ok := shard.items[key]
shard.RUnlock()
return val, ok
}
// Returns the number of elements within the map.
func (m ConcurrentMap) Count() int {
count := 0
for i := 0; i < SHARD_COUNT; i++ {
shard := m[i]
shard.RLock()
count += len(shard.items)
shard.RUnlock()
}
return count
}
// Looks up an item under specified key
func (m *ConcurrentMap) Has(key string) bool {
// Get shard
shard := m.GetShard(key)
shard.RLock()
// See if element is within shard.
_, ok := shard.items[key]
shard.RUnlock()
return ok
}
// Removes an element from the map.
func (m *ConcurrentMap) Remove(key string) {
// Try to get shard.
shard := m.GetShard(key)
shard.Lock()
delete(shard.items, key)
shard.Unlock()
}
// Removes an element from the map and returns it
func (m *ConcurrentMap) Pop(key string) (v interface{}, exists bool) {
// Try to get shard.
shard := m.GetShard(key)
shard.Lock()
v, exists = shard.items[key]
delete(shard.items, key)
shard.Unlock()
return v, exists
}
// Checks if map is empty.
func (m *ConcurrentMap) IsEmpty() bool {
return m.Count() == 0
}
// Used by the Iter & IterBuffered functions to wrap two variables together over a channel,
type Tuple struct {
Key string
Val interface{}
}
// Returns an iterator which could be used in a for range loop.
//
// Deprecated: using IterBuffered() will get better performance
func (m ConcurrentMap) Iter() <-chan Tuple {
ch := make(chan Tuple)
go func() {
wg := sync.WaitGroup{}
wg.Add(SHARD_COUNT)
// Foreach shard.
for _, shard := range m {
go func(shard *ConcurrentMapShared) {
// Foreach key, value pair.
shard.RLock()
for key, val := range shard.items {
ch <- Tuple{key, val}
}
shard.RUnlock()
wg.Done()
}(shard)
}
wg.Wait()
close(ch)
}()
return ch
}
// Returns a buffered iterator which could be used in a for range loop.
func (m ConcurrentMap) IterBuffered() <-chan Tuple {
ch := make(chan Tuple, m.Count())
go func() {
wg := sync.WaitGroup{}
wg.Add(SHARD_COUNT)
// Foreach shard.
for _, shard := range m {
go func(shard *ConcurrentMapShared) {
// Foreach key, value pair.
shard.RLock()
for key, val := range shard.items {
ch <- Tuple{key, val}
}
shard.RUnlock()
wg.Done()
}(shard)
}
wg.Wait()
close(ch)
}()
return ch
}
// Returns all items as map[string]interface{}
func (m ConcurrentMap) Items() map[string]interface{} {
tmp := make(map[string]interface{})
// Insert items to temporary map.
for item := range m.IterBuffered() {
tmp[item.Key] = item.Val
}
return tmp
}
// Iterator callback, called for every key/value pair found in the
// maps. RLock is held for all calls for a given shard,
// therefore the callback sees a consistent view of a shard,
// but not across the shards
type IterCb func(key string, v interface{})
// Callback based iterator, cheapest way to read
// all elements in a map.
func (m *ConcurrentMap) IterCb(fn IterCb) {
for idx := range *m {
shard := (*m)[idx]
shard.RLock()
for key, value := range shard.items {
fn(key, value)
}
shard.RUnlock()
}
}
// Return all keys as []string
func (m ConcurrentMap) Keys() []string {
count := m.Count()
ch := make(chan string, count)
go func() {
// Foreach shard.
wg := sync.WaitGroup{}
wg.Add(SHARD_COUNT)
for _, shard := range m {
go func(shard *ConcurrentMapShared) {
// Foreach key, value pair.
shard.RLock()
for key := range shard.items {
ch <- key
}
shard.RUnlock()
wg.Done()
}(shard)
}
wg.Wait()
close(ch)
}()
// Generate keys
keys := make([]string, count)
for i := 0; i < count; i++ {
keys[i] = <-ch
}
return keys
}
// Reveals ConcurrentMap "private" variables to JSON marshal.
func (m ConcurrentMap) MarshalJSON() ([]byte, error) {
// Create a temporary map, which will hold all items spread across shards.
tmp := make(map[string]interface{})
// Insert items to temporary map.
for item := range m.IterBuffered() {
tmp[item.Key] = item.Val
}
return json.Marshal(tmp)
}
func fnv32(key string) uint32 {
hash := uint32(2166136261)
const prime32 = uint32(16777619)
for i := 0; i < len(key); i++ {
hash *= prime32
hash ^= uint32(key[i])
}
return hash
}
// Concurrent map uses interface{} as its value, therefore JSON Unmarshal
// will probably not know which type to unmarshal into; in such a case
// we'll end up with a value of type map[string]interface{}. In most cases this isn't
// our value type, which is why we've decided to remove this functionality.
// func (m *ConcurrentMap) UnmarshalJSON(b []byte) (err error) {
// // Reverse process of Marshal.
// tmp := make(map[string]interface{})
// // Unmarshal into a single map.
// if err := json.Unmarshal(b, &tmp); err != nil {
// return nil
// }
// // foreach key,value pair in temporary map insert into our concurrent map.
// for key, val := range tmp {
// m.Set(key, val)
// }
// return nil
// }
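
A closing note on the removed map's shard selection: fnv32 above is plain 32-bit FNV-1, and GetShard maps a key to one of SHARD_COUNT (32) shards. A small stdlib-only sketch (hypothetical, not part of the commit) that checks the hand-rolled hash against hash/fnv and derives a shard index the same way GetShard did:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fnv32 is copied from the removed concurrent_map.go above:
// 32-bit FNV-1 (multiply by the prime, then XOR the next byte).
func fnv32(key string) uint32 {
	hash := uint32(2166136261) // FNV-1 offset basis
	const prime32 = uint32(16777619)
	for i := 0; i < len(key); i++ {
		hash *= prime32
		hash ^= uint32(key[i])
	}
	return hash
}

func main() {
	h := fnv.New32() // stdlib FNV-1; fnv.New32a() would be the FNV-1a variant
	h.Write([]byte("kubernetes"))
	fmt.Println(fnv32("kubernetes") == h.Sum32()) // true

	// GetShard: key -> one of SHARD_COUNT (32) shards, as in the removed code.
	fmt.Println(uint(fnv32("kubernetes")) % uint(32))
}
```

Sharding by key hash is what let the old server code call lastConfigs.Set/Get concurrently without one global mutex; since the commit replaces that map with per-provider channels, the dependency can be dropped entirely.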