Add leadership election
Signed-off-by: Emile Vauge <emile@vauge.com>
parent 5a0440d6f8
commit bea5ad3f13
7 changed files with 138 additions and 29 deletions
@@ -94,8 +94,8 @@ func (d *Datastore) watchChanges() error {
 
 // Begin creates a transaction with the KV store.
 func (d *Datastore) Begin() (*Transaction, error) {
-	value := uuid.NewV4().String()
-	remoteLock, err := d.kv.NewLock(d.lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(value)})
+	id := uuid.NewV4().String()
+	remoteLock, err := d.kv.NewLock(d.lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(id)})
 	if err != nil {
 		return nil, err
 	}
@@ -119,8 +119,8 @@ func (d *Datastore) Begin() (*Transaction, error) {
 	// we got the lock! Now make sure we are synced with KV store
 	operation := func() error {
 		meta := d.get()
-		if meta.Lock != value {
-			return fmt.Errorf("Object lock value: expected %s, got %s", value, meta.Lock)
+		if meta.Lock != id {
+			return fmt.Errorf("Object lock value: expected %s, got %s", id, meta.Lock)
 		}
 		return nil
 	}
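The rename from `value` to `id` is cosmetic, but the lock-then-verify pattern around it is worth spelling out: take a TTL'd distributed lock tagged with a fresh UUID, then retry until the locally watched metadata carries that same value, proving the node has caught up with the KV store. A hedged sketch assuming the libkv store API and cenkalti/backoff; `acquire`, `waitForSync`, and `localLock` are illustrative stand-ins for the Datastore internals, not names from this commit:

```go
package cluster

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
	"github.com/docker/libkv/store"
	"github.com/satori/go.uuid"
)

// acquire takes the distributed lock, tagging it with a fresh UUID so the
// holder can later recognize its own value in the shared metadata.
func acquire(kv store.Store, lockKey string) (store.Locker, string, error) {
	id := uuid.NewV4().String()
	lock, err := kv.NewLock(lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(id)})
	if err != nil {
		return nil, "", err
	}
	if _, err := lock.Lock(nil); err != nil {
		return nil, "", err
	}
	return lock, id, nil
}

// waitForSync retries with exponential backoff until the locally watched
// metadata carries our lock value, i.e. this node caught up with the store.
func waitForSync(localLock func() string, id string) error {
	operation := func() error {
		if got := localLock(); got != id {
			return fmt.Errorf("Object lock value: expected %s, got %s", id, got)
		}
		return nil
	}
	return backoff.Retry(operation, backoff.NewExponentialBackOff())
}
```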
@@ -6,22 +6,34 @@ import (
 	"github.com/containous/traefik/safe"
 	"github.com/containous/traefik/types"
 	"github.com/docker/leadership"
+	"golang.org/x/net/context"
 	"time"
 )
 
 // Leadership allows leadership election using a KV store
 type Leadership struct {
-	types.Cluster
+	*safe.Pool
+	*types.Cluster
 	candidate *leadership.Candidate
 }
 
+// NewLeadership creates a leadership
+func NewLeadership(ctx context.Context, cluster *types.Cluster) *Leadership {
+	return &Leadership{
+		Pool:      safe.NewPool(ctx),
+		Cluster:   cluster,
+		candidate: leadership.NewCandidate(cluster.Store, cluster.Store.Prefix+"/leader", cluster.Node, 20*time.Second),
+	}
+}
+
 // Participate tries to be a leader
-func (l *Leadership) Participate(pool *safe.Pool, isElected func(bool)) {
-	pool.Go(func(stop chan bool) {
-		l.candidate = leadership.NewCandidate(l.Store, l.Store.Prefix+"/leader", l.Node, 30*time.Second)
+func (l *Leadership) Participate(pool *safe.Pool) {
+	pool.GoCtx(func(ctx context.Context) {
+		log.Debugf("Node %s running for election", l.Cluster.Node)
+		defer log.Debugf("Node %s no more running for election", l.Cluster.Node)
 		backOff := backoff.NewExponentialBackOff()
 		operation := func() error {
-			return l.run(l.candidate, stop, isElected)
+			return l.run(l.candidate, ctx)
 		}
 
 		notify := func(err error, time time.Duration) {
@@ -36,21 +48,31 @@ func (l *Leadership) Participate(pool *safe.Pool, isElected func(bool)) {
 
 // Resign resigns from being a leader
 func (l *Leadership) Resign() {
-	if l.candidate != nil {
-		l.candidate.Resign()
-	}
+	l.candidate.Resign()
+	log.Infof("Node %s resined", l.Cluster.Node)
 }
 
-func (l *Leadership) run(candidate *leadership.Candidate, stop chan bool, isElected func(bool)) error {
+func (l *Leadership) run(candidate *leadership.Candidate, ctx context.Context) error {
 	electedCh, errCh := candidate.RunForElection()
 	for {
 		select {
 		case elected := <-electedCh:
-			isElected(elected)
+			l.onElection(elected)
 		case err := <-errCh:
 			return err
-		case <-stop:
+		case <-ctx.Done():
+			l.candidate.Resign()
 			return nil
 		}
 	}
 }
 
+func (l *Leadership) onElection(elected bool) {
+	if elected {
+		log.Infof("Node %s elected leader ♚", l.Cluster.Node)
+		l.Start()
+	} else {
+		log.Infof("Node %s elected slave ♝", l.Cluster.Node)
+		l.Stop()
+	}
+}
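Taken together, the intended wiring looks roughly like the sketch below; a hedged example assuming this revision of the traefik packages, with `loadClusterConfig` as a hypothetical helper standing in for real configuration. Note that routines scheduled on the embedded pool are launched immediately, then stopped and restarted by `onElection` via `Stop()`/`Start()` as leadership is lost and won:

```go
package main

import (
	"github.com/containous/traefik/cluster"
	"github.com/containous/traefik/safe"
	"github.com/containous/traefik/types"
	"golang.org/x/net/context"
)

func main() {
	// A populated *types.Cluster (Node name, *types.Store pointing at a
	// shared KV backend such as Consul or etcd) is assumed here.
	clusterConfig := loadClusterConfig()

	pool := safe.NewPool(context.Background())
	l := cluster.NewLeadership(pool.Ctx(), clusterConfig)

	// Campaign in the background; on election the embedded pool is started,
	// on losing leadership it is stopped.
	l.Participate(pool)

	// Leader-gated work: its context is cancelled whenever this node stops
	// being leader, and the routine is relaunched on the next win.
	l.GoCtx(func(ctx context.Context) {
		<-ctx.Done()
	})

	select {} // block forever (illustration only)
}

// loadClusterConfig is a hypothetical stand-in; it must return a fully
// populated cluster configuration for NewLeadership to work.
func loadClusterConfig() *types.Cluster { return &types.Cluster{} }
```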
@@ -148,7 +148,7 @@ func (provider *Kv) list(keys ...string) []string {
 	joinedKeys := strings.Join(keys, "")
 	keysPairs, err := provider.kvclient.List(joinedKeys)
 	if err != nil {
-		log.Errorf("Error getting keys %s %s ", joinedKeys, err)
+		log.Warnf("Cannot get keys %s %s ", joinedKeys, err)
 		return nil
 	}
 	directoryKeys := make(map[string]string)
@@ -170,10 +170,10 @@ func (provider *Kv) get(defaultValue string, keys ...string) string {
 	joinedKeys := strings.Join(keys, "")
 	keyPair, err := provider.kvclient.Get(strings.TrimPrefix(joinedKeys, "/"))
 	if err != nil {
-		log.Warnf("Error getting key %s %s, setting default %s", joinedKeys, err, defaultValue)
+		log.Warnf("Cannot get key %s %s, setting default %s", joinedKeys, err, defaultValue)
 		return defaultValue
 	} else if keyPair == nil {
-		log.Warnf("Error getting key %s, setting default %s", joinedKeys, defaultValue)
+		log.Warnf("Cannot get key %s, setting default %s", joinedKeys, defaultValue)
 		return defaultValue
 	}
 	return string(keyPair.Value)
@@ -183,10 +183,10 @@ func (provider *Kv) splitGet(keys ...string) []string {
 	joinedKeys := strings.Join(keys, "")
 	keyPair, err := provider.kvclient.Get(joinedKeys)
 	if err != nil {
-		log.Warnf("Error getting key %s %s, setting default empty", joinedKeys, err)
+		log.Warnf("Cannot get key %s %s, setting default empty", joinedKeys, err)
 		return []string{}
 	} else if keyPair == nil {
-		log.Warnf("Error getting key %s, setting default %empty", joinedKeys)
+		log.Warnf("Cannot get key %s, setting default %empty", joinedKeys)
 		return []string{}
 	}
 	return strings.Split(string(keyPair.Value), ",")
@@ -1,6 +1,7 @@
 package safe
 
 import (
+	"golang.org/x/net/context"
 	"log"
 	"runtime/debug"
 	"sync"
@@ -11,11 +12,44 @@ type routine struct {
 	stop      chan bool
 }
 
-// Pool creates a pool of go routines
+type routineCtx func(ctx context.Context)
+
+// Pool is a pool of go routines
 type Pool struct {
 	routines    []routine
+	routinesCtx []routineCtx
 	waitGroup   sync.WaitGroup
 	lock        sync.Mutex
+	baseCtx     context.Context
+	ctx         context.Context
+	cancel      context.CancelFunc
+}
+
+// NewPool creates a Pool
+func NewPool(baseCtx context.Context) *Pool {
+	ctx, cancel := context.WithCancel(baseCtx)
+	return &Pool{
+		ctx:     ctx,
+		cancel:  cancel,
+		baseCtx: baseCtx,
+	}
+}
+
+// Ctx returns main context
+func (p *Pool) Ctx() context.Context {
+	return p.ctx
+}
+
+//GoCtx starts a recoverable goroutine with a context
+func (p *Pool) GoCtx(goroutine routineCtx) {
+	p.lock.Lock()
+	p.routinesCtx = append(p.routinesCtx, goroutine)
+	p.waitGroup.Add(1)
+	Go(func() {
+		goroutine(p.ctx)
+		p.waitGroup.Done()
+	})
+	p.lock.Unlock()
 }
 
 // Go starts a recoverable goroutine, and can be stopped with stop chan
@@ -37,6 +71,7 @@ func (p *Pool) Go(goroutine func(stop chan bool)) {
 // Stop stops all started routines, waiting for their termination
 func (p *Pool) Stop() {
 	p.lock.Lock()
+	p.cancel()
 	for _, routine := range p.routines {
 		routine.stop <- true
 	}
@@ -47,6 +82,29 @@ func (p *Pool) Stop() {
 	p.lock.Unlock()
 }
 
+// Start starts all stoped routines
+func (p *Pool) Start() {
+	p.lock.Lock()
+	p.ctx, p.cancel = context.WithCancel(p.baseCtx)
+	for _, routine := range p.routines {
+		p.waitGroup.Add(1)
+		routine.stop = make(chan bool, 1)
+		Go(func() {
+			routine.goroutine(routine.stop)
+			p.waitGroup.Done()
+		})
+	}
+
+	for _, routine := range p.routinesCtx {
+		p.waitGroup.Add(1)
+		Go(func() {
+			routine(p.ctx)
+			p.waitGroup.Done()
+		})
+	}
+	p.lock.Unlock()
+}
+
 // Go starts a recoverable goroutine
 func Go(goroutine func()) {
 	GoWithRecover(goroutine, defaultRecoverGoroutine)
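The Stop/Start pair gives the pool a restartable lifecycle, which is what `onElection` leans on. A minimal usage sketch, assuming this revision of the safe package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/containous/traefik/safe"
	"golang.org/x/net/context"
)

func main() {
	pool := safe.NewPool(context.Background())

	// GoCtx registers the routine and launches it against the pool's context.
	pool.GoCtx(func(ctx context.Context) {
		fmt.Println("routine started")
		<-ctx.Done() // runs until the pool is stopped
		fmt.Println("routine stopped")
	})

	time.Sleep(100 * time.Millisecond)
	pool.Stop()  // cancels the shared context and waits for termination
	pool.Start() // derives a fresh context from baseCtx, relaunches everything
	time.Sleep(100 * time.Millisecond)
	pool.Stop()
}
```

This is why Leadership can simply embed a *safe.Pool: winning an election calls Start, losing calls Stop, and registered routines become leader-gated with no extra bookkeeping.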
server.go (30 changed lines)

@@ -24,6 +24,7 @@ import (
 	log "github.com/Sirupsen/logrus"
 	"github.com/codegangsta/negroni"
 	"github.com/containous/mux"
+	"github.com/containous/traefik/cluster"
 	"github.com/containous/traefik/middlewares"
 	"github.com/containous/traefik/provider"
 	"github.com/containous/traefik/safe"
@@ -50,7 +51,8 @@ type Server struct {
 	currentConfigurations safe.Safe
 	globalConfiguration   GlobalConfiguration
 	loggerMiddleware      *middlewares.Logger
-	routinesPool          safe.Pool
+	routinesPool          *safe.Pool
+	leadership            *cluster.Leadership
 }
 
 type serverEntryPoints map[string]*serverEntryPoint
@@ -80,12 +82,18 @@ func NewServer(globalConfiguration GlobalConfiguration) *Server {
 	server.currentConfigurations.Set(currentConfigurations)
 	server.globalConfiguration = globalConfiguration
 	server.loggerMiddleware = middlewares.NewLogger(globalConfiguration.AccessLogsFile)
+	server.routinesPool = safe.NewPool(context.Background())
+	if globalConfiguration.Cluster != nil {
+		// leadership creation if cluster mode
+		server.leadership = cluster.NewLeadership(server.routinesPool.Ctx(), globalConfiguration.Cluster)
+	}
 
 	return server
 }
 
 // Start starts the server and blocks until server is shutted down.
 func (server *Server) Start() {
+	server.startLeadership()
 	server.startHTTPServers()
 	server.routinesPool.Go(func(stop chan bool) {
 		server.listenProviders(stop)
@@ -125,6 +133,7 @@ func (server *Server) Close() {
 			os.Exit(1)
 		}
 	}(ctx)
+	server.stopLeadership()
 	server.routinesPool.Stop()
 	close(server.configurationChan)
 	close(server.configurationValidatedChan)
@@ -135,6 +144,23 @@ func (server *Server) Close() {
 	cancel()
 }
 
+func (server *Server) startLeadership() {
+	if server.leadership != nil {
+		server.leadership.Participate(server.routinesPool)
+		server.leadership.GoCtx(func(ctx context.Context) {
+			log.Debugf("Started test routine")
+			<-ctx.Done()
+			log.Debugf("Stopped test routine")
+		})
+	}
+}
+
+func (server *Server) stopLeadership() {
+	if server.leadership != nil {
+		server.leadership.Resign()
+	}
+}
+
 func (server *Server) startHTTPServers() {
 	server.serverEntryPoints = server.buildEntryPoints(server.globalConfiguration)
 	for newServerEntryPointName, newServerEntryPoint := range server.serverEntryPoints {
@@ -321,7 +347,7 @@ func (server *Server) startProviders() {
 		log.Infof("Starting provider %v %s", reflect.TypeOf(provider), jsonConf)
 		currentProvider := provider
 		safe.Go(func() {
-			err := currentProvider.Provide(server.configurationChan, &server.routinesPool, server.globalConfiguration.Constraints)
+			err := currentProvider.Provide(server.configurationChan, server.routinesPool, server.globalConfiguration.Constraints)
 			if err != nil {
 				log.Errorf("Error starting provider %s", err)
 			}
@@ -21,6 +21,7 @@ import (
 	"github.com/containous/traefik/types"
 	"github.com/containous/traefik/version"
 	"github.com/docker/libkv/store"
+	"github.com/satori/go.uuid"
 )
 
 var versionTemplate = `Version: {{.Version}}
@@ -155,8 +156,10 @@ Complete documentation is available at https://traefik.io`,
 		// IF a KV Store is enable and no sub-command called in args
 		if kv != nil && usedCmd == traefikCmd {
 			if traefikConfiguration.Cluster == nil {
-				hostname, _ := os.Hostname()
-				traefikConfiguration.Cluster = &types.Cluster{Store: types.Store{Prefix: kv.Prefix, Store: kv.Store}, Node: hostname}
+				traefikConfiguration.Cluster = &types.Cluster{Node: uuid.NewV4().String()}
+			}
+			if traefikConfiguration.Cluster.Store == nil {
+				traefikConfiguration.Cluster.Store = &types.Store{Prefix: kv.Prefix, Store: kv.Store}
 			}
 			s.AddSource(kv)
 			if _, err := s.LoadConfig(); err != nil {
@@ -202,7 +202,7 @@ type Store struct {
 
 // Cluster holds cluster config
 type Cluster struct {
 	Node  string
-	Store Store
+	Store *Store
 }
 
 // Auth holds authentication configuration (BASIC, DIGEST, users)
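A brief note on Store becoming a pointer: a nil *Store is distinguishable from a zero value, which is what lets the defaulting in traefik.go above fill it in lazily. A minimal sketch; the defaultStore helper is illustrative, not part of the commit:

```go
package clusterconfig

import "github.com/containous/traefik/types"

// defaultStore fills Cluster.Store only when it is unset; with a value-typed
// Store there would be no reliable "absent" state to test against.
func defaultStore(c *types.Cluster, fallback *types.Store) {
	if c.Store == nil {
		c.Store = fallback
	}
}
```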