Fix kubernetes providers shutdown and clean safe.Pool

parent c80d53e7e5
commit 1b63c95c4e

11 changed files with 73 additions and 190 deletions
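
The diff below replaces the stop-channel API of safe.Pool (Go(func(stop chan bool))) with a context-based one (GoCtx(func(ctx context.Context))), removes the pool's Ctx/Cleanup surface, and rewires every provider goroutine to select on the shared pool context. A minimal sketch of the migration pattern that each hunk repeats (the worker names below are hypothetical, not functions from the repository):

package main

import (
    "context"
    "fmt"
    "time"
)

// Old style: the pool hands each routine a dedicated stop channel.
func renewLoopStop(stop chan bool) {
    ticker := time.NewTicker(100 * time.Millisecond)
    for {
        select {
        case <-ticker.C:
            fmt.Println("renew (stop-channel worker)")
        case <-stop:
            ticker.Stop()
            return
        }
    }
}

// New style: the routine watches the shared pool context instead.
func renewLoopCtx(ctx context.Context) {
    ticker := time.NewTicker(100 * time.Millisecond)
    for {
        select {
        case <-ticker.C:
            fmt.Println("renew (context worker)")
        case <-ctx.Done():
            ticker.Stop()
            return
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    go renewLoopCtx(ctx)

    stop := make(chan bool, 1)
    go renewLoopStop(stop)

    time.Sleep(350 * time.Millisecond)
    cancel()     // one cancellation reaches every context-based worker
    stop <- true // each stop-channel worker had to be signalled individually
    time.Sleep(50 * time.Millisecond)
}
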
@@ -179,12 +179,12 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
     p.renewCertificates(ctx)

     ticker := time.NewTicker(24 * time.Hour)
-    pool.Go(func(stop chan bool) {
+    pool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
             case <-ticker.C:
                 p.renewCertificates(ctx)
-            case <-stop:
+            case <-ctxPool.Done():
                 ticker.Stop()
                 return
             }
@@ -341,7 +341,7 @@ func (p *Provider) resolveDomains(ctx context.Context, domains []string, tlsStor
 }

 func (p *Provider) watchNewDomains(ctx context.Context) {
-    p.pool.Go(func(stop chan bool) {
+    p.pool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
             case config := <-p.configFromListenerChan:
@@ -415,7 +415,7 @@ func (p *Provider) watchNewDomains(ctx context.Context) {
                     p.resolveDomains(ctxRouter, domains, tlsStore)
                 }
             }
-            case <-stop:
+            case <-ctxPool.Done():
                 return
             }
         }
@@ -556,7 +556,7 @@ func deleteUnnecessaryDomains(ctx context.Context, domains []types.Domain) []typ
 func (p *Provider) watchCertificate(ctx context.Context) {
     p.certsChan = make(chan *CertAndStore)

-    p.pool.Go(func(stop chan bool) {
+    p.pool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
             case cert := <-p.certsChan:
@@ -576,7 +576,7 @@ func (p *Provider) watchCertificate(ctx context.Context) {
                 if err != nil {
                     log.FromContext(ctx).Error(err)
                 }
-            case <-stop:
+            case <-ctxPool.Done():
                 return
             }
         }
@@ -103,11 +103,11 @@ func (p *Provider) addWatcher(pool *safe.Pool, directory string, configurationCh
     }

     // Process events
-    pool.Go(func(stop chan bool) {
+    pool.GoCtx(func(ctx context.Context) {
         defer watcher.Close()
         for {
             select {
-            case <-stop:
+            case <-ctx.Done():
                 return
             case evt := <-watcher.Events:
                 if p.Directory == "" {
@@ -98,11 +98,9 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
         return err
     }

-    pool.Go(func(stop chan bool) {
+    pool.GoCtx(func(ctxPool context.Context) {
         operation := func() error {
-            stopWatch := make(chan struct{}, 1)
-            defer close(stopWatch)
-            eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
+            eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())

             if err != nil {
                 logger.Errorf("Error watching kubernetes events: %v", err)
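
Handing ctxPool.Done() straight to WatchAll works because a context's Done() returns a receive-only chan struct{} that is closed on cancellation, which is exactly the contract the hand-rolled stopWatch channel provided, so the per-operation channel and its deferred close can go away. A small self-contained illustration of the idea, with a stand-in watcher rather than the real k8sClient.WatchAll:

package main

import (
    "context"
    "fmt"
    "time"
)

// watchAll stands in for a watcher that stops when stopCh is closed;
// the real k8sClient.WatchAll has a different signature and return type.
func watchAll(stopCh <-chan struct{}) <-chan string {
    out := make(chan string)
    go func() {
        defer close(out)
        for i := 0; ; i++ {
            select {
            case <-stopCh:
                return
            case out <- fmt.Sprintf("event-%d", i):
                time.Sleep(50 * time.Millisecond)
            }
        }
    }()
    return out
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    // ctx.Done() is a <-chan struct{}, so it can replace a manually
    // managed stop channel without any adapter code.
    events := watchAll(ctx.Done())

    go func() {
        time.Sleep(200 * time.Millisecond)
        cancel()
    }()

    for e := range events {
        fmt.Println(e)
    }
}
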
@@ -110,20 +108,20 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
                 select {
                 case <-timer.C:
                     return err
-                case <-stop:
+                case <-ctxPool.Done():
                     return nil
                 }
             }

             throttleDuration := time.Duration(p.ThrottleDuration)
-            throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
+            throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
             if throttledChan != nil {
                 eventsChan = throttledChan
             }

             for {
                 select {
-                case <-stop:
+                case <-ctxPool.Done():
                     return nil
                 case event := <-eventsChan:
                     // Note that event is the *first* event that came in during this throttling interval -- if we're hitting our throttle, we may have dropped events.
@@ -156,7 +154,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
         notify := func(err error, time time.Duration) {
             logger.Errorf("Provider connection error: %v; retrying in %s", err, time)
         }
-        err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
+        err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
         if err != nil {
             logger.Errorf("Cannot connect to Provider: %v", err)
         }
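
Wrapping the backoff policy in backoff.WithContext ties the retry loop itself to the pool context, so RetryNotify stops scheduling new attempts as soon as the pool is cancelled instead of keeping connection retries alive during shutdown. A minimal sketch using the cenkalti/backoff package directly (import path version and timings are assumptions; the real code goes through Traefik's job.NewBackOff wrapper and safe.OperationWithRecover):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    "github.com/cenkalti/backoff/v4" // version assumed for the sketch
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    // Cancel shortly after start to simulate a pool shutdown.
    go func() {
        time.Sleep(300 * time.Millisecond)
        cancel()
    }()

    operation := func() error {
        fmt.Println("trying to connect...")
        return errors.New("connection refused")
    }

    notify := func(err error, next time.Duration) {
        fmt.Printf("error: %v; retrying in %s\n", err, next)
    }

    // WithContext makes the retry loop observe ctx: once ctx is cancelled,
    // RetryNotify returns instead of waiting for the next attempt.
    policy := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
    if err := backoff.RetryNotify(operation, policy, notify); err != nil {
        fmt.Println("gave up:", err)
    }
}
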
@@ -625,7 +623,7 @@ func getCABlocks(secret *corev1.Secret, namespace, secretName string) (string, e
     return cert, nil
 }

-func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
+func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
     if throttleDuration == 0 {
         return nil
     }
@@ -635,10 +633,10 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
     // Run a goroutine that reads events from eventChan and does a non-blocking write to pendingEvent.
     // This guarantees that writing to eventChan will never block,
     // and that pendingEvent will have something in it if there's been an event since we read from that channel.
-    go func() {
+    pool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
-            case <-stop:
+            case <-ctxPool.Done():
                 return
             case nextEvent := <-eventsChan:
                 select {
@@ -650,7 +648,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
                 }
             }
         }
-    }()
+    })

     return eventsChanBuffered
 }
@@ -104,32 +104,29 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
         return err
     }

-    pool.Go(func(stop chan bool) {
+    pool.GoCtx(func(ctxPool context.Context) {
         operation := func() error {
-            stopWatch := make(chan struct{}, 1)
-            defer close(stopWatch)
-
-            eventsChan, err := k8sClient.WatchAll(p.Namespaces, stopWatch)
+            eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
             if err != nil {
                 logger.Errorf("Error watching kubernetes events: %v", err)
                 timer := time.NewTimer(1 * time.Second)
                 select {
                 case <-timer.C:
                     return err
-                case <-stop:
+                case <-ctxPool.Done():
                     return nil
                 }
             }

             throttleDuration := time.Duration(p.ThrottleDuration)
-            throttledChan := throttleEvents(ctxLog, throttleDuration, stop, eventsChan)
+            throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
             if throttledChan != nil {
                 eventsChan = throttledChan
             }

             for {
                 select {
-                case <-stop:
+                case <-ctxPool.Done():
                     return nil
                 case event := <-eventsChan:
                     // Note that event is the *first* event that came in during this
@@ -164,7 +161,8 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
         notify := func(err error, time time.Duration) {
             logger.Errorf("Provider connection error: %s; retrying in %s", err, time)
         }
-        err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
+
+        err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
         if err != nil {
             logger.Errorf("Cannot connect to Provider: %s", err)
         }
@@ -517,7 +515,7 @@ func (p *Provider) updateIngressStatus(i *v1beta1.Ingress, k8sClient Client) err
     return k8sClient.UpdateIngressStatus(i.Namespace, i.Name, service.Status.LoadBalancer.Ingress[0].IP, service.Status.LoadBalancer.Ingress[0].Hostname)
 }

-func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
+func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
     if throttleDuration == 0 {
         return nil
     }
@@ -528,10 +526,10 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
     // non-blocking write to pendingEvent. This guarantees that writing to
     // eventChan will never block, and that pendingEvent will have
     // something in it if there's been an event since we read from that channel.
-    go func() {
+    pool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
-            case <-stop:
+            case <-ctxPool.Done():
                 return
             case nextEvent := <-eventsChan:
                 select {
@@ -545,7 +543,7 @@ func throttleEvents(ctx context.Context, throttleDuration time.Duration, stop ch
                 }
             }
         }
-    }()
+    })

     return eventsChanBuffered
 }
@@ -159,11 +159,11 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
             logger.Errorf("Failed to register for events, %s", err)
             return err
         }
-        pool.Go(func(stop chan bool) {
+        pool.GoCtx(func(ctxPool context.Context) {
             defer close(update)
             for {
                 select {
-                case <-stop:
+                case <-ctxPool.Done():
                     return
                 case event := <-update:
                     logger.Debugf("Received provider event %s", event)
@@ -10,88 +10,37 @@ import (
     "github.com/containous/traefik/v2/pkg/log"
 )

-type routine struct {
-    goroutine func(chan bool)
-    stop      chan bool
-}
-
 type routineCtx func(ctx context.Context)

 // Pool is a pool of go routines
 type Pool struct {
-    routines   []routine
-    waitGroup  sync.WaitGroup
-    lock       sync.Mutex
-    baseCtx    context.Context
-    baseCancel context.CancelFunc
-    ctx        context.Context
-    cancel     context.CancelFunc
+    waitGroup sync.WaitGroup
+    ctx       context.Context
+    cancel    context.CancelFunc
 }

 // NewPool creates a Pool
 func NewPool(parentCtx context.Context) *Pool {
-    baseCtx, baseCancel := context.WithCancel(parentCtx)
-    ctx, cancel := context.WithCancel(baseCtx)
+    ctx, cancel := context.WithCancel(parentCtx)
     return &Pool{
-        baseCtx:    baseCtx,
-        baseCancel: baseCancel,
-        ctx:        ctx,
-        cancel:     cancel,
+        ctx:    ctx,
+        cancel: cancel,
     }
 }

-// Ctx returns main context
-func (p *Pool) Ctx() context.Context {
-    return p.baseCtx
-}
-
 // GoCtx starts a recoverable goroutine with a context
 func (p *Pool) GoCtx(goroutine routineCtx) {
-    p.lock.Lock()
     p.waitGroup.Add(1)
     Go(func() {
         defer p.waitGroup.Done()
         goroutine(p.ctx)
     })
-    p.lock.Unlock()
-}
-
-// Go starts a recoverable goroutine, and can be stopped with stop chan
-func (p *Pool) Go(goroutine func(stop chan bool)) {
-    p.lock.Lock()
-    newRoutine := routine{
-        goroutine: goroutine,
-        stop:      make(chan bool, 1),
-    }
-    p.routines = append(p.routines, newRoutine)
-    p.waitGroup.Add(1)
-    Go(func() {
-        defer p.waitGroup.Done()
-        goroutine(newRoutine.stop)
-    })
-    p.lock.Unlock()
 }

 // Stop stops all started routines, waiting for their termination
 func (p *Pool) Stop() {
-    p.lock.Lock()
-    defer p.lock.Unlock()
     p.cancel()
-    for _, routine := range p.routines {
-        routine.stop <- true
-    }
     p.waitGroup.Wait()
-    for _, routine := range p.routines {
-        close(routine.stop)
-    }
-}
-
-// Cleanup releases resources used by the pool, and should be called when the pool will no longer be used
-func (p *Pool) Cleanup() {
-    p.Stop()
-    p.lock.Lock()
-    defer p.lock.Unlock()
-    p.baseCancel()
 }

 // Go starts a recoverable goroutine
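
After this rewrite the pool is just a WaitGroup plus one cancellable context: GoCtx registers a recovered goroutine that receives the pool context, and Stop cancels that context and waits for every routine to return. A usage sketch of the resulting API, with a toy task in place of a real provider:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/containous/traefik/v2/pkg/safe"
)

func main() {
    pool := safe.NewPool(context.Background())

    // Each routine receives the pool context; panics are recovered by safe.Go.
    pool.GoCtx(func(ctx context.Context) {
        ticker := time.NewTicker(50 * time.Millisecond)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                fmt.Println("working")
            case <-ctx.Done():
                fmt.Println("shutting down")
                return
            }
        }
    })

    time.Sleep(200 * time.Millisecond)

    // Stop cancels the shared context and waits for every routine to return.
    pool.Stop()
}
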
@@ -18,12 +18,13 @@ func TestNewPoolContext(t *testing.T) {
     ctx := context.WithValue(context.Background(), testKey, "test")
     p := NewPool(ctx)

-    retCtx := p.Ctx()
-
-    retCtxVal, ok := retCtx.Value(testKey).(string)
-    if !ok || retCtxVal != "test" {
-        t.Errorf("Pool.Ctx() did not return a derived context, got %#v, expected context with test value", retCtx)
-    }
+    p.GoCtx(func(ctx context.Context) {
+        retCtxVal, ok := ctx.Value(testKey).(string)
+        if !ok || retCtxVal != "test" {
+            t.Errorf("Pool.Ctx() did not return a derived context, got %#v, expected context with test value", ctx)
+        }
+    })
+    p.Stop()
 }

 type fakeRoutine struct {
@@ -46,14 +47,6 @@ func (tr *fakeRoutine) routineCtx(ctx context.Context) {
     <-ctx.Done()
 }

-func (tr *fakeRoutine) routine(stop chan bool) {
-    tr.Lock()
-    tr.started = true
-    tr.Unlock()
-    tr.startSig <- true
-    <-stop
-}
-
 func TestPoolWithCtx(t *testing.T) {
     testRoutine := newFakeRoutine()

@@ -79,12 +72,12 @@ func TestPoolWithCtx(t *testing.T) {
             defer timer.Stop()

             test.fn(p)
-            defer p.Cleanup()
+            defer p.Stop()

             testDone := make(chan bool, 1)
             go func() {
                 <-testRoutine.startSig
-                p.Cleanup()
+                p.Stop()
                 testDone <- true
             }()

@@ -100,89 +93,30 @@ func TestPoolWithCtx(t *testing.T) {
         }
     }
 }

-func TestPoolWithStopChan(t *testing.T) {
-    testRoutine := newFakeRoutine()
-
+func TestPoolCleanupWithGoPanicking(t *testing.T) {
     p := NewPool(context.Background())

     timer := time.NewTimer(500 * time.Millisecond)
     defer timer.Stop()

-    p.Go(testRoutine.routine)
-    if len(p.routines) != 1 {
-        t.Fatalf("After Pool.Go(func), Pool did have %d goroutines, expected 1", len(p.routines))
-    }
+    p.GoCtx(func(ctx context.Context) {
+        panic("BOOM")
+    })

     testDone := make(chan bool, 1)
     go func() {
-        <-testRoutine.startSig
-        p.Cleanup()
+        p.Stop()
         testDone <- true
     }()

     select {
     case <-timer.C:
-        testRoutine.Lock()
-        defer testRoutine.Unlock()
-        t.Fatalf("Pool test did not complete in time, goroutine started equals '%t'", testRoutine.started)
+        t.Fatalf("Pool.Cleanup() did not complete in time with a panicking goroutine")
     case <-testDone:
         return
     }
 }
-
-func TestPoolCleanupWithGoPanicking(t *testing.T) {
-    testRoutine := func(stop chan bool) {
-        panic("BOOM")
-    }
-
-    testCtxRoutine := func(ctx context.Context) {
-        panic("BOOM")
-    }
-
-    testCases := []struct {
-        desc string
-        fn   func(*Pool)
-    }{
-        {
-            desc: "Go()",
-            fn: func(p *Pool) {
-                p.Go(testRoutine)
-            },
-        },
-        {
-            desc: "GoCtx()",
-            fn: func(p *Pool) {
-                p.GoCtx(testCtxRoutine)
-            },
-        },
-    }
-
-    for _, test := range testCases {
-        test := test
-        t.Run(test.desc, func(t *testing.T) {
-            p := NewPool(context.Background())
-
-            timer := time.NewTimer(500 * time.Millisecond)
-            defer timer.Stop()
-
-            test.fn(p)
-
-            testDone := make(chan bool, 1)
-            go func() {
-                p.Cleanup()
-                testDone <- true
-            }()
-
-            select {
-            case <-timer.C:
-                t.Fatalf("Pool.Cleanup() did not complete in time with a panicking goroutine")
-            case <-testDone:
-                return
-            }
-        })
-    }
-}

 func TestGoroutineRecover(t *testing.T) {
     // if recover fails the test will panic
     Go(func() {
@@ -1,6 +1,7 @@
 package server

 import (
+    "context"
     "encoding/json"
     "reflect"
     "time"
@@ -49,8 +50,8 @@ func NewConfigurationWatcher(routinesPool *safe.Pool, pvd provider.Provider, pro

 // Start the configuration watcher.
 func (c *ConfigurationWatcher) Start() {
-    c.routinesPool.Go(c.listenProviders)
-    c.routinesPool.Go(c.listenConfigurations)
+    c.routinesPool.GoCtx(c.listenProviders)
+    c.routinesPool.GoCtx(c.listenConfigurations)
     c.startProvider()
 }

@@ -90,10 +91,10 @@ func (c *ConfigurationWatcher) startProvider() {
 // listenProviders receives configuration changes from the providers.
 // The configuration message then gets passed along a series of check
 // to finally end up in a throttler that sends it to listenConfigurations (through c.configurationValidatedChan).
-func (c *ConfigurationWatcher) listenProviders(stop chan bool) {
+func (c *ConfigurationWatcher) listenProviders(ctx context.Context) {
     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case configMsg, ok := <-c.configurationChan:
             if !ok {
@@ -111,10 +112,10 @@ func (c *ConfigurationWatcher) listenProviders(stop chan bool) {
     }
 }

-func (c *ConfigurationWatcher) listenConfigurations(stop chan bool) {
+func (c *ConfigurationWatcher) listenConfigurations(ctx context.Context) {
     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case configMsg, ok := <-c.configurationValidatedChan:
             if !ok || configMsg.Configuration == nil {
@@ -178,8 +179,8 @@ func (c *ConfigurationWatcher) preLoadConfiguration(configMsg dynamic.Message) {
     if !ok {
         providerConfigUpdateCh = make(chan dynamic.Message)
         c.providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdateCh
-        c.routinesPool.Go(func(stop chan bool) {
-            c.throttleProviderConfigReload(c.providersThrottleDuration, c.configurationValidatedChan, providerConfigUpdateCh, stop)
+        c.routinesPool.GoCtx(func(ctxPool context.Context) {
+            c.throttleProviderConfigReload(ctxPool, c.providersThrottleDuration, c.configurationValidatedChan, providerConfigUpdateCh)
         })
     }

@@ -190,14 +191,14 @@ func (c *ConfigurationWatcher) preLoadConfiguration(configMsg dynamic.Message) {
 // It will immediately publish a new configuration and then only publish the next configuration after the throttle duration.
 // Note that in the case it receives N new configs in the timeframe of the throttle duration after publishing,
 // it will publish the last of the newly received configurations.
-func (c *ConfigurationWatcher) throttleProviderConfigReload(throttle time.Duration, publish chan<- dynamic.Message, in <-chan dynamic.Message, stop chan bool) {
+func (c *ConfigurationWatcher) throttleProviderConfigReload(ctx context.Context, throttle time.Duration, publish chan<- dynamic.Message, in <-chan dynamic.Message) {
     ring := channels.NewRingChannel(1)
     defer ring.Close()

-    c.routinesPool.Go(func(stop chan bool) {
+    c.routinesPool.GoCtx(func(ctxPool context.Context) {
         for {
             select {
-            case <-stop:
+            case <-ctxPool.Done():
                 return
             case nextConfig := <-ring.Out():
                 if config, ok := nextConfig.(dynamic.Message); ok {
@@ -210,7 +211,7 @@ func (c *ConfigurationWatcher) throttleProviderConfigReload(throttle time.Durati

     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case nextConfig := <-in:
             ring.In() <- nextConfig
@@ -58,7 +58,7 @@ func (s *Server) Start(ctx context.Context) {
     s.tcpEntryPoints.Start()
     s.watcher.Start()

-    s.routinesPool.Go(s.listenSignals)
+    s.routinesPool.GoCtx(s.listenSignals)
 }

 // Wait blocks until the server shutdown.
@@ -90,7 +90,7 @@ func (s *Server) Close() {

     stopMetricsClients()

-    s.routinesPool.Cleanup()
+    s.routinesPool.Stop()

     signal.Stop(s.signals)
     close(s.signals)
@@ -3,6 +3,7 @@
 package server

 import (
+    "context"
     "os/signal"
     "syscall"

@@ -13,10 +14,10 @@ func (s *Server) configureSignals() {
     signal.Notify(s.signals, syscall.SIGUSR1)
 }

-func (s *Server) listenSignals(stop chan bool) {
+func (s *Server) listenSignals(ctx context.Context) {
     for {
         select {
-        case <-stop:
+        case <-ctx.Done():
             return
         case sig := <-s.signals:
             if sig == syscall.SIGUSR1 {
@@ -2,6 +2,8 @@

 package server

+import "context"
+
 func (s *Server) configureSignals() {}

-func (s *Server) listenSignals(stop chan bool) {}
+func (s *Server) listenSignals(ctx context.Context) {}