Commit a00536cb authored by Geoff Simmons

Add the CLI option incompleteRetryDelay, default 5s, must be > 0s.
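For illustration, an invocation might look like this (the binary name `k8s-ingress` is assumed here, not taken from this commit; the flag accepts any value understood by Go's time.ParseDuration, e.g. 500ms, 10s, 1m, as long as it is positive):

```
k8s-ingress -log-level=INFO -incompleteRetryDelay=10s
```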

parent d5c1528b
@@ -56,6 +56,8 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
+const defIncomplRetryDelay = 5 * time.Second
var (
versionF = flag.Bool("version", false, "print version and exit")
loglvlF = flag.String("log-level", "INFO",
@@ -87,6 +89,11 @@ var (
"if non-zero, re-update the controller with the state of\n"+
"the cluster this often, even if nothing has changed,\n"+
"to synchronize state that may have been missed")
+incomplRetryDelayF = flag.Duration("incompleteRetryDelay",
+defIncomplRetryDelay,
+"re-queue delay when the controller does not have all of the\n"+
+"information required for a necessary cluster change;\n"+
+"must be > 0s")
logFormat = logrus.TextFormatter{
DisableColors: true,
FullTimestamp: true,
@@ -167,6 +174,11 @@ func main() {
os.Exit(-1)
}
+if *incomplRetryDelayF <= 0 {
+log.Fatal("incompleteRetryDelay must be > 0s")
+os.Exit(-1)
+}
log.Info("Starting Varnish Ingress controller version:", version)
log.Info("Ingress class:", *ingressClassF)
@@ -220,7 +232,7 @@ func main() {
ingController, err := controller.NewIngressController(log,
*ingressClassF, kubeClient, vController, hController,
informerFactory, vcrInformerFactory, vsecrInformerFactory,
-tsecrInformerFactory)
+tsecrInformerFactory, *incomplRetryDelayF)
if err != nil {
log.Fatalf("Could not initialize controller: %v")
os.Exit(-1)
......
@@ -149,6 +149,7 @@ func NewIngressController(
vcrInfFactory vcr_informers.SharedInformerFactory,
vsecrInfFactory informers.SharedInformerFactory,
tsecrInfFactory informers.SharedInformerFactory,
+incomplRetryDelay time.Duration,
) (*IngressController, error) {
ingc := IngressController{
@@ -215,7 +216,7 @@
}
ingc.nsQs = NewNamespaceQueues(ingc.log, ingClass, vc, hc, ingc.listers,
-ingc.client, ingc.recorder)
+ingc.client, ingc.recorder, incomplRetryDelay)
return &ingc, nil
}
......
@@ -52,14 +52,6 @@ import (
"code.uplex.de/uplex-varnish/k8s-ingress/pkg/varnish"
)
-const (
-// syncSuccess and syncFailure are reasons for Events
-syncSuccess = "SyncSuccess"
-syncFailure = "SyncFailure"
-)
-const incompleteRetryDelay = 5 * time.Second
// NamespaceWorker serves fanout of work items to workers for each
// namespace for which the controller is notified about a resource
// update. The NamespaceQueues object creates a new instance when it
@@ -68,23 +60,24 @@ const incompleteRetryDelay = 5 * time.Second
// namespace is synced separately and sequentially, and all of the
// namespaces are synced in parallel.
type NamespaceWorker struct {
-namespace string
-ingClass string
-log *logrus.Logger
-vController *varnish.Controller
-hController *haproxy.Controller
-queue workqueue.RateLimitingInterface
-listers *Listers
-ing ext_listers.IngressNamespaceLister
-svc core_v1_listers.ServiceNamespaceLister
-endp core_v1_listers.EndpointsNamespaceLister
-tsecr core_v1_listers.SecretNamespaceLister
-vsecr core_v1_listers.SecretNamespaceLister
-vcfg vcr_listers.VarnishConfigNamespaceLister
-bcfg vcr_listers.BackendConfigNamespaceLister
-client kubernetes.Interface
-recorder record.EventRecorder
-wg *sync.WaitGroup
+namespace string
+ingClass string
+log *logrus.Logger
+vController *varnish.Controller
+hController *haproxy.Controller
+queue workqueue.RateLimitingInterface
+listers *Listers
+ing ext_listers.IngressNamespaceLister
+svc core_v1_listers.ServiceNamespaceLister
+endp core_v1_listers.EndpointsNamespaceLister
+tsecr core_v1_listers.SecretNamespaceLister
+vsecr core_v1_listers.SecretNamespaceLister
+vcfg vcr_listers.VarnishConfigNamespaceLister
+bcfg vcr_listers.BackendConfigNamespaceLister
+client kubernetes.Interface
+recorder record.EventRecorder
+wg *sync.WaitGroup
+incomplRetryDelay time.Duration
}
func (worker *NamespaceWorker) event(obj interface{}, evtType, reason,
@@ -261,7 +254,7 @@ func (worker *NamespaceWorker) next() {
worker.queue.AddRateLimited(obj)
case update.Incomplete:
worker.syncFailure(obj, msgPfx, status)
-worker.queue.AddAfter(obj, incompleteRetryDelay)
+worker.queue.AddAfter(obj, worker.incomplRetryDelay)
}
}
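As a side note, the delayed retry above uses client-go's workqueue API. A minimal, self-contained sketch of that pattern (not the controller's actual code; the item key "default/my-ingress" is just a placeholder):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Same queue construction the controller uses.
	q := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "demo")
	defer q.ShutDown()

	// An Incomplete sync re-queues the item after a fixed delay,
	// rather than with the rate limiter's backoff.
	incomplRetryDelay := 5 * time.Second
	q.AddAfter("default/my-ingress", incomplRetryDelay)

	start := time.Now()
	item, shutdown := q.Get() // blocks until the delay has elapsed
	if shutdown {
		return
	}
	fmt.Printf("retrying %v after %v\n", item,
		time.Since(start).Round(time.Millisecond))
	q.Done(item)
}
```

This is also why the delay must be positive: AddAfter with a zero or negative duration adds the item immediately, which would defeat the purpose of waiting for the missing cluster state to arrive.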
@@ -282,16 +275,17 @@ func (worker *NamespaceWorker) work() {
// queue and places them on separate queues for NamespaceWorkers
// responsible for each namespace.
type NamespaceQueues struct {
-Queue workqueue.RateLimitingInterface
-ingClass string
-log *logrus.Logger
-vController *varnish.Controller
-hController *haproxy.Controller
-workers map[string]*NamespaceWorker
-listers *Listers
-client kubernetes.Interface
-recorder record.EventRecorder
-wg *sync.WaitGroup
+Queue workqueue.RateLimitingInterface
+ingClass string
+log *logrus.Logger
+vController *varnish.Controller
+hController *haproxy.Controller
+workers map[string]*NamespaceWorker
+listers *Listers
+client kubernetes.Interface
+recorder record.EventRecorder
+wg *sync.WaitGroup
+incomplRetryDelay time.Duration
}
// NewNamespaceQueues creates a NamespaceQueues object.
@@ -309,21 +303,24 @@ func NewNamespaceQueues(
hController *haproxy.Controller,
listers *Listers,
client kubernetes.Interface,
-recorder record.EventRecorder) *NamespaceQueues {
+recorder record.EventRecorder,
+incomplRetryDelay time.Duration,
+) *NamespaceQueues {
q := workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(), "_ALL_")
return &NamespaceQueues{
-Queue: q,
-log: log,
-ingClass: ingClass,
-vController: vController,
-hController: hController,
-workers: make(map[string]*NamespaceWorker),
-listers: listers,
-client: client,
-recorder: recorder,
-wg: new(sync.WaitGroup),
+Queue: q,
+log: log,
+ingClass: ingClass,
+vController: vController,
+hController: hController,
+workers: make(map[string]*NamespaceWorker),
+listers: listers,
+client: client,
+recorder: recorder,
+wg: new(sync.WaitGroup),
+incomplRetryDelay: incomplRetryDelay,
}
}
@@ -358,23 +355,24 @@ func (qs *NamespaceQueues) next() {
q := workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(), ns)
worker = &NamespaceWorker{
-namespace: ns,
-ingClass: qs.ingClass,
-log: qs.log,
-vController: qs.vController,
-hController: qs.hController,
-queue: q,
-listers: qs.listers,
-ing: qs.listers.ing.Ingresses(ns),
-svc: qs.listers.svc.Services(ns),
-endp: qs.listers.endp.Endpoints(ns),
-vsecr: qs.listers.vsecr.Secrets(ns),
-tsecr: qs.listers.tsecr.Secrets(ns),
-vcfg: qs.listers.vcfg.VarnishConfigs(ns),
-bcfg: qs.listers.bcfg.BackendConfigs(ns),
-client: qs.client,
-recorder: qs.recorder,
-wg: qs.wg,
+namespace: ns,
+ingClass: qs.ingClass,
+log: qs.log,
+vController: qs.vController,
+hController: qs.hController,
+queue: q,
+listers: qs.listers,
+ing: qs.listers.ing.Ingresses(ns),
+svc: qs.listers.svc.Services(ns),
+endp: qs.listers.endp.Endpoints(ns),
+vsecr: qs.listers.vsecr.Secrets(ns),
+tsecr: qs.listers.tsecr.Secrets(ns),
+vcfg: qs.listers.vcfg.VarnishConfigs(ns),
+bcfg: qs.listers.bcfg.BackendConfigs(ns),
+client: qs.client,
+recorder: qs.recorder,
+wg: qs.wg,
+incomplRetryDelay: qs.incomplRetryDelay,
}
qs.workers[ns] = worker
qs.wg.Add(1)
......