Commit dd46b1e4 authored by Geoff Simmons

Fix intermittent failures to update Ingress.Status.LoadBalancer.

This almost always happens because the API server's current version
of the Ingress has a newer ResourceVersion than the controller's
cached copy. Sometimes it happens during e2e tests when the Ingress
has already been deleted but the worker queue has not caught up.

Make one attempt to fetch a fresh copy of the Ingress from the API
server, and if necessary, update the LoadBalancer field for the
fresh version.

Inspired by the nginx ingress controller's solution to this problem.
parent f4466945
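
For comparison only, and not part of this change: client-go ships a generic
helper, retry.RetryOnConflict, for this kind of get-modify-update cycle on a
stale object. The sketch below illustrates that pattern around an Ingress
status update; the function name and parameters are invented for the example,
and unlike this commit it always starts from a fresh Get and loops on
conflicts rather than making a single optimistic attempt from the cached copy.

import (
	"context"

	netv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// updateIngressLB is illustrative only: it re-reads the Ingress and
// retries the status update for as long as the API server reports a
// ResourceVersion conflict.
func updateIngressLB(ctx context.Context, client kubernetes.Interface,
	ns, name string, lb []netv1.IngressLoadBalancerIngress) error {

	ingClient := client.NetworkingV1().Ingresses(ns)
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		ing, err := ingClient.Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		ing.Status.LoadBalancer.Ingress = lb
		_, err = ingClient.UpdateStatus(ctx, ing,
			metav1.UpdateOptions{})
		return err
	})
}

The commit instead keeps the optimistic first attempt from the cached copy and
refetches only after a failure, so the extra GET is paid only in the
uncommon case where the first UpdateStatus fails.
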
@@ -34,6 +34,7 @@ import (
 	"context"
 	"encoding/base64"
 	"fmt"
+	"reflect"
 	"strconv"
 	"strings"
@@ -1118,19 +1119,41 @@ func (worker *NamespaceWorker) updateIngStatus(
 		n++
 	}
 
+	updateOpts := metav1.UpdateOptions{}
 	for _, ing := range ings {
+		ctx := context.TODO()
 		ing.Status.LoadBalancer.Ingress =
 			make([]net_v1.IngressLoadBalancerIngress,
 				len(ips)+len(hosts))
 		copy(ing.Status.LoadBalancer.Ingress, lb)
 		ingClient := worker.client.NetworkingV1().
 			Ingresses(ing.Namespace)
-		if _, err := ingClient.UpdateStatus(context.TODO(), ing,
-			metav1.UpdateOptions{}); err != nil {
+		_, err := ingClient.UpdateStatus(ctx, ing, updateOpts)
+		if err == nil {
+			continue
+		}
+		// Commonly fails because the ResourceVersion has been updated.
+		// Retry if necessary with a fresh copy from the API server.
+		freshIng, err := ingClient.Get(ctx, ing.Name,
+			metav1.GetOptions{})
+		if err != nil {
 			return update.MakeFatal(
-				"Cannot update status for Ingress %s/%s: %v",
+				"Cannot update status for Ingress %s/%s, "+
+					"retry fetch failed: %v",
 				ing.Namespace, ing.Name, err)
 		}
+		if !reflect.DeepEqual(ing.Status.LoadBalancer,
+			freshIng.Status.LoadBalancer) {
+			freshIng.Status.LoadBalancer = ing.Status.LoadBalancer
+			_, err = ingClient.UpdateStatus(ctx, freshIng,
+				updateOpts)
+			if err != nil {
+				return update.MakeFatal(
+					"Cannot update status for Ingress "+
+						"%s/%s: %v",
+					ing.Namespace, ing.Name, err)
+			}
+		}
 	}
 	return update.MakeSuccess("")
 }
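
An aside, not part of this commit: the refetch above is attempted after any
UpdateStatus error, but the two failure modes named in the commit message can
be told apart with the apimachinery error helpers. A minimal, purely
illustrative sketch (the function name and return values are invented for the
example):

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// classifyUpdateError is illustrative only: it separates the failure
// modes described in the commit message using the apimachinery error
// helpers.
func classifyUpdateError(err error) string {
	switch {
	case err == nil:
		return "updated"
	case apierrors.IsConflict(err):
		// stale ResourceVersion: refetch the Ingress and retry
		return "conflict"
	case apierrors.IsNotFound(err):
		// the Ingress was already deleted before the worker
		// queue caught up (the e2e-test case)
		return "not found"
	default:
		return "other error"
	}
}

With a split like this, the NotFound case could be skipped rather than
treated as a failure, matching the e2e scenario described in the commit
message.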