Commit 379a3129 authored by Geoff Simmons

Self-sharding with only one viking Service instance is not an error.

Only issue a log warning in this case. Verified with an e2e test.

Closes #44
parent 1f905dec
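
For reference, a rough sketch of how the changed behavior can be checked by hand, along the lines of the e2e test added below. The deployment name deploy/varnish-ingress comes from the self-sharding example; the controller Deployment name used here (viking-controller) is an assumption and depends on how the controller was installed.

    # Deploy the self-sharding example, then scale the Varnish Service
    # down to a single replica (commands taken from the e2e test below).
    make EXAMPLE=self-sharding deploy verify
    kubectl scale --replicas=1 deploy/varnish-ingress

    # With this commit the controller keeps the config loaded and only
    # logs a warning. NOTE: "deploy/viking-controller" is an assumed
    # name for the controller Deployment; adjust it for your install.
    kubectl logs deploy/viking-controller | grep 'Sharding requested'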
@@ -417,7 +417,7 @@ func (worker *NamespaceWorker) configSharding(spec *vcl.Spec,
 		}
 	}
 	if len(spec.ShardCluster.Nodes) <= 1 {
-		return update.MakeFatal(
+		worker.log.Warnf(
 			"Sharding requested, but %d endpoint addresses found "+
 				"for service %s/%s",
 			len(spec.ShardCluster.Nodes), svc.Namespace, svc.Name)
...
@@ -86,6 +86,15 @@ make EXAMPLE=primary-only-by-clientid deploy verify undeploy
 echo Self-sharding under conditions example
 make EXAMPLE=shard-conditions deploy verify undeploy
 
+echo Self-sharding with 1 replica
+make EXAMPLE=self-sharding deploy verify
+kubectl scale --replicas=1 --timeout=2m deploy/varnish-ingress
+wait_for_replica_count app=varnish-ingress 1
+make EXAMPLE=self-sharding verify
+kubectl scale --replicas=2 --timeout=2m deploy/varnish-ingress
+wait_for_replica_count app=varnish-ingress 2
+make EXAMPLE=self-sharding verify undeploy
+
 echo Basic Authentication example
 cd ${MYPATH}/../examples/authentication/
 make EXAMPLE=basic-auth deploy verify undeploy
...
@@ -88,3 +88,29 @@ function wait_until_not_configured {
 	wait_for_config_status ${label} ${ns} ${timeout} ${port} '503'
 }
+
+# kubectl scale --timeout is evidently buggy in some versions, so we
+# wait until kubectl get pods outputs the expected number of replicas.
+function wait_for_replica_count {
+	local label="$1"
+	local replicas="$2"
+	local ns=${3-default}
+	local timeout=${4-120}
+	local N=0
+	while true; do
+		local npods=$(kubectl get pods -n ${ns} -l ${label} --no-headers | wc -l)
+		if [ $? -ne 0 ]; then
+			exit 1
+		fi
+		if [ ${npods} -eq ${replicas} ]; then
+			break
+		fi
+		if [ $N -ge ${timeout} ]; then
+			echo "Timed out waiting for $replicas replicas"
+			exit 1
+		fi
+		N=$(( N + 1 ))
+		sleep 1
+	done
+}