Commit 5e7899ee authored by Geoff Simmons

Change the semantics of the defaultKey field for self-sharding by cookie.

The field is still optional, but if no default key is specified, then
it is implicitly the empty string. We no longer support a use case
in which an error response is returned for sharding by key if the
key is missing from the request. This will also be the case when
we support sharding by other keys besides cookies, such as request
headers.
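
For illustration, a minimal sketch of a shard-by-cookie configuration under
the new semantics (field names and the cookie name "foo" are taken from the
example values file added in this commit; the comments are explanatory only):

    selfSharding:
      rules:
        - shard:
            key: cookie=foo      # shard on the value of the "foo" cookie
            primaryOnly: true
            # defaultKey may be omitted; it then defaults to the empty
            # string, so requests without the cookie (such as health
            # probes) are still assigned a shard rather than answered
            # with an error response.
            defaultKey: ""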

This makes it possible to move the clause in vcl_recv that returns
the synthetic response for health checks from other shard instances
so that it comes after the primaryOnly code. That clause had been moved to the
start of vcl_recv, because health checks were failing in the shard
by cookie case without a default key (since health probes have no
Cookie header). But the result was that primaryOnly code did not
work as expected. Now we have moved that clause back to where it
was previously -- health probes now don't fail in the shard by
cookie case, because there is always a default key.
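
A rough sketch of the resulting order in the generated vcl_recv, condensed
from the golden test output added in this commit (the director, regex and
header names, and the "defaultKey" value, come from that fixture; the
hash_ignore_busy/grace handling is omitted here):

    sub vcl_recv {
        # Shard key from the Cookie header, falling back to the
        # default key (possibly empty) when the cookie is absent.
        set req.http.VK8S-Shard-Key = "defaultKey";
        if (vk8s_shard_cookie_0.match(req.http.Cookie)) {
            set req.http.VK8S-Shard-Key
                = vk8s_shard_cookie_0.backref(1);
        }

        # primaryOnly: pipe to the primary if this instance is not it.
        vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW,
            by=KEY, key=vk8s_cluster.key(req.http.VK8S-Shard-Key)));
        if (remote.ip !~ vk8s_cluster_acl
            && "" + vk8s_cluster_primary.get() != server.identity) {
            set req.backend_hint = vk8s_cluster_primary.get();
            return (pipe);
        }

        # Only then: answer health probes and requests forwarded
        # from other shard instances.
        if (remote.ip ~ vk8s_cluster_acl) {
            if (req.http.Host == "vk8s_cluster") {
                if (req.url == "/vk8s_cluster_health") {
                    return (synth(200));
                }
                return (synth(404));
            }
            return (hash);
        }
    }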
parent 79b44d08
...@@ -58,11 +58,6 @@ deploy-shard-by-cookie-helm:
@helm install viking-ingress-shard-by-cookie \
$(CHARTDIR)/viking-test-app --values values-shard-by-cookie.yaml
deploy-shard-by-cookie-default-helm:
@helm install viking-ingress-shard-by-cookie-default \
$(CHARTDIR)/viking-test-app \
--values values-shard-by-cookie-default.yaml
deploy-primary-only-by-clientid-helm:
@helm install viking-ingress-primary-only-by-clientid $(CHARTDIR)/viking-test-app \
--values values-primary-only-by-clientid.yaml
...@@ -134,10 +129,6 @@ undeploy-shard-by-cookie-helm:
@helm uninstall viking-ingress-shard-by-cookie
$(MAKE) wait
undeploy-shard-by-cookie-default-helm:
@helm uninstall viking-ingress-shard-by-cookie-default
$(MAKE) wait
undeploy-primary-only-by-clientid-helm:
@helm uninstall viking-ingress-primary-only-by-clientid
$(MAKE) wait
...@@ -213,8 +204,6 @@ deploy-shard-by-key: deploy-shard-by-key-helm
undeploy-shard-by-key: undeploy-shard-by-key-helm
deploy-shard-by-cookie: deploy-shard-by-cookie-helm
undeploy-shard-by-cookie: undeploy-shard-by-cookie-helm
deploy-shard-by-cookie-default: deploy-shard-by-cookie-default-helm
undeploy-shard-by-cookie-default: undeploy-shard-by-cookie-default-helm
deploy-primary-only-by-clientid: deploy-primary-only-by-clientid-helm
undeploy-primary-only-by-clientid: undeploy-primary-only-by-clientid-helm
deploy-shard-conditions: deploy-shard-conditions-helm
...@@ -239,9 +228,6 @@ undeploy: undeploy-shard-by-key
else ifeq ($(EXAMPLE),shard-by-cookie)
deploy: deploy-shard-by-cookie
undeploy: undeploy-shard-by-cookie
else ifeq ($(EXAMPLE),shard-by-cookie-default)
deploy: deploy-shard-by-cookie-default
undeploy: undeploy-shard-by-cookie-default
else ifeq ($(EXAMPLE),primary-only-by-clientid)
deploy: deploy-primary-only-by-clientid
undeploy: undeploy-primary-only-by-clientid
...@@ -250,7 +236,7 @@ deploy: deploy-shard-conditions
undeploy: undeploy-shard-conditions
else
deploy undeploy:
$(error EXAMPLE must be set to self-sharding, shard-conditions, primary-only[-by-clientid], or shard-by-[digest|url|key|cookie[-default]])
$(error EXAMPLE must be set to self-sharding, shard-conditions, primary-only[-by-clientid], or shard-by-[digest|url|key|cookie])
endif
.PHONY: all $(MAKECMDGOALS)
# looks like -*- vcl -*-
varnishtest "cafe example with self-sharding by Cookie, and default key"
# The beresp may send Connection:close, if Varnish went to pipe due to
# primary-only. So we run each test in a separate connection.
client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/foo -hdr "Host: cafe.example.com" \
-hdr "Cookie: baz=quux; foo=abcdefghijklmnopqrstuvwxyz; 47=11"
rxresp
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /coffee/foo$"
expect resp.body ~ "(?m)^Server name: coffee-[a-z0-9]+-[a-z0-9]+$"
} -run
client c1 -connect "${localhost} ${localport}" {
txreq -url /tea/bar -hdr "Host: cafe.example.com" \
-hdr "Cookie: baz=quux; foo=foobar"
rxresp
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /tea/bar"
expect resp.body ~ "(?m)^Server name: tea-[a-z0-9]+-[a-z0-9]+$"
} -run
client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/baz -hdr "Host: cafe.example.com" \
-hdr "Cookie: foo=47; baz=quux"
rxresp
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /coffee/baz"
expect resp.body ~ "(?m)^Server name: coffee-[a-z0-9]+-[a-z0-9]+$"
} -run
client c1 -connect "${localhost} ${localport}" {
txreq -url /tea/quux -hdr "Host: cafe.example.com" \
-hdr "Cookie: foo=fighter"
rxresp
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /tea/quux"
expect resp.body ~ "(?m)^Server name: tea-[a-z0-9]+-[a-z0-9]+$"
} -run
client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/foo -hdr "Host: cafe.example.com" \
-hdr "Cookie: baz=quux; 47=11"
rxresp
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /coffee/foo$"
expect resp.body ~ "(?m)^Server name: coffee-[a-z0-9]+-[a-z0-9]+$"
} -run
client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/foo -hdr "Host: cafe.example.com"
rxresp
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /coffee/foo$"
expect resp.body ~ "(?m)^Server name: coffee-[a-z0-9]+-[a-z0-9]+$"
} -run
...@@ -44,11 +44,15 @@ client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/foo -hdr "Host: cafe.example.com" \
-hdr "Cookie: baz=quux; 47=11"
rxresp
expect resp.status == 400
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /coffee/foo$"
expect resp.body ~ "(?m)^Server name: coffee-[a-z0-9]+-[a-z0-9]+$"
} -run
client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/foo -hdr "Host: cafe.example.com"
rxresp
expect resp.status == 400
expect resp.status == 200
expect resp.body ~ "(?m)^URI: /coffee/foo$"
expect resp.body ~ "(?m)^Server name: coffee-[a-z0-9]+-[a-z0-9]+$"
} -run
apps:
coffee:
image: nginxdemos/hello:plain-text
replicas: 2
tea:
image: nginxdemos/hello:plain-text
replicas: 3
ingress:
name: cafe-ingress
rules:
- host: cafe.example.com
paths:
- path: /tea
type: Prefix
app: tea
- path: /coffee
type: Prefix
app: coffee
vikingAdmSvc: varnish-ingress-admin
selfSharding:
rules:
- shard:
key: cookie=foo
primaryOnly: true
defaultKey: ""
# Use reqDisposition to bypass builtin vcl_recv so that responses to
# requests with the Cookie header may be cacheable.
# cf. "cookie pass" in examples/req-disposition
reqDisposition:
- conditions:
- comparand: req.http.Host
compare: not-exists
- comparand: req.esi_level
count: 0
- comparand: req.proto
compare: prefix
values:
- HTTP/1.1
match-flags:
case-sensitive: false
disposition:
action: synth
status: 400
- conditions:
- comparand: req.method
compare: not-equal
values:
- GET
- HEAD
- PUT
- POST
- TRACE
- OPTIONS
- DELETE
- PATCH
- CONNECT
disposition:
action: synth
status: 405
- conditions:
- comparand: req.method
compare: not-equal
values:
- GET
- HEAD
disposition:
action: pass
#! /bin/bash -ex
MYDIR=$(dirname ${BASH_SOURCE[0]})
source ${MYDIR}/../../test/utils.sh
LOCALPORT=${LOCALPORT:-8888}
wait_until_ready app=varnish-ingress
wait_until_configured app=varnish-ingress
kubectl port-forward svc/varnish-ingress ${LOCALPORT}:80 >/dev/null &
trap 'kill $(jobs -p)' EXIT
wait_for_port ${LOCALPORT}
# XXX hackish, see the comments in verify.sh
sleep 10
varnishtest ${TESTOPTS} -Dlocalport=${LOCALPORT} cafe-cookie-default.vtc
...@@ -76,10 +76,10 @@ type ShardRule struct {
// ShardSpec specifies the configuration details for sharding.
type ShardSpec struct {
Key string `json:"key,omitempty"`
Digest string `json:"digest,omitempty"`
DefaultKey *string `json:"defaultKey,omitempty"`
DefaultKey string `json:"defaultKey,omitempty"`
PrimaryOnly bool `json:"primaryOnly,omitempty"`
}
// ProbeSpec specifies health probes for self-sharding and BackendConfig.
...
...@@ -111,24 +111,6 @@ sub vcl_init {
}
sub vcl_recv {
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
{{ range $ridx, $rule := .Rules -}}
{{ if $rule.PrimaryOnly -}}
{{ if $rule.Conditions -}}
...@@ -149,14 +131,8 @@ sub vcl_recv {
{{ end -}}
{{- digest_update 'c' $rule }}
{{- if isCookieKey $rule }}
if (!vk8s_shard_cookie_{{$ridx}}.match(req.http.Cookie)) {
{{- if hasDefaultKey $rule }}
set req.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
{{- else }}
return (synth(400));
{{- end }}
}
else {
set req.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
if (vk8s_shard_cookie_{{$ridx}}.match(req.http.Cookie)) {
set req.http.VK8S-Shard-Key
= vk8s_shard_cookie_{{$ridx}}.backref(1);
}
...@@ -168,9 +144,28 @@ sub vcl_recv {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
{{ if $rule.Conditions -}}}{{ end -}}
{{ if $rule.Conditions -}}} else
{{ end -}}
{{ end -}}
{{ end -}}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vk8s_cluster_fetch {
...@@ -196,14 +191,8 @@ sub vk8s_cluster_fetch {
{{ closeIf $rule }}
{{- digest_update 'b' $rule }}
{{- if isCookieKey $rule }}
if (!vk8s_shard_cookie_{{$ridx}}.match(bereq.http.Cookie)) {
{{- if hasDefaultKey $rule }}
set bereq.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
{{- else }}
return (error(400));
{{- end }}
}
else {
set bereq.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
if (vk8s_shard_cookie_{{$ridx}}.match(bereq.http.Cookie)) {
set bereq.http.VK8S-Shard-Key
= vk8s_shard_cookie_{{$ridx}}.backref(1);
}
...@@ -296,10 +285,6 @@ func isCookieKey(rule ShardRule) bool {
return rule.By == Cookie
}
func hasDefaultKey(rule ShardRule) bool {
return rule.DefaultKey != nil
}
func digestInit(rule ShardRule) string {
if rule.By != Blob {
return ""
...@@ -357,7 +342,6 @@ const selfShardName = "self-sharding"
var shardFuncMap = template.FuncMap{
"key": keyParams,
"isCookieKey": isCookieKey,
"hasDefaultKey": hasDefaultKey,
"digest_init": digestInit, "digest_init": digestInit,
"digest_update": digestUpdate, "digest_update": digestUpdate,
"hasPrimary": hasPrimary, "hasPrimary": hasPrimary,
......
...@@ -153,12 +153,11 @@ func TestShardByCookie(t *testing.T) {
}
func TestShardByCookieDefault(t *testing.T) {
defaultKey := "defaultKey"
varnishCluster.Rules = []ShardRule{{
PrimaryOnly: true,
By: Cookie,
Key: "bazquux",
DefaultKey: &defaultKey,
DefaultKey: "defaultKey",
Conditions: []Condition{},
}}
templateTest(t, shardTmpl, varnishCluster,
...
...@@ -432,7 +432,7 @@ const (
// conditions under which the configuration holds.
type ShardRule struct {
Conditions []Condition
DefaultKey *string
DefaultKey string
Key string
By KeyBy
Algo HashAlgo
...@@ -461,6 +461,7 @@ func (shard ShardCluster) hash(hash hash.Hash) {
cond.hash(hash)
}
hash.Write([]byte(rule.Key))
hash.Write([]byte(rule.DefaultKey))
hash.Write([]byte{byte(rule.By)})
hash.Write([]byte{byte(rule.Algo)})
if rule.PrimaryOnly {
...
...@@ -152,7 +152,7 @@ sub vcl_recv {
return (hash);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -55,6 +55,13 @@ sub vcl_init {
}
sub vcl_recv {
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
...@@ -73,14 +80,7 @@ sub vcl_recv {
return (hash);
}
}
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -73,7 +73,7 @@ sub vcl_recv {
return (hash);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -59,6 +59,20 @@ sub vcl_init {
}
sub vcl_recv {
if (vk8s_selfshard_cond_0_0.hasprefix(req.url)) {
set req.http.VK8S-Shard-Key = "";
if (vk8s_shard_cookie_0.match(req.http.Cookie)) {
set req.http.VK8S-Shard-Key
= vk8s_shard_cookie_0.backref(1);
}
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.VK8S-Shard-Key)));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
} else
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
...@@ -77,22 +91,7 @@ sub vcl_recv {
return (hash);
}
if (vk8s_selfshard_cond_0_0.hasprefix(req.url)) {
if (!vk8s_shard_cookie_0.match(req.http.Cookie)) {
return (synth(400));
}
else {
set req.http.VK8S-Shard-Key
= vk8s_shard_cookie_0.backref(1);
}
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.VK8S-Shard-Key)));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
}}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
import std;
import directors;
import blob;
import blobdigest;
import taskvar;
import re2;
import selector;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster"
"Connection: close";
.timeout = 2s;
.interval = 5s;
.initial = 2;
.window = 8;
.threshold = 3;
}
backend vk8s_default_varnish-8445d4f7f-z2b9p {
.host = "172.17.0.12";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend vk8s__ {
.host = "172.17.0.13";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend vk8s_default_varnish-8445d4f7f-ldljf {
.host = "172.17.0.14";
.port = "80";
.probe = vk8s_probe_varnish;
}
acl vk8s_cluster_acl {
"172.17.0.12";
"172.17.0.13";
"172.17.0.14";
}
sub vcl_init {
new vk8s_cluster_param = directors.shard_param();
new vk8s_cluster = directors.shard();
vk8s_cluster.associate(vk8s_cluster_param.use());
vk8s_cluster.add_backend(vk8s_default_varnish-8445d4f7f-z2b9p);
vk8s_cluster.add_backend(vk8s__);
vk8s_cluster.add_backend(vk8s_default_varnish-8445d4f7f-ldljf);
vk8s_cluster.reconfigure();
new vk8s_cluster_forward = taskvar.bool();
new vk8s_cluster_primary = taskvar.backend();
new vk8s_shard_cookie_0 =
re2.regex("\bbazquux\s*=\s*([^,;[:space:]]+)");
}
sub vcl_recv {
set req.http.VK8S-Shard-Key = "defaultKey";
if (vk8s_shard_cookie_0.match(req.http.Cookie)) {
set req.http.VK8S-Shard-Key
= vk8s_shard_cookie_0.backref(1);
}
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.VK8S-Shard-Key)));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
|| bereq.uncacheable
|| remote.ip ~ vk8s_cluster_acl
|| "" + vk8s_cluster.backend(resolve=NOW) == server.identity) {
return;
}
if (vk8s_cluster_forward.get(fallback=false)) {
set bereq.backend = vk8s_cluster.backend(resolve=LAZY);
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
sub vcl_backend_fetch {
call vk8s_cluster_fetch;
}
sub vcl_backend_response {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
if (beresp.ttl > 5m) {
set beresp.ttl = 5m;
}
unset beresp.http.VK8S-Cluster-TTL;
}
else {
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
return (deliver);
}
}
sub vcl_deliver {
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl && ! vk8s_cluster_primary.defined()) {
if (! obj.uncacheable) {
set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
...@@ -56,6 +56,14 @@ sub vcl_init {
}
sub vcl_recv {
vk8s_shard_digest.update(blob.decode(encoded=req.http.Host));
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final()));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
...@@ -74,15 +82,7 @@ sub vcl_recv {
return (hash);
}
}
vk8s_shard_digest.update(blob.decode(encoded=req.http.Host));
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final()));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -55,6 +55,13 @@ sub vcl_init {
}
sub vcl_recv {
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.Host)));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
...@@ -73,14 +80,7 @@ sub vcl_recv {
return (hash);
}
}
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.Host)));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -55,6 +55,13 @@ sub vcl_init {
}
sub vcl_recv {
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=URL));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
...@@ -73,14 +80,7 @@ sub vcl_recv {
return (hash);
}
}
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=URL));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -58,6 +58,16 @@ sub vcl_init {
}
sub vcl_recv {
if (vk8s_selfshard_cond_0_0.hasprefix(req.url)) {
vk8s_shard_digest.update(blob.decode(encoded=req.http.Host));
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final()));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
} else
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
...@@ -76,16 +86,7 @@ sub vcl_recv {
return (hash);
}
if (vk8s_selfshard_cond_0_0.hasprefix(req.url)) {
vk8s_shard_digest.update(blob.decode(encoded=req.http.Host));
vk8s_cluster_primary.set(vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final()));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster_primary.get() != server.identity) {
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
}}
}
sub vk8s_cluster_fetch {
if (bereq.retries > 0
...
...@@ -87,9 +87,6 @@ make EXAMPLE=shard-by-key deploy verify undeploy
echo Self-sharding by cookie example
make EXAMPLE=shard-by-cookie deploy verify-cookie undeploy
echo Self-sharding by cookie with default key example
make EXAMPLE=shard-by-cookie-default deploy verify-cookie-default undeploy
echo Primary-only self-sharding by client.identity as key
make EXAMPLE=primary-only-by-clientid deploy verify undeploy
...
...@@ -664,24 +664,6 @@ templates:
}
sub vcl_recv {
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
{{ range $ridx, $rule := .Rules -}}
{{ if $rule.PrimaryOnly -}}
{{ if $rule.Conditions -}}
...@@ -702,14 +684,8 @@ templates:
{{ end -}}
{{- digest_update 'c' $rule }}
{{- if isCookieKey $rule }}
if (!vk8s_shard_cookie_{{$ridx}}.match(req.http.Cookie)) {
{{- if hasDefaultKey $rule }}
set req.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
{{- else }}
return (synth(400));
{{- end }}
}
else {
set req.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
if (vk8s_shard_cookie_{{$ridx}}.match(req.http.Cookie)) {
set req.http.VK8S-Shard-Key
= vk8s_shard_cookie_{{$ridx}}.backref(1);
}
...@@ -721,9 +697,28 @@ templates:
set req.backend_hint = vk8s_cluster_primary.get();
return (pipe);
}
{{ if $rule.Conditions -}}}{{ end -}}
{{ if $rule.Conditions -}}} else
{{ end -}}
{{ end -}}
{{ end -}}
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vk8s_cluster_fetch {
...@@ -749,14 +744,8 @@ templates:
{{ closeIf $rule }}
{{- digest_update 'b' $rule }}
{{- if isCookieKey $rule }}
if (!vk8s_shard_cookie_{{$ridx}}.match(bereq.http.Cookie)) {
{{- if hasDefaultKey $rule }}
set bereq.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
{{- else }}
return (error(400));
{{- end }}
}
else {
set bereq.http.VK8S-Shard-Key = "{{ $rule.DefaultKey }}";
if (vk8s_shard_cookie_{{$ridx}}.match(bereq.http.Cookie)) {
set bereq.http.VK8S-Shard-Key
= vk8s_shard_cookie_{{$ridx}}.backref(1);
}
...