Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
k8s-ingress
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
3
Merge Requests
3
Wiki
Wiki
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Commits
Issue Boards
Open sidebar
uplex-varnish
k8s-ingress
Commits
c32047c2
Commit
c32047c2
authored
Oct 13, 2020
by
Geoff Simmons
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add VCL templating to set the key for the self-sharding.
parent
0460d430
Changes
10
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
751 additions
and
98 deletions
+751
-98
self_shard_tmpl.go
pkg/varnish/vcl/self_shard_tmpl.go
+64
-8
shard_test.go
pkg/varnish/vcl/shard_test.go
+196
-0
spec.go
pkg/varnish/vcl/spec.go
+77
-0
ingress_shard.golden
pkg/varnish/vcl/testdata/ingress_shard.golden
+2
-0
primaryonly_shard.golden
pkg/varnish/vcl/testdata/primaryonly_shard.golden
+2
-0
shard.golden
pkg/varnish/vcl/testdata/shard.golden
+2
-0
shard_by_digest.golden
pkg/varnish/vcl/testdata/shard_by_digest.golden
+138
-0
shard_by_key.golden
pkg/varnish/vcl/testdata/shard_by_key.golden
+135
-0
shard_by_url.golden
pkg/varnish/vcl/testdata/shard_by_url.golden
+135
-0
vcl_test.go
pkg/varnish/vcl/vcl_test.go
+0
-90
No files found.
pkg/varnish/vcl/self_shard_tmpl.go
View file @
c32047c2
...
...
@@ -28,11 +28,16 @@
package
vcl
import
"text/template"
import
(
"regexp"
"text/template"
)
const
selfShardTmplSrc
=
`
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
...
...
@@ -76,15 +81,19 @@ sub vcl_init {
vk8s_cluster.add_backend({{$node.Name}});
{{end -}}
vk8s_cluster.reconfigure();
{{- digest_init . }}
}
sub vcl_recv {
{{ if .PrimaryOnly -}}
unset req.http.VK8S-Shard-Primary-Only;
{{- digest_update 'c' . }}
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW) != server.identity) {
&& "" + vk8s_cluster.backend(resolve=NOW
{{- key 'c' .}}) != server.identity) {
set req.http.VK8S-Shard-Primary-Only = "true";
set req.backend_hint = vk8s_cluster.backend(resolve=NOW);
set req.backend_hint = vk8s_cluster.backend(resolve=NOW
{{- key 'c' .}});
return (pass);
}
else {{ end }}if (remote.ip ~ vk8s_cluster_acl) {
...
...
@@ -119,11 +128,14 @@ sub vcl_backend_fetch {
return (fetch);
}
{{- end }}
{{- digest_update 'b' . }}
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=LAZY);
&& "" + vk8s_cluster.backend(resolve=NOW
{{- key 'b' .}}) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=LAZY
{{- key 'b' . }});
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
...
...
@@ -135,7 +147,7 @@ sub vcl_backend_response {
return (deliver);
}
{{- end }}
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY
{{- key 'b' .}}
)) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
...
...
@@ -152,7 +164,7 @@ sub vcl_backend_response {
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY
{{- key 'b' .}}
)) {
return (deliver);
}
}
...
...
@@ -173,7 +185,51 @@ sub vcl_deliver {
}
`
// reqMatch recognizes VCL key expressions rooted in the client-side
// "req" object (anchored, so "bereq..." is left alone).
var reqMatch = regexp.MustCompile("^req")

// context rewrites a shard-key VCL expression for the VCL context named
// by ctx: 'c' (client scope) returns the expression unchanged; any other
// context is treated as backend scope, where a leading "req" must be
// rewritten to "bereq".
func context(ctx rune, key string) string {
	if ctx != 'c' {
		return reqMatch.ReplaceAllLiteralString(key, "bereq")
	}
	return key
}
func
keyParams
(
ctx
rune
,
shard
ShardCluster
)
string
{
switch
shard
.
By
{
case
ByHash
:
return
""
case
URL
:
return
", by=URL"
case
Key
:
return
", by=KEY, key=vk8s_cluster.key("
+
context
(
ctx
,
shard
.
Key
)
+
")"
}
return
", by=BLOB, key_blob=vk8s_shard_digest.final()"
}
func
digestInit
(
shard
ShardCluster
)
string
{
if
shard
.
By
!=
Blob
{
return
""
}
return
"
\n\t
new vk8s_shard_digest = blobdigest.digest("
+
shard
.
Algo
.
String
()
+
");"
}
func
digestUpdate
(
ctx
rune
,
shard
ShardCluster
)
string
{
if
shard
.
By
!=
Blob
{
return
""
}
return
"
\n\t
vk8s_shard_digest.update(blob.decode(encoded="
+
context
(
ctx
,
shard
.
Key
)
+
"));"
}
// selfShardName is the name under which the self-sharding VCL template
// is registered.
const selfShardName = "self-sharding"

// shardFuncMap exposes the key/digest helpers to the template under the
// names used in selfShardTmplSrc: key, digest_init, digest_update.
var shardFuncMap = template.FuncMap{
	"key":           keyParams,
	"digest_init":   digestInit,
	"digest_update": digestUpdate,
}

// shardTmpl is the parsed self-sharding template. template.Must panics
// at package init if selfShardTmplSrc fails to parse, which is the
// desired behavior for a programming error in the template source.
var shardTmpl = template.Must(template.New(selfShardName).
	Funcs(shardFuncMap).
	Parse(selfShardTmplSrc))
pkg/varnish/vcl/shard_test.go
0 → 100644
View file @
c32047c2
/*
* Copyright (c) 2018 UPLEX Nils Goroll Systemoptimierung
* All rights reserved
*
* Author: Geoffrey Simmons <geoffrey.simmons@uplex.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
package
vcl
import
(
"bytes"
"testing"
)
// varnishCluster is the shared ShardCluster fixture for the template
// tests: three Varnish nodes with a common health probe.
// NOTE(review): several tests mutate this package-level value
// (PrimaryOnly, By, Key, Algo), so test results can depend on run order.
var varnishCluster = ShardCluster{
	Nodes: []Service{
		Service{
			Name:      "varnish-8445d4f7f-z2b9p",
			Addresses: []Address{{"172.17.0.12", 80}},
		},
		Service{
			Name:      "varnish-8445d4f7f-k22dn",
			Addresses: []Address{{"172.17.0.13", 80}},
		},
		Service{
			Name:      "varnish-8445d4f7f-ldljf",
			Addresses: []Address{{"172.17.0.14", 80}},
		},
	},
	Probe: Probe{
		Timeout:   "2s",
		Interval:  "5s",
		Initial:   "2",
		Window:    "8",
		Threshold: "3",
	},
	MaxSecondaryTTL: "5m",
}
func
TestShardTemplate
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"shard.golden"
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for self-sharding does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
func
TestPrimaryOnlyShardTemplate
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"primaryonly_shard.golden"
varnishCluster
.
PrimaryOnly
=
true
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for primary-only self-sharding does "
+
"not match gold file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
func
TestGetSrc
(
t
*
testing
.
T
)
{
gold
:=
"ingress_shard.golden"
cafeSpec
.
ShardCluster
=
varnishCluster
cafeSpec
.
ShardCluster
.
PrimaryOnly
=
false
src
,
err
:=
cafeSpec
.
GetSrc
()
if
err
!=
nil
{
t
.
Error
(
"Spec.GetSrc():"
,
err
)
return
}
ok
,
err
:=
cmpGold
([]
byte
(
src
),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL from GetSrc() does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
src
)
}
}
}
func
TestShardByURL
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"shard_by_url.golden"
varnishCluster
.
PrimaryOnly
=
true
varnishCluster
.
By
=
URL
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for self-sharding does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
func
TestShardByKey
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"shard_by_key.golden"
varnishCluster
.
PrimaryOnly
=
true
varnishCluster
.
By
=
Key
varnishCluster
.
Key
=
"req.http.Host"
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for self-sharding does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
func
TestShardByDigest
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"shard_by_digest.golden"
varnishCluster
.
PrimaryOnly
=
true
varnishCluster
.
By
=
Blob
varnishCluster
.
Key
=
"req.http.Host"
varnishCluster
.
Algo
=
Sha3_512
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for self-sharding does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
pkg/varnish/vcl/spec.go
View file @
c32047c2
...
...
@@ -252,12 +252,86 @@ func (probe Probe) hash(hash hash.Hash) {
hash
.
Write
([]
byte
(
probe
.
Threshold
))
}
// HashAlgo identifies digest algorithms.
type HashAlgo uint8

const (
	// Crc32 checksum
	Crc32 HashAlgo = iota
	// ICrc32 CRC32 with inverted initialization and result
	ICrc32
	// MD5 message digest
	MD5
	// RS Sedgewick's hash
	RS
	// Sha1 secure hash
	Sha1
	// Sha224 secure hash
	Sha224
	// Sha384 secure hash
	Sha384
	// Sha512 secure hash
	Sha512
	// Sha3_224 secure hash
	Sha3_224
	// Sha3_256 secure hash
	Sha3_256
	// Sha3_512 secure hash
	Sha3_512
)

// hashAlgoNames maps each HashAlgo value to its spelling in the
// generated VCL (the blobdigest enum name).
var hashAlgoNames = [...]string{
	Crc32:    "CRC32",
	ICrc32:   "ICRC32",
	MD5:      "MD5",
	RS:       "RS",
	Sha1:     "SHA1",
	Sha224:   "SHA224",
	Sha384:   "SHA384",
	Sha512:   "SHA512",
	Sha3_224: "SHA3_224",
	Sha3_256: "SHA3_256",
	Sha3_512: "SHA3_512",
}

// String returns the VCL enum spelling of the algorithm, or an error
// marker for a value outside the defined enum range.
func (algo HashAlgo) String() string {
	if int(algo) < len(hashAlgoNames) {
		return hashAlgoNames[algo]
	}
	return "**ILLEGAL HASH ENUM***"
}
// KeyBy classifies the shard key method, i.e. the "by" argument of the
// shard director's backend() method in the generated VCL.
type KeyBy uint8

const (
	// ByHash is the default (shard director by=HASH)
	ByHash KeyBy = iota
	// URL by=URL
	URL
	// Key by=KEY
	Key
	// Blob by=BLOB
	Blob
)
// ShardCluster represents the configuration for self-sharding derived
// from the VarnishConfig Custom Resource.
type ShardCluster struct {
	// Nodes are the Varnish instances forming the sharded cluster.
	Nodes []Service
	// Probe is the health probe configuration the instances use for
	// one another.
	Probe Probe
	// MaxSecondaryTTL caps the TTL of objects fetched via another
	// cluster node (the VK8S-Cluster-TTL mechanism in the template).
	MaxSecondaryTTL string
	// Key is the VCL expression for the shard key, used when By is
	// Key or Blob (e.g. "req.http.Host").
	Key string
	// By selects the shard key method (HASH/URL/KEY/BLOB).
	By KeyBy
	// Algo is the digest algorithm applied to Key when By is Blob.
	Algo HashAlgo
	// PrimaryOnly — presumably restricts request forwarding to the
	// shard-primary node only; see the PrimaryOnly branches in the
	// template.
	PrimaryOnly bool
}
...
...
@@ -270,6 +344,9 @@ func (shard ShardCluster) hash(hash hash.Hash) {
if
shard
.
PrimaryOnly
{
hash
.
Write
([]
byte
{
1
})
}
hash
.
Write
([]
byte
(
shard
.
Key
))
hash
.
Write
([]
byte
{
byte
(
shard
.
By
)})
hash
.
Write
([]
byte
{
byte
(
shard
.
Algo
)})
}
// AuthStatus is the response code to be sent for authentication
...
...
pkg/varnish/vcl/testdata/ingress_shard.golden
View file @
c32047c2
...
...
@@ -103,6 +103,8 @@ sub vcl_hit {
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
...
...
pkg/varnish/vcl/testdata/primaryonly_shard.golden
View file @
c32047c2
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
...
...
pkg/varnish/vcl/testdata/shard.golden
View file @
c32047c2
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
...
...
pkg/varnish/vcl/testdata/shard_by_digest.golden
0 → 100644
View file @
c32047c2
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster"
"Connection: close";
.timeout = 2s;
.interval = 5s;
.initial = 2;
.window = 8;
.threshold = 3;
}
backend varnish-8445d4f7f-z2b9p {
.host = "172.17.0.12";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-k22dn {
.host = "172.17.0.13";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-ldljf {
.host = "172.17.0.14";
.port = "80";
.probe = vk8s_probe_varnish;
}
acl vk8s_cluster_acl {
"172.17.0.12";
"172.17.0.13";
"172.17.0.14";
}
sub vcl_init {
new vk8s_cluster = directors.shard();
vk8s_cluster.add_backend(varnish-8445d4f7f-z2b9p);
vk8s_cluster.add_backend(varnish-8445d4f7f-k22dn);
vk8s_cluster.add_backend(varnish-8445d4f7f-ldljf);
vk8s_cluster.reconfigure();
new vk8s_shard_digest = blobdigest.digest(SHA3_512);
}
sub vcl_recv {
unset req.http.VK8S-Shard-Primary-Only;
vk8s_shard_digest.update(blob.decode(encoded=req.http.Host));
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final()) != server.identity) {
set req.http.VK8S-Shard-Primary-Only = "true";
set req.backend_hint = vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final());
return (pass);
}
else if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vcl_pass {
if (req.http.VK8S-Shard-Primary-Only) {
return (fetch);
}
}
sub vcl_backend_fetch {
if (bereq.http.VK8S-Shard-Primary-Only) {
return (fetch);
}
vk8s_shard_digest.update(blob.decode(encoded=bereq.http.Host));
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW, by=BLOB, key_blob=vk8s_shard_digest.final()) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=LAZY, by=BLOB, key_blob=vk8s_shard_digest.final());
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
sub vcl_backend_response {
if (bereq.http.VK8S-Shard-Primary-Only) {
return (deliver);
}
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY, by=BLOB, key_blob=vk8s_shard_digest.final())) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
if (beresp.ttl > 5m) {
set beresp.ttl = 5m;
}
unset beresp.http.VK8S-Cluster-TTL;
}
else {
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY, by=BLOB, key_blob=vk8s_shard_digest.final())) {
return (deliver);
}
}
sub vcl_deliver {
if (req.http.VK8S-Shard-Primary-Only) {
return (deliver);
}
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl) {
if (! obj.uncacheable) {
set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
pkg/varnish/vcl/testdata/shard_by_key.golden
0 → 100644
View file @
c32047c2
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster"
"Connection: close";
.timeout = 2s;
.interval = 5s;
.initial = 2;
.window = 8;
.threshold = 3;
}
backend varnish-8445d4f7f-z2b9p {
.host = "172.17.0.12";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-k22dn {
.host = "172.17.0.13";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-ldljf {
.host = "172.17.0.14";
.port = "80";
.probe = vk8s_probe_varnish;
}
acl vk8s_cluster_acl {
"172.17.0.12";
"172.17.0.13";
"172.17.0.14";
}
sub vcl_init {
new vk8s_cluster = directors.shard();
vk8s_cluster.add_backend(varnish-8445d4f7f-z2b9p);
vk8s_cluster.add_backend(varnish-8445d4f7f-k22dn);
vk8s_cluster.add_backend(varnish-8445d4f7f-ldljf);
vk8s_cluster.reconfigure();
}
sub vcl_recv {
unset req.http.VK8S-Shard-Primary-Only;
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.Host)) != server.identity) {
set req.http.VK8S-Shard-Primary-Only = "true";
set req.backend_hint = vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(req.http.Host));
return (pass);
}
else if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vcl_pass {
if (req.http.VK8S-Shard-Primary-Only) {
return (fetch);
}
}
sub vcl_backend_fetch {
if (bereq.http.VK8S-Shard-Primary-Only) {
return (fetch);
}
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW, by=KEY, key=vk8s_cluster.key(bereq.http.Host)) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=LAZY, by=KEY, key=vk8s_cluster.key(bereq.http.Host));
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
sub vcl_backend_response {
if (bereq.http.VK8S-Shard-Primary-Only) {
return (deliver);
}
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY, by=KEY, key=vk8s_cluster.key(bereq.http.Host))) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
if (beresp.ttl > 5m) {
set beresp.ttl = 5m;
}
unset beresp.http.VK8S-Cluster-TTL;
}
else {
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY, by=KEY, key=vk8s_cluster.key(bereq.http.Host))) {
return (deliver);
}
}
sub vcl_deliver {
if (req.http.VK8S-Shard-Primary-Only) {
return (deliver);
}
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl) {
if (! obj.uncacheable) {
set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
pkg/varnish/vcl/testdata/shard_by_url.golden
0 → 100644
View file @
c32047c2
import std;
import directors;
import blob;
import blobdigest;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster"
"Connection: close";
.timeout = 2s;
.interval = 5s;
.initial = 2;
.window = 8;
.threshold = 3;
}
backend varnish-8445d4f7f-z2b9p {
.host = "172.17.0.12";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-k22dn {
.host = "172.17.0.13";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-ldljf {
.host = "172.17.0.14";
.port = "80";
.probe = vk8s_probe_varnish;
}
acl vk8s_cluster_acl {
"172.17.0.12";
"172.17.0.13";
"172.17.0.14";
}
sub vcl_init {
new vk8s_cluster = directors.shard();
vk8s_cluster.add_backend(varnish-8445d4f7f-z2b9p);
vk8s_cluster.add_backend(varnish-8445d4f7f-k22dn);
vk8s_cluster.add_backend(varnish-8445d4f7f-ldljf);
vk8s_cluster.reconfigure();
}
sub vcl_recv {
unset req.http.VK8S-Shard-Primary-Only;
if (remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW, by=URL) != server.identity) {
set req.http.VK8S-Shard-Primary-Only = "true";
set req.backend_hint = vk8s_cluster.backend(resolve=NOW, by=URL);
return (pass);
}
else if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vcl_pass {
if (req.http.VK8S-Shard-Primary-Only) {
return (fetch);
}
}
sub vcl_backend_fetch {
if (bereq.http.VK8S-Shard-Primary-Only) {
return (fetch);
}
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW, by=URL) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=LAZY, by=URL);
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
sub vcl_backend_response {
if (bereq.http.VK8S-Shard-Primary-Only) {
return (deliver);
}
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY, by=URL)) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
if (beresp.ttl > 5m) {
set beresp.ttl = 5m;
}
unset beresp.http.VK8S-Cluster-TTL;
}
else {
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY, by=URL)) {
return (deliver);
}
}
sub vcl_deliver {
if (req.http.VK8S-Shard-Primary-Only) {
return (deliver);
}
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl) {
if (! obj.uncacheable) {
set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
pkg/varnish/vcl/vcl_test.go
View file @
c32047c2
...
...
@@ -110,96 +110,6 @@ func TestIngressTemplate(t *testing.T) {
}
}
var
varnishCluster
=
ShardCluster
{
Nodes
:
[]
Service
{
Service
{
Name
:
"varnish-8445d4f7f-z2b9p"
,
Addresses
:
[]
Address
{{
"172.17.0.12"
,
80
}},
},
Service
{
Name
:
"varnish-8445d4f7f-k22dn"
,
Addresses
:
[]
Address
{{
"172.17.0.13"
,
80
}},
},
Service
{
Name
:
"varnish-8445d4f7f-ldljf"
,
Addresses
:
[]
Address
{{
"172.17.0.14"
,
80
}},
},
},
Probe
:
Probe
{
Timeout
:
"2s"
,
Interval
:
"5s"
,
Initial
:
"2"
,
Window
:
"8"
,
Threshold
:
"3"
,
},
MaxSecondaryTTL
:
"5m"
,
}
func
TestShardTemplate
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"shard.golden"
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for self-sharding does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
func
TestPrimaryOnlyShardTemplate
(
t
*
testing
.
T
)
{
var
buf
bytes
.
Buffer
gold
:=
"primaryonly_shard.golden"
varnishCluster
.
PrimaryOnly
=
true
if
err
:=
shardTmpl
.
Execute
(
&
buf
,
varnishCluster
);
err
!=
nil
{
t
.
Error
(
"cluster template Execute():"
,
err
)
return
}
ok
,
err
:=
cmpGold
(
buf
.
Bytes
(),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL for primary-only self-sharding does "
+
"not match gold file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
buf
.
String
())
}
}
}
func
TestGetSrc
(
t
*
testing
.
T
)
{
gold
:=
"ingress_shard.golden"
cafeSpec
.
ShardCluster
=
varnishCluster
cafeSpec
.
ShardCluster
.
PrimaryOnly
=
false
src
,
err
:=
cafeSpec
.
GetSrc
()
if
err
!=
nil
{
t
.
Error
(
"Spec.GetSrc():"
,
err
)
return
}
ok
,
err
:=
cmpGold
([]
byte
(
src
),
gold
)
if
err
!=
nil
{
t
.
Fatalf
(
"Reading %s: %v"
,
gold
,
err
)
}
if
!
ok
{
t
.
Errorf
(
"Generated VCL from GetSrc() does not match gold "
+
"file: %s"
,
gold
)
if
testing
.
Verbose
()
{
t
.
Logf
(
"Generated: %s"
,
src
)
}
}
}
var
auths
=
Spec
{
Auths
:
[]
Auth
{
{
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment