Commit 1f1961e2 authored by Nils Goroll

add an example vcl

parent 5888647d
#!/bin/bash
cat <<'EOF'
vshard example setup
--------------------
this script will demo a cluster/sharding setup on linux.
It will reconfigure three addresses 127.0.1.{1-3} on the loopback
interface lo and require sudo privileges to do so.
Then three varnish instances in a cluster will be started with
varnish-cache.org as their backend on 127.0.1.{1-3}:8080
Requirements:
* varnish installed
* this vmod installed
* constant vmod from https://code.uplex.de/uplex-varnish/varnish-objvar
installed
* PATH contains directories to find varnishd and varnishadm
Press [enter] to continue...
EOF
# -r: do not let read mangle backslashes in the (discarded) input
read -r foo
# absolute directory of this script, used to locate the example VCL.
# $0 and the dirname result are quoted so paths containing spaces work.
typeset -r base="$(realpath "$(dirname "$0")")"
# external commands this demo needs to find in PATH
typeset -ra cmds=(
	varnishadm
	varnishd
)
# PIDs of the background varnishd processes, filled in by the start loop
typeset -a pids=()
# bail out early, naming the specific command that is missing
for cmd in "${cmds[@]}" ; do
	if ! type "${cmd}" ; then
		echo >&2 "Required command ${cmd} not found in PATH"
		exit 1
	fi
done
set -eux
# Tear down on exit: stop the varnishd instances we started and remove
# the loopback aliases. Runs with errors ignored (set +e) so a partially
# initialized setup is still cleaned up as far as possible.
exit_handler() {
	set +e
	local -i i
	# only kill if we actually started anything: a bare "kill" with
	# no arguments prints a usage error (and "${pids[@]}" on an empty
	# array errors under set -u in bash < 4.4)
	if (( ${#pids[@]} > 0 )) ; then
		kill "${pids[@]}"
	fi
	for((i=1; i<=3; i++)) ; do
		sudo ifconfig lo:$i down
	done
}
# install cleanup before starting anything, so any failure below still
# tears the setup down
trap 'exit_handler $?' EXIT
typeset -i i
for((i=1; i<=3; i++)) ; do
# nudge the kernel to choose the source ip we want
sudo ifconfig lo:${i} 127.0.1.${i}/32 up
# one foreground (-F) varnishd per address; -i sets server.identity to
# varnish-${i}, matching the backend names used in the example VCL
varnishd -F -p vcl_path="${base}" -f "${base}/vshard.example.vcl" \
-a 127.0.1.${i}:8080 -n varnish-${i} -i varnish-${i} &
pids+=($!)
done
# give the instances a moment to start listening
sleep 1
# Print usage hints; the example varnishlog queries below previously had
# unbalanced quoting (~^vshard") and would fail when copy-pasted.
cat <<'EOF'
If all went well, the test-setup should be working now.
Things to try:
* in three terminals, watch the varnishds:
varnishlog -n varnish-1 -q 'not Begin ~ "^sess" and not ReqHeader:Host ~ "^vshard"'
varnishlog -n varnish-2 -q 'not Begin ~ "^sess" and not ReqHeader:Host ~ "^vshard"'
varnishlog -n varnish-3 -q 'not Begin ~ "^sess" and not ReqHeader:Host ~ "^vshard"'
* send requests
curl -I -H 'Host: varnish-cache.org' 127.0.1.1:8080
-> shard master is varnish-3
It should cache for whatever time remaining from max-age - age
-> varnish-1 should forward to varnish-3
It should cache for max 5 minutes (TTL VCL ... showing Age + 5m)
curl -I -H 'Host: varnish-cache.org' 127.0.1.1:8080/_static/varnish-bunny.png
-> shard master is varnish-1, so same as the above, but if the request
is repeated to varnish-3 / 127.0.1.3:8080, it should forward it to
varnish-1
Press [enter] to stop test setup...
EOF
# -r: do not mangle backslashes; the EXIT trap performs the teardown
read -r foo
# varnish server sharding
vcl 4.1;
import cluster;
import directors;
import std;
# https://code.uplex.de/uplex-varnish/varnish-objvar
import constant;
# the real origin: objects are fetched from here by whichever cluster
# member is the shard master for the request
backend default {
.host = "www.varnish-cache.org";
.port = "80";
}
# TODO:
#
# * names (e.g. varnish-1) must be identical to the actual
# server.identity or the use of server.identity below must be
# replaced with the actual varnish name
#
# * the probe should be kept unless you know what you are doing
#
# * change IPs
include "vshard_probe.inc.vcl";
# one backend per cluster member; ports match the -a listen addresses
# used by the example setup script. All members share probe_vshard,
# which is answered in VCL (see vshard_recv / probe_vshard).
backend varnish-1 {
.host = "127.0.1.1";
.port = "8080";
.probe = probe_vshard;
}
backend varnish-2 {
.host = "127.0.1.2";
.port = "8080";
.probe = probe_vshard;
}
backend varnish-3 {
.host = "127.0.1.3";
.port = "8080";
.probe = probe_vshard;
}
# TODO: add more servers
# TODO: same IPs as in backends above
# addresses of all cluster members; used to recognize requests coming
# from other varnish instances (remote.ip ~ acl_vshard) in the vshard
# subroutines
acl acl_vshard {
"127.0.1.1"/32;
"127.0.1.2"/32;
"127.0.1.3"/32;
}
sub vcl_init {
# shard director distributing objects across the cluster members
new vshard = directors.shard();
# TODO: add all servers defined above
vshard.add_backend(varnish-1);
vshard.add_backend(varnish-2);
vshard.add_backend(varnish-3);
# apply the add_backend() changes (must come after them)
vshard.reconfigure();
# cluster director wrapping the shard director
new vcluster = cluster.cluster(vshard.backend());
# never forward to ourselves: deny the backend whose name equals
# this instance's server.identity (set via varnishd -i)
vcluster.deny(directors.lookup(server.identity));
# only the cluster master has the full ttl to improve total
# cache memory scalability
#
# choose your secondary server TTL
#
# for use without the constant vmod, replace cluster_ttl.get()
# in vshard.inc.vcl (2x)
new vcluster_ttl = constant.duration(5m);
}
include "vshard.inc.vcl";
# return (synth(404)) here for manual control as in the example
#
# the sub must be defined for vshard.inc.vcl even if empty
# Hook called from vshard_recv for "/cluster_health" probe requests just
# before it answers synth(200). Uncomment the example below to take this
# instance out of cluster rotation unless a marker file exists.
sub vshard_recv_healthcheck_override {
# if (! std.file_exists("/tmp/varnish_online")) {
# return (synth(404));
#}
}
sub vcl_recv {
# FIRST: cluster-internal requests return (hash/synth) inside
# vshard_recv, so the code below only runs for real clients
call vshard_recv;
# then any additional processing for clients,
# for example...
set req.http.X-Real-IP = client.ip;
}
sub vcl_deliver {
# FIRST: cluster-internal deliveries return inside vshard_deliver,
# so anything added below only runs facing the real client
call vshard_deliver;
# then any additional processing for clients
}
sub vcl_backend_fetch {
# FIRST: fetches forwarded to another cluster member return inside
# vshard_backend_fetch
call vshard_backend_fetch;
# the processing when talking to a real backend
}
sub vcl_backend_response {
# FIRST: responses from another cluster member return inside
# vshard_backend_response
call vshard_backend_response;
# the processing when talking to a real backend
}
# varnish server sharding - example
# Cluster rules:
#
# within the cluster, we only want minimal processing, so we want to
# process each vcl sub's user/business logic only at one place
#
# * vcl_recv: facing the client
# * vcl_deliver: facing the client
#
# -> iow no user logic when our remote.ip is a varnish
#
# * vcl_backend_fetch: on the primary cluster member
# * vcl_backend_response: on the primary cluster member
#
# -> iow no user logic when our backend is a varnish
# Handle requests arriving from other cluster members (recognized by
# their source address matching acl_vshard):
# - the reserved Host "vshard" serves the probe endpoint
# - everything else goes straight to hash, skipping client-facing logic
sub vshard_recv {
if (remote.ip ~ acl_vshard) {
if (req.http.Host == "vshard") {
if (req.url == "/cluster_health") {
# hook for manually taking this instance offline
call vshard_recv_healthcheck_override;
return (synth(200));
}
return (synth(404));
}
# if we're async, don't deliver stale
if (req.http.X-Cluster-bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
# Decide whether to fetch from the real backend or forward to the shard
# member selected for this object.
sub vshard_backend_fetch {
unset bereq.http.X-Cluster-bgfetch;
# direct=true on retries and when the request already came from a
# cluster member — presumably to avoid forwarding loops (semantics
# of cluster_selected() live in the cluster vmod; confirm there).
# When another member is selected, tag the request as cluster
# traffic so the peer's vshard_recv/vshard_deliver recognize it.
if (vcluster.cluster_selected(
real = bereq.backend,
direct = bereq.retries > 0 || remote.ip ~ acl_vshard)) {
set bereq.http.X-Cluster-bgfetch = bereq.is_bgfetch;
return (fetch);
}
# bereq.backend == vcluster.get_real() || bereq.backend == NULL
}
# For responses received from another cluster member, derive our ttl
# from the X-Cluster-TTL header set by that member's vshard_deliver.
sub vshard_backend_response {
# bereq.http.X-Cluster-bgfetch is only set if this is a
# cluster request
if (bereq.http.X-Cluster-bgfetch) {
if (beresp.http.X-Cluster-TTL) {
# adopt the upstream ttl, capped at the secondary-server
# ttl configured in vcl_init (vcluster_ttl)
set beresp.ttl = std.duration(
beresp.http.X-Cluster-TTL + "s", 1s);
if (beresp.ttl > vcluster_ttl.get()) {
set beresp.ttl = vcluster_ttl.get();
}
unset beresp.http.X-Cluster-TTL;
} else {
# no ttl header: the peer considered the object uncacheable
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vshard_deliver {
# irrespective of cache-control headers, communicate the ttl to the
# cluster upstream and return without any additional processing.
#
# no header = uncacheable
#
# ordinary vcl_deliver is only called facing the real client
unset resp.http.X-Cluster-TTL;
if (remote.ip ~ acl_vshard && req.http.X-Cluster-bgfetch) {
if (! obj.uncacheable) {
# remaining object ttl, consumed by the requesting
# member's vshard_backend_response
set resp.http.X-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
# health check shared by all cluster backends; answered in VCL by
# vshard_recv (synth 200/404) via the reserved Host "vshard"
probe probe_vshard {
.request = "HEAD /cluster_health HTTP/1.1"
"Connection: close"
"Host: vshard";
.interval = 1s;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment