Commit 0965a167 authored by Nils Goroll

working POC - this is work in progress

# build system
.deps/
.libs/
autom4te.cache/
build-aux/
m4/
*.la
*.lo
*.o
*.tar.gz
Makefile
Makefile.in
aclocal.m4
config.h
config.h.in
config.log
config.status
configure
libtool
stamp-h1
# test suite
*.log
*.trs
# vmodtool
vcc_*_if.[ch]
vmod_*.rst
# man
*.1
*_options.rst
*_synopsis.rst
vmod_*.3
Copyright 2018 UPLEX Nils Goroll Systemoptimierung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
ACLOCAL_AMFLAGS = -I m4 -I @VARNISHAPI_DATAROOTDIR@/aclocal
DISTCHECK_CONFIGURE_FLAGS = RST2MAN=:
SUBDIRS = src
Working POC for a director to facilitate cache clustering.
This is work in progress.
As of 2018-05-09, this needs master.patch applied to varnish-cache master.
#!/bin/sh
set -e
set -u
WORK_DIR=$(pwd)
ROOT_DIR=$(dirname "$0")
cd "$ROOT_DIR"
if ! command -v libtoolize >/dev/null
then
echo "libtoolize: command not found, falling back to glibtoolize" >&2
alias libtoolize=glibtoolize
fi
mkdir -p m4
aclocal
libtoolize --copy --force
autoheader
automake --add-missing --copy --foreign
autoconf
cd "$WORK_DIR"
"$ROOT_DIR"/configure "$@"
AC_PREREQ([2.68])
AC_INIT([libvmod-cluster], [0.1])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_HEADER([config.h])
AM_INIT_AUTOMAKE([1.12 -Wall -Werror foreign parallel-tests])
AM_SILENT_RULES([yes])
AM_PROG_AR
LT_PREREQ([2.2.6])
LT_INIT([dlopen disable-static])
AC_ARG_WITH([rst2man],
AS_HELP_STRING(
[--with-rst2man=PATH],
[Location of rst2man (auto)]),
[RST2MAN="$withval"],
AC_CHECK_PROGS(RST2MAN, [rst2man rst2man.py], []))
VARNISH_PREREQ([5.2.0])
VARNISH_VMODS([cluster])
AC_CONFIG_FILES([
Makefile
src/Makefile
])
AC_OUTPUT
AS_ECHO("
==== $PACKAGE_STRING ====
varnish: $VARNISH_VERSION
prefix: $prefix
vmoddir: $vmoddir
vcldir: $vcldir
pkgvcldir: $pkgvcldir
compiler: $CC
cflags: $CFLAGS
ldflags: $LDFLAGS
")
diff --git a/bin/varnishd/cache/cache_director.c b/bin/varnishd/cache/cache_director.c
index f5552fe5f..c1c60b895 100644
--- a/bin/varnishd/cache/cache_director.c
+++ b/bin/varnishd/cache/cache_director.c
@@ -76,7 +76,7 @@ VDI_Ahealth(const struct director *d)
/* Resolve director --------------------------------------------------*/
-static VCL_BACKEND
+VCL_BACKEND
VDI_Resolve(VRT_CTX)
{
const struct director *d;
diff --git a/include/vrt.h b/include/vrt.h
index a0c142c52..d7eb04f76 100644
--- a/include/vrt.h
+++ b/include/vrt.h
@@ -461,6 +461,7 @@ VCL_BACKEND VRT_AddDirector(VRT_CTX, const struct vdi_methods *,
void VRT_SetHealth(VCL_BACKEND d, int health);
void VRT_DisableDirector(VCL_BACKEND);
void VRT_DelDirector(VCL_BACKEND *);
+VCL_BACKEND VDI_Resolve(VRT_CTX);
/* Suckaddr related */
int VRT_VSA_GetPtr(const struct suckaddr *sua, const unsigned char ** dst);
AM_CFLAGS = $(VARNISHAPI_CFLAGS)
# Modules
vmod_LTLIBRARIES = \
libvmod_cluster.la
libvmod_cluster_la_LDFLAGS = $(VMOD_LDFLAGS)
libvmod_cluster_la_SOURCES = vmod_cluster.c
nodist_libvmod_cluster_la_SOURCES = \
vcc_cluster_if.c \
vcc_cluster_if.h
@BUILD_VMOD_CLUSTER@
# Test suite
AM_TESTS_ENVIRONMENT = \
PATH="$(abs_builddir):$(VARNISH_TEST_PATH):$(PATH)" \
LD_LIBRARY_PATH="$(VARNISH_LIBRARY_PATH)"
TEST_EXTENSIONS = .vtc
VTC_LOG_COMPILER = varnishtest -v
AM_VTC_LOG_FLAGS = \
-p vcl_path="$(abs_top_srcdir)/vcl" \
-p vmod_path="$(abs_builddir)/.libs:$(vmoddir)"
TESTS = \
vtc/vmod_cluster.vtc
# Documentation
dist_doc_DATA = \
vmod_cluster.vcc \
$(TESTS)
dist_man_MANS = \
vmod_cluster.3
.rst.1:
$(AM_V_GEN) $(RST2MAN) $< $@
/*-
* Copyright 2018 UPLEX - Nils Goroll Systemoptimierung
* All rights reserved
*
* Author: Nils Goroll <nils.goroll@uplex.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "config.h"
#include <stdlib.h>
#include <string.h>
#include <cache/cache.h>
#include <vcl.h>
#include <vrt_obj.h>
#include "vcc_cluster_if.h"
struct vmod_cluster_cluster_param {
unsigned magic;
#define VMOD_CLUSTER_CLUSTER_PARAM_MAGIC 0x3ba2a0d5
VCL_BOOL uncacheable_direct;
VCL_BACKEND cluster;
VCL_BACKEND real;
int nblack;
int spcblack;
VCL_BACKEND blacklist[];
};
struct vmod_cluster_cluster {
unsigned magic;
#define VMOD_CLUSTER_CLUSTER_MAGIC 0x4e25630b
VCL_BACKEND dir;
const struct vmod_cluster_cluster_param *param;
};
static VCL_BACKEND vmod_cluster_resolve(VRT_CTX, VCL_BACKEND);
static VCL_BOOL vmod_cluster_healthy(VRT_CTX, VCL_BACKEND, VCL_TIME *);
static const struct vdi_methods vmod_cluster_methods[1] = {{
.magic = VDI_METHODS_MAGIC,
.type = "cluster",
.resolve = vmod_cluster_resolve,
.healthy = vmod_cluster_healthy,
}};
#define param_sz(p, spc) (sizeof(*(p)) + (spc) * sizeof(*(p)->blacklist))
/*
 * Return the appropriate parameters for the context, writable.
 *
 * For nblack == -1: do not create; return NULL if none exist.
 */
static struct vmod_cluster_cluster_param *
cluster_task_param_l(VRT_CTX, struct vmod_cluster_cluster *vc, int nblack)
{
int nspc;
const int nspc_initial = 2;
struct vmod_priv *task;
struct vmod_cluster_cluster_param *p = NULL;
const struct vmod_cluster_cluster_param *o = NULL;
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
CHECK_OBJ(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
assert(ctx->method == 0 ||
ctx->method & (VCL_MET_INIT | VCL_MET_BACKEND_FETCH));
task = VRT_priv_task(ctx, vc);
if (task == NULL) {
VRT_fail(ctx, "no priv_task");
return (NULL);
}
if (task->priv) {
CAST_OBJ_NOTNULL(p, task->priv,
VMOD_CLUSTER_CLUSTER_PARAM_MAGIC);
if (nblack <= p->spcblack)
return (p);
nspc = RUP2(nblack, 2);
o = p;
} else if (nblack == -1) {
return (NULL);
} else if (ctx->method & VCL_MET_INIT) {
nspc = nspc_initial;
} else if (ctx->method & VCL_MET_BACKEND_FETCH) {
o = vc->param;
if (nblack <= o->spcblack)
nspc = o->spcblack;
else
nspc = RUP2(nblack, 2);
} else {
INCOMPL();
}
if (ctx->method & VCL_MET_INIT) {
		p = realloc(p, param_sz(p, nspc));
		AN(p);
if (o == NULL)
INIT_OBJ(p, VMOD_CLUSTER_CLUSTER_PARAM_MAGIC);
vc->param = p;
} else {
AN(o);
p = WS_Alloc(ctx->ws, param_sz(p, nspc));
if (p == NULL)
return (NULL);
memcpy(p, o, param_sz(o, o->nblack));
}
p->spcblack = nspc;
task->priv = p;
return (p);
}
static const struct vmod_cluster_cluster_param *
cluster_task_param_r(VRT_CTX, struct vmod_cluster_cluster *vc)
{
const struct vmod_cluster_cluster_param *o;
if (ctx->method != 0 &&
(ctx->method & (VCL_MET_INIT | VCL_MET_BACKEND_FETCH)) == 0)
return (vc->param);
o = cluster_task_param_l(ctx, vc, -1);
if (o != NULL)
return (o);
o = vc->param;
AN(o);
return (o);
}
static void
cluster_blacklist_add(struct vmod_cluster_cluster_param *p,
VCL_BACKEND b)
{
CHECK_OBJ_NOTNULL(p, VMOD_CLUSTER_CLUSTER_PARAM_MAGIC);
assert(p->nblack < p->spcblack);
p->blacklist[p->nblack++] = b;
}
static void
cluster_blacklist_del(struct vmod_cluster_cluster_param *p,
VCL_BACKEND b)
{
int i;
CHECK_OBJ_NOTNULL(p, VMOD_CLUSTER_CLUSTER_PARAM_MAGIC);
for (i = 0; i < p->nblack; i++)
if (p->blacklist[i] == b) {
p->nblack--;
if (i < p->nblack)
memmove(&p->blacklist[i],
&p->blacklist[i+1],
(p->nblack - i) * sizeof(*p->blacklist));
return;
}
}
static int
cluster_blacklisted(const struct vmod_cluster_cluster_param *p,
VCL_BACKEND b)
{
int i;
CHECK_OBJ_NOTNULL(p, VMOD_CLUSTER_CLUSTER_PARAM_MAGIC);
for (i = 0; i < p->nblack; i++)
if (p->blacklist[i] == b)
return (1);
return (0);
}
VCL_VOID vmod_cluster__init(VRT_CTX,
struct vmod_cluster_cluster **vcp, const char *vcl_name,
struct vmod_cluster__init_arg *args)
{
struct vmod_cluster_cluster *vc;
struct vmod_cluster_cluster_param *p;
AN(vcp);
AZ(*vcp);
ALLOC_OBJ(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
if (vc == NULL) {
VRT_fail(ctx, "vc alloc failed");
return;
}
AN(vc);
p = cluster_task_param_l(ctx, vc, 0);
if (p == NULL) {
FREE_OBJ(vc);
return;
}
AN(vc->param);
*vcp = vc;
p->uncacheable_direct = args->uncacheable_direct;
p->cluster = args->cluster;
if (args->valid_real)
p->real = args->real;
if (args->valid_deny)
cluster_blacklist_add(p, args->deny);
vc->dir = VRT_AddDirector(ctx, vmod_cluster_methods, vc,
"%s", vcl_name);
}
VCL_VOID
vmod_cluster__fini(struct vmod_cluster_cluster **vcp)
{
struct vmod_cluster_cluster *vc = *vcp;
*vcp = NULL;
if (vc == NULL)
return;
CHECK_OBJ(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
VRT_DelDirector(&vc->dir);
free(TRUST_ME(vc->param));
FREE_OBJ(vc);
}
#define cluster_methods (VCL_MET_INIT | VCL_MET_BACKEND_FETCH)
#define cluster_check(ctx, name) do { \
if ((ctx->method & cluster_methods) == 0) { \
VRT_fail(ctx, \
"cluster." #name " can not be called here"); \
return; \
} \
} while(0)
VCL_VOID vmod_cluster_deny(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BACKEND b)
{
const struct vmod_cluster_cluster_param *pr;
struct vmod_cluster_cluster_param *pl;
cluster_check(ctx, deny);
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
if (cluster_blacklisted(pr, b))
return;
pl = cluster_task_param_l(ctx, vc, pr->nblack + 1);
cluster_blacklist_add(pl, b);
}
VCL_VOID vmod_cluster_allow(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BACKEND b)
{
const struct vmod_cluster_cluster_param *pr;
struct vmod_cluster_cluster_param *pl;
cluster_check(ctx, allow);
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
if (! cluster_blacklisted(pr, b))
return;
pl = cluster_task_param_l(ctx, vc, pr->nblack + 1);
cluster_blacklist_del(pl, b);
}
VCL_VOID vmod_cluster_real(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BACKEND b)
{
const struct vmod_cluster_cluster_param *pr;
struct vmod_cluster_cluster_param *pl;
cluster_check(ctx, real);
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
if (pr->real == b)
return;
pl = cluster_task_param_l(ctx, vc, 0);
pl->real = b;
}
VCL_VOID vmod_cluster_uncacheable_direct(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BOOL direct)
{
const struct vmod_cluster_cluster_param *pr;
struct vmod_cluster_cluster_param *pl;
cluster_check(ctx, uncacheable_direct);
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
if (pr->uncacheable_direct == direct)
return;
pl = cluster_task_param_l(ctx, vc, 0);
pl->uncacheable_direct = direct;
}
// XXX
#define VDI_HACK 1
static VCL_BACKEND
cluster_resolve(VRT_CTX,
const struct vmod_cluster_cluster_param *pr)
{
VCL_BACKEND obe, r;
#ifdef VDI_HACK
struct busyobj dummy[1];
struct vrt_ctx cctx[1];
#endif
if (pr->uncacheable_direct && ctx->bo &&
(ctx->bo->do_pass || ctx->bo->uncacheable))
return (pr->real);
#ifdef VDI_HACK
if (ctx->bo == NULL) {
INIT_OBJ(dummy, BUSYOBJ_MAGIC);
memcpy(cctx, ctx, sizeof *ctx);
cctx->bo = dummy;
ctx = cctx;
}
#endif
obe = VRT_r_bereq_backend(ctx);
AN(pr->cluster);
VRT_l_bereq_backend(ctx, pr->cluster);
r = VDI_Resolve(ctx);
if (cluster_blacklisted(pr, r))
r = pr->real;
VRT_l_bereq_backend(ctx, obe);
return (r);
}
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
vmod_cluster_resolve(VRT_CTX, VCL_BACKEND dir)
{
return (cluster_resolve(ctx,
cluster_task_param_r(ctx, dir->priv)));
}
#define be_task_param_l(pl, pr, ctx, vc, arg) do { \
if ((pl) != NULL) { \
(void)0; \
} else if ((arg)->resolve == vmod_enum_LAZY) { \
(pr) = (pl) = cluster_task_param_l( \
(ctx), (vc), (pr)->nblack + 1); \
} else { \
(pr) = (pl) = alloca(param_sz(pl, (pr)->nblack + 1)); \
INIT_OBJ((pl), VMOD_CLUSTER_CLUSTER_PARAM_MAGIC); \
} \
} while (0)
VCL_BACKEND vmod_cluster_backend(VRT_CTX,
struct vmod_cluster_cluster *vc,
struct vmod_cluster_backend_arg *arg)
{
int modify = arg->valid_deny || arg->valid_real ||
arg->valid_uncacheable_direct;
const struct vmod_cluster_cluster_param *pr = NULL;
struct vmod_cluster_cluster_param *pl = NULL;
if (! modify) {
if (arg->resolve == vmod_enum_LAZY)
return (vc->dir);
return (vmod_cluster_resolve(ctx, vc->dir));
}
AN(modify);
if (arg->resolve == vmod_enum_LAZY &&
(ctx->method & cluster_methods) == 0) {
VRT_fail(ctx, "cluster.backend(resolve=LAZY)"
" can not be called here");
		return (NULL);
}
pr = cluster_task_param_r(ctx, vc);
if (arg->valid_deny && arg->deny != NULL &&
! cluster_blacklisted(pr, arg->deny)) {
be_task_param_l(pl, pr, ctx, vc, arg);
cluster_blacklist_add(pl, arg->deny);
}
if (arg->valid_real &&
pr->real != arg->real) {
be_task_param_l(pl, pr, ctx, vc, arg);
pl->real = arg->real;
}
	if (arg->valid_uncacheable_direct &&
	    pr->uncacheable_direct != arg->uncacheable_direct) {
		be_task_param_l(pl, pr, ctx, vc, arg);
		pl->uncacheable_direct = arg->uncacheable_direct;
	}
}
if (arg->resolve == vmod_enum_LAZY)
return (vc->dir);
return (cluster_resolve(ctx, pr));
}
static VCL_BOOL
vmod_cluster_healthy(VRT_CTX, VCL_BACKEND be, VCL_TIME *c)
{
be = vmod_cluster_resolve(ctx, be);
	return (VRT_Healthy(ctx, be, c));
}
#-
# Copyright 2018 UPLEX Nils Goroll Systemoptimierung
# All rights reserved
#
# Author: Nils Goroll <nils.goroll@uplex.de>
#
# See LICENSE
#
$Module cluster 3 Varnish cluster Module
DESCRIPTION
===========
This director facilitates the implementation of varnish cache
clustering, in particular in combination with the shard director. The
basic design idea is to shard objects among a number of varnish caches
configured in the director passed as the `cluster` argument: If the
local cache is the shard member designated for serving the respective
object, the backend request is made to a real backend. Otherwise, the
request is forwarded to another node from the `cluster` argument,
which, in turn, will either serve the object from its cache or issue a
request against a `real` backend.
For a shard director argument ``shard``, the following examples are
roughly equivalent if ``myself`` resolves to the local node:
* explicit VCL code with shard director ``resolve=NOW``::
sub vcl_init {
new shard = directors.shard();
shard.add_backend(other_node);
shard.add_backend(myself);
shard.reconfigure();
new real = directors.whatever();
real.add_backend(...)
# ...
}
sub vcl_backend_fetch {
if (bereq.uncacheable || shard.backend() == myself) {
set bereq.backend = real.backend();
} else {
set bereq.backend = shard.backend();
}
}
* use of the cluster director::
sub vcl_init {
# same as above, plus
new cluster = cluster.cluster(shard,
deny = myself,
real = real.backend());
}
sub vcl_backend_fetch {
set bereq.backend = cluster.backend();
}
Differences between the two methods are:
* the cluster director works with lazy resolution where a director
does not return one of its configured backends, but rather a
reference to itself (as do all the varnish bundled directors except
for the shard director, which supports lazy resolution with the
``resolve=LAZY`` argument).
* when different ``deny`` or, in particular, ``real`` backends are to
be used, the cluster director can save substantial amounts of VCL
code: Using the ``.real()`` method, the director of real backends
can be changed in ``vcl_backend_fetch {}``.
Simply put, when using the cluster director, the ``.real()`` method
effectively replaces ``set bereq.backend``.
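For example, a minimal sketch switching the real backend for one
request only (``real2`` and the URL rule are hypothetical; names
otherwise follow the example above)::

    sub vcl_backend_fetch {
        # hypothetical rule: fetch /special/ objects via real2
        if (bereq.url ~ "^/special/") {
            cluster.real(real2.backend());
        }
        set bereq.backend = cluster.backend();
    }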
$Object cluster(BACKEND cluster, [ BACKEND deny ], [ BACKEND real ],
BOOL uncacheable_direct = 1)
Instantiate a cluster director on top of the director passed as the
`cluster` argument.
The optional `deny` argument allows specifying one backend for which
the cluster director will resolve to a `real` backend (the blacklist),
as if the `.deny(BACKEND)` method had been called.

The optional `real` argument allows specifying the director to use if
a denied backend would have been hit, as if the `.real(BACKEND)`
method had been called.
With the default `uncacheable_direct` argument, the cluster director
always selects a `real` backend for uncacheable backend requests (as
the VCL example above illustrates). Setting this argument ``false``
disables special handling of uncacheable backend requests.
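For example, a hypothetical instantiation ``cl2`` reusing the names
from the VCL example above, with uncacheable backend requests also
routed through the cluster::

    sub vcl_init {
        # assumes shard, myself and real as defined in the example above
        new cl2 = cluster.cluster(shard.backend(resolve=LAZY),
            deny = myself,
            real = real.backend(),
            uncacheable_direct = false);
    }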
$Method VOID .deny(BACKEND)
Add a backend to the list of backends for which the cluster director
will resolve to a `real` backend (the blacklist).
When used in ``vcl_init{}``, changes the director's default
blacklist.
When used in ``vcl_backend_fetch {}``, changes the director's
blacklist for this backend request only. When the `.backend()` method
is used with ``resolve=LAZY``, this method changes the behaviour
irrespective of being called before or after the `.backend()` method.
Use in VCL subroutines other than ``vcl_init{}`` and
``vcl_backend_fetch {}`` is invalid and will trigger a VCL error.
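A sketch of per-request use (the ``Host`` rule and ``other_node`` are
hypothetical)::

    sub vcl_backend_fetch {
        # deny other_node for this backend request only
        if (bereq.http.Host == "static.example.com") {
            cluster.deny(other_node);
        }
        set bereq.backend = cluster.backend();
    }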
$Method VOID .allow(BACKEND)
Remove a backend to the list of backends for which the cluster
director will resolve to a `real` backend (the blacklist).
Otherwise identical to the `.deny()` method.
$Method VOID .real(BACKEND)
Only valid in ``vcl_backend_fetch {}``: Use a different backend for
resolution of real backends.

When the `.backend()` method is used with ``resolve=LAZY``, this
method changes the behaviour irrespective of being called before or
after the `.backend()` method.
$Method VOID .uncacheable_direct(BOOL)
Only valid in ``vcl_backend_fetch {}``: If a ``true`` argument is
given, a `real` backend is always returned for uncacheable backend
requests (e.g. passes or cache lookups hitting hit-for-pass). For a
``false`` argument, no difference is made with regard to the
cacheability of the backend request.

When the `.backend()` method is used with ``resolve=LAZY``, this
method changes the behaviour irrespective of being called before or
after the `.backend()` method.
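A sketch under a hypothetical request condition::

    sub vcl_backend_fetch {
        # route even passes through the cluster for marked requests
        if (bereq.http.X-Cluster-Pass) {
            cluster.uncacheable_direct(false);
        }
        set bereq.backend = cluster.backend();
    }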
$Method BACKEND .backend(ENUM {NOW, LAZY} resolve=LAZY,
[ BACKEND deny ], [ BACKEND real ],
[ BOOL uncacheable_direct ])
Return a backend by the method described in the rest of this
documentation, either as a reference to the cluster director for
``resolve=LAZY`` or immediately for ``resolve=NOW``.

The optional `deny`, `real` and `uncacheable_direct` arguments have
the same effect as calling the `.deny()`, `.real()` and
`.uncacheable_direct()` methods before the `.backend()` method with
``resolve=LAZY``, but can also be used with ``resolve=NOW``. Use of
these arguments with ``resolve=LAZY`` is only allowed in
``vcl_backend_fetch {}`` and ``vcl_init {}``.
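For example, a sketch resolving immediately while overriding the real
director for this call only (``real2`` is hypothetical)::

    sub vcl_backend_fetch {
        set bereq.backend = cluster.backend(resolve=NOW,
            real = real2.backend());
    }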
SEE ALSO
========
vcl\(7), varnishd\(1)
varnishtest "vmod_cluster toy example with round-robin"
varnish v1 -vcl {
import cluster;
import directors;
backend s1 { .host = "${bad_backend}";}
backend s2 { .host = "${bad_backend}";}
backend s3 { .host = "${bad_backend}";}
sub vcl_init {
new rr = directors.round_robin();
rr.add_backend(s1);
rr.add_backend(s2);
new cl = cluster.cluster(rr.backend(), deny=s2, real=s3);
}
sub vcl_recv {
return (synth(200));
}
sub vcl_synth {
set resp.http.b1 = cl.backend(resolve=NOW);
set resp.http.b2 = cl.backend(resolve=NOW);
}
} -start
client c1 {
txreq
rxresp
expect resp.status == 200
expect resp.http.b1 == s1
expect resp.http.b2 == s3
} -run