@@ -26,6 +26,12 @@ AC_CONFIG_FILES([
src/Makefile
])
save_CFLAGS="${CFLAGS}"
CFLAGS="${VARNISHAPI_CFLAGS}"
AC_CHECK_DECLS([VRT_DirectorResolve], [], [], [[#include "vdef.h"
#include "vrt.h"]])
CFLAGS="${save_CFLAGS}"
AC_OUTPUT
AS_ECHO("
......
@@ -30,6 +30,7 @@ TESTS = \
vtc/cfg.vtc \
vtc/deep.vtc \
vtc/deep_stk.vtc \
vtc/direct.vtc \
vtc/shallow.vtc \
vtc/lazy.vtc \
vtc/lazy_shard.vtc
......
/*-
* Copyright 2018 UPLEX - Nils Goroll Systemoptimierung
* Copyright 2018, 2019 UPLEX - Nils Goroll Systemoptimierung
* All rights reserved
*
* Author: Nils Goroll <nils.goroll@uplex.de>
@@ -42,7 +42,7 @@
* workaround missing VRT_DirectorResolve
* Ref https://github.com/varnishcache/varnish-cache/pull/2680
*/
#if !HAVE_DECL_VRT_DIRECTORRESOLVE
#include <cache/cache_director.h>
static VCL_BACKEND
@@ -60,7 +60,7 @@ VRT_DirectorResolve(VRT_CTX, VCL_BACKEND d)
AN(d->vdir);
return (d);
}
#endif
/* ------------------------------------------------------------
*/
@@ -83,6 +83,7 @@ struct vmod_cluster_cluster_param {
unsigned magic;
#define VMOD_CLUSTER_CLUSTER_PARAM_MAGIC 0x3ba2a0d5
VCL_BOOL uncacheable_direct;
VCL_BOOL direct;
VCL_BACKEND cluster;
VCL_BACKEND real;
int nblack;
@@ -374,25 +375,6 @@ vmod_cluster_is_denied(VRT_CTX,
return (cluster_blacklisted(pr, b));
}
VCL_VOID
vmod_cluster_set_real(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BACKEND b)
{
const struct vmod_cluster_cluster_param *pr;
struct vmod_cluster_cluster_param *pl;
cluster_check(ctx, set_real, );
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
if (pr->real == b)
return;
pl = cluster_task_param_l(ctx, vc, 0, NULL);
pl->real = b;
}
VCL_BACKEND
vmod_cluster_get_cluster(VRT_CTX, struct vmod_cluster_cluster *vc)
{
@@ -405,51 +387,70 @@ vmod_cluster_get_cluster(VRT_CTX, struct vmod_cluster_cluster *vc)
return (pr->cluster);
}
/* set a simple parameter attribute */
#define CLUSTER_L(ctx, vc, att, val) \
const struct vmod_cluster_cluster_param *pr; \
struct vmod_cluster_cluster_param *pl; \
\
cluster_check(ctx, set_ ## att, ); \
\
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC); \
\
pr = cluster_task_param_r(ctx, vc); \
if (pr->att == (val)) \
return; \
\
pl = cluster_task_param_l(ctx, vc, 0, NULL); \
pl->att = (val)
/* get a simple parameter attribute */
#define CLUSTER_R(ctx, vc, att, ret) \
const struct vmod_cluster_cluster_param *pr; \
\
cluster_check(ctx, get_ ## att, ret); \
\
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC); \
\
pr = cluster_task_param_r(ctx, vc); \
\
return (pr->att)
VCL_VOID
vmod_cluster_set_real(VRT_CTX, struct vmod_cluster_cluster *vc, VCL_BACKEND b)
{
CLUSTER_L(ctx, vc, real, b);
}
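/*
 * For illustration only, a sketch of what the macro use above comes down
 * to: with att=real and val=b, CLUSTER_L() expands to roughly the
 * open-coded body that vmod_cluster_set_real() had before this refactoring:
 *
 *	const struct vmod_cluster_cluster_param *pr;
 *	struct vmod_cluster_cluster_param *pl;
 *
 *	cluster_check(ctx, set_real, );
 *	CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
 *	pr = cluster_task_param_r(ctx, vc);
 *	if (pr->real == (b))
 *		return;
 *	pl = cluster_task_param_l(ctx, vc, 0, NULL);
 *	pl->real = (b);
 */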
VCL_BACKEND
vmod_cluster_get_real(VRT_CTX, struct vmod_cluster_cluster *vc)
{
const struct vmod_cluster_cluster_param *pr;
cluster_check(ctx, get_real, NULL);
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
return (pr->real);
CLUSTER_R(ctx, vc, real, NULL);
}
VCL_VOID
vmod_cluster_set_uncacheable_direct(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BOOL direct)
struct vmod_cluster_cluster *vc, VCL_BOOL bool)
{
const struct vmod_cluster_cluster_param *pr;
struct vmod_cluster_cluster_param *pl;
cluster_check(ctx, set_uncacheable_direct, );
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
pr = cluster_task_param_r(ctx, vc);
if (pr->uncacheable_direct == direct)
return;
pl = cluster_task_param_l(ctx, vc, 0, NULL);
pl->uncacheable_direct = direct;
CLUSTER_L(ctx, vc, uncacheable_direct, bool);
}
VCL_BOOL
vmod_cluster_get_uncacheable_direct(VRT_CTX,
struct vmod_cluster_cluster *vc)
vmod_cluster_get_uncacheable_direct(VRT_CTX, struct vmod_cluster_cluster *vc)
{
const struct vmod_cluster_cluster_param *pr;
cluster_check(ctx, get_uncacheable_direct, 0);
CLUSTER_R(ctx, vc, uncacheable_direct, 0);
}
CHECK_OBJ_NOTNULL(vc, VMOD_CLUSTER_CLUSTER_MAGIC);
VCL_VOID
vmod_cluster_set_direct(VRT_CTX,
struct vmod_cluster_cluster *vc, VCL_BOOL bool)
{
CLUSTER_L(ctx, vc, direct, bool);
}
pr = cluster_task_param_r(ctx, vc);
return (pr->uncacheable_direct);
VCL_BOOL
vmod_cluster_get_direct(VRT_CTX, struct vmod_cluster_cluster *vc)
{
CLUSTER_R(ctx, vc, direct, 0);
}
static inline VCL_BACKEND
@@ -471,7 +472,8 @@ cluster_resolve(VRT_CTX,
{
VCL_BACKEND r;
if (pr->uncacheable_direct && ctx->bo &&
if (pr->direct ||
pr->uncacheable_direct && ctx->bo &&
(ctx->bo->do_pass || ctx->bo->uncacheable))
return (by_resolve(ctx, pr->real, resolve));
......
@@ -172,6 +172,21 @@ Return the currently configured behaviour.
See :ref:`meth_ctx` for limitations.
$Method VOID .set_direct(BOOL)
A ``true`` argument instructs the director to always select a `real`
backend, bypassing the cluster.
A ``false`` argument restores the original behaviour.
See :ref:`meth_ctx` for limitations.
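For illustration, a minimal sketch, assuming a cluster object named
``cl`` has been created in ``vcl_init``::

    sub vcl_backend_fetch {
        # always send this backend request to the real backend
        cl.set_direct(true);
        set bereq.backend = cl.backend();
    }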
$Method BOOL .get_direct()
Return the current `direct` value as set with :ref:`func_cluster.set_direct`.
See :ref:`meth_ctx` for limitations.
$Method BACKEND .backend(ENUM {LAZY, SHALLOW, DEEP, NOW} resolve=LAZY,
[ BACKEND deny ], [ BACKEND real ],
[ BOOL uncacheable_direct ])
......
varnishtest "vmod_cluster test shard director layering and backend.list"
server s1 {
} -start
server s2 {
}
server s3 -repeat 2 -keepalive {
rxreq
txresp
} -start
varnish v1 -vcl+backend {
import cluster;
import directors;
sub vcl_init {
new shard = directors.shard();
shard.add_backend(s1);
shard.add_backend(s2);
shard.reconfigure();
new cl = cluster.cluster(shard.backend(), deny=s2, real=s3);
cl.set_uncacheable_direct(false);
}
sub vcl_recv {
return (pass);
}
sub vcl_backend_fetch {
set bereq.http.shard = shard.backend();
set bereq.http.unc = cl.get_uncacheable_direct();
set bereq.http.dir1 = cl.get_direct();
cl.set_direct(bereq.http.shard != "s2");
set bereq.backend = cl.backend();
set bereq.http.dir2 = cl.get_direct();
}
sub vcl_backend_response {
set beresp.http.shard = bereq.http.shard;
set beresp.http.unc = bereq.http.unc;
set beresp.http.backend = beresp.backend;
set beresp.http.dir1 = bereq.http.dir1;
set beresp.http.dir2 = bereq.http.dir2;
}
} -start
varnish v1 -cliexpect "shard.*healthy" "backend.list"
varnish v1 -cliexpect "cl.*healthy" "backend.list"
client c1 {
txreq
rxresp
expect resp.status == 200
expect resp.http.unc == "false"
expect resp.http.shard == "s2"
expect resp.http.backend == "s3"
expect resp.http.dir1 == "false"
expect resp.http.dir2 == "false"
txreq -url "/foo"
rxresp
expect resp.status == 200
expect resp.http.unc == "false"
expect resp.http.shard == "s1"
expect resp.http.backend == "s3"
expect resp.http.dir1 == "false"
expect resp.http.dir2 == "true"
} -run