Commit c9e3914a authored by Poul-Henning Kamp

Abstract some "manage list of backends" code which most of the directors will need.
parent 7f4bf4bf
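
As a rough illustration of how a director vmod is expected to drive the new helper, here is a minimal sketch based on the vdir.h declarations added in this commit; the my_dir / my_healthy / my_getfd names and the magic value are illustrative only and are not part of the commit:

#include "config.h"

#include "cache/cache.h"
#include "cache/cache_backend.h"

#include "vrt.h"
#include "vdir.h"

struct my_dir {
    unsigned        magic;
#define MY_DIR_MAGIC    0x8d1f00d5      /* arbitrary example value */
    struct vdir     *vd;
};

static unsigned __match_proto__(vdi_healthy)
my_healthy(const struct director *dir, const struct req *req)
{
    struct my_dir *md;

    CAST_OBJ_NOTNULL(md, dir->priv, MY_DIR_MAGIC);
    /* The shared helper answers "is any backend healthy?" */
    return (vdir_any_healthy(md->vd, req));
}

static struct vbc * __match_proto__(vdi_getfd_f)
my_getfd(const struct director *dir, struct req *req)
{
    struct my_dir *md;
    VCL_BACKEND be;

    CAST_OBJ_NOTNULL(md, dir->priv, MY_DIR_MAGIC);
    vdir_lock(md->vd);
    /* Trivial policy for the sketch: always pick the first backend. */
    be = md->vd->n_backend > 0 ? md->vd->backend[0] : NULL;
    vdir_unlock(md->vd);
    if (be == NULL)
        return (NULL);
    return (be->getfd(be, req));
}

static void
my_dir_setup(struct my_dir *md, const char *vcl_name, VCL_BACKEND be)
{
    /* Create the backend array and embedded director, then register one backend. */
    vdir_new(&md->vd, vcl_name, my_healthy, my_getfd, md);
    (void)vdir_add_backend(md->vd, be, 1.0);
}

The round_robin.c changes below follow the same pattern, with the selection policy replaced by a rotating index (rr->nxt).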
Makefile.am
@@ -14,6 +14,7 @@ noinst_LTLIBRARIES = libvmod_directors.la
 libvmod_directors_la_LDFLAGS = $(AM_LDFLAGS) -module -export-dynamic -avoid-version -shared -rpath /nowhere
 libvmod_directors_la_SOURCES = \
+    vdir.c \
     round_robin.c
 nodist_libvmod_directors_la_SOURCES = \
round_robin.c
@@ -36,61 +36,45 @@
 #include "vrt.h"
 #include "vcc_if.h"
 
-struct rr_entry {
-    unsigned            magic;
-#define RR_ENTRY_MAGIC          0xa80970cf
-    VTAILQ_ENTRY(rr_entry)      list;
-    VCL_BACKEND         be;
-};
+#include "vdir.h"
 
 struct vmod_directors_round_robin {
     unsigned            magic;
-#define VMOD_DEBUG_RR_MAGIC     0x99f4b726
-    VTAILQ_HEAD(, rr_entry)     listhead;
-    int             nbe;
-    pthread_mutex_t     mtx;
-    struct director     *dir;
+#define VMOD_DIRECTORS_ROUND_ROBIN_MAGIC    0xa80970cf
+    struct vdir         *vd;
+    unsigned            nxt;
 };
 
-static unsigned
+static unsigned __match_proto__(vdi_healthy)
 vmod_rr_healthy(const struct director *dir, const struct req *req)
 {
-    struct rr_entry *ep;
     struct vmod_directors_round_robin *rr;
-    unsigned retval = 0;
 
-    CAST_OBJ_NOTNULL(rr, dir->priv, VMOD_DEBUG_RR_MAGIC);
-    AZ(pthread_mutex_lock(&rr->mtx));
-    VTAILQ_FOREACH(ep, &rr->listhead, list) {
-        if (ep->be->healthy(ep->be, req)) {
-            retval = 1;
-            break;
-        }
-    }
-    AZ(pthread_mutex_unlock(&rr->mtx));
-    return (retval);
+    CAST_OBJ_NOTNULL(rr, dir->priv, VMOD_DIRECTORS_ROUND_ROBIN_MAGIC);
+    return (vdir_any_healthy(rr->vd, req));
 }
 
-static struct vbc *
+static struct vbc * __match_proto__(vdi_getfd_f)
 vmod_rr_getfd(const struct director *dir, struct req *req)
 {
-    struct rr_entry *ep = NULL;
     struct vmod_directors_round_robin *rr;
-    int i;
+    unsigned u;
+    VCL_BACKEND be = NULL;
 
-    CAST_OBJ_NOTNULL(rr, dir->priv, VMOD_DEBUG_RR_MAGIC);
-    AZ(pthread_mutex_lock(&rr->mtx));
-    for (i = 0; i < rr->nbe; i++) {
-        ep = VTAILQ_FIRST(&rr->listhead);
-        VTAILQ_REMOVE(&rr->listhead, ep, list);
-        VTAILQ_INSERT_TAIL(&rr->listhead, ep, list);
-        if (ep->be->healthy(ep->be, req))
+    CAST_OBJ_NOTNULL(rr, dir->priv, VMOD_DIRECTORS_ROUND_ROBIN_MAGIC);
+    vdir_lock(rr->vd);
+    for (u = 0; u < rr->vd->n_backend; u++) {
+        rr->nxt %= rr->vd->n_backend;
+        be = rr->vd->backend[rr->nxt];
+        rr->nxt++;
+        CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
+        if (be->healthy(be, req))
             break;
     }
-    AZ(pthread_mutex_unlock(&rr->mtx));
-    if (i == rr->nbe || ep == NULL)
+    vdir_unlock(rr->vd);
+    if (u == rr->vd->n_backend || be == NULL)
         return (NULL);
-    return (ep->be->getfd(ep->be, req));
+    return (be->getfd(be, req));
 }
 
 VCL_VOID
@@ -99,65 +83,42 @@ vmod_round_robin__init(struct req *req, struct vmod_directors_round_robin **rrp,
 {
     struct vmod_directors_round_robin *rr;
 
-    (void)req;
+    AZ(req);
     AN(rrp);
     AZ(*rrp);
-    ALLOC_OBJ(rr, VMOD_DEBUG_RR_MAGIC);
+    ALLOC_OBJ(rr, VMOD_DIRECTORS_ROUND_ROBIN_MAGIC);
     AN(rr);
     *rrp = rr;
-    AZ(pthread_mutex_init(&rr->mtx, NULL));
-    VTAILQ_INIT(&rr->listhead);
-    ALLOC_OBJ(rr->dir, DIRECTOR_MAGIC);
-    AN(rr->dir);
-    REPLACE(rr->dir->vcl_name, vcl_name);
-    rr->dir->priv = rr;
-    rr->dir->healthy = vmod_rr_healthy;
-    rr->dir->getfd = vmod_rr_getfd;
+    vdir_new(&rr->vd, vcl_name, vmod_rr_healthy, vmod_rr_getfd, rr);
 }
 
 VCL_VOID
 vmod_round_robin__fini(struct req *req, struct vmod_directors_round_robin **rrp)
 {
     struct vmod_directors_round_robin *rr;
-    struct rr_entry *ep;
 
-    (void)req;
+    AZ(req);
     rr = *rrp;
     *rrp = NULL;
-    CHECK_OBJ_NOTNULL(rr, VMOD_DEBUG_RR_MAGIC);
-    AZ(pthread_mutex_destroy(&rr->mtx));
-    while (!VTAILQ_EMPTY(&rr->listhead)) {
-        ep = VTAILQ_FIRST(&rr->listhead);
-        VTAILQ_REMOVE(&rr->listhead, ep, list);
-        FREE_OBJ(ep);
-    }
-    free(rr->dir->vcl_name);
-    FREE_OBJ(rr->dir);
+    CHECK_OBJ_NOTNULL(rr, VMOD_DIRECTORS_ROUND_ROBIN_MAGIC);
+    vdir_delete(&rr->vd);
     FREE_OBJ(rr);
 }
 
 VCL_VOID
-vmod_round_robin_add_backend(struct req *req, struct vmod_directors_round_robin * rr,
-    VCL_BACKEND be)
+vmod_round_robin_add_backend(struct req *req,
+    struct vmod_directors_round_robin *rr, VCL_BACKEND be)
 {
-    struct rr_entry *ep;
-
-    (void)req;
-    ALLOC_OBJ(ep, RR_ENTRY_MAGIC);
-    AN(ep);
-    ep->be = be;
-    AZ(pthread_mutex_lock(&rr->mtx));
-    VTAILQ_INSERT_TAIL(&rr->listhead, ep, list);
-    rr->nbe++;
-    AZ(pthread_mutex_unlock(&rr->mtx));
+    (void)req;
+    CHECK_OBJ_NOTNULL(rr, VMOD_DIRECTORS_ROUND_ROBIN_MAGIC);
+    vdir_add_backend(rr->vd, be, 0.0);
 }
 
 VCL_BACKEND __match_proto__()
 vmod_round_robin_backend(struct req *req, struct vmod_directors_round_robin *rr)
 {
     (void)req;
-    return (rr->dir);
+    CHECK_OBJ_NOTNULL(rr, VMOD_DIRECTORS_ROUND_ROBIN_MAGIC);
+    return (rr->vd->dir);
 }
vdir.c (new file)
/*-
* Copyright (c) 2013 Varnish Software AS
* All rights reserved.
*
* Author: Poul-Henning Kamp <phk@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "config.h"
#include <stdlib.h>
#include "cache/cache.h"
#include "cache/cache_backend.h"
#include "vrt.h"
#include "vdir.h"
static void
vdir_expand(struct vdir *vd, unsigned n)
{
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
vd->backend = realloc(vd->backend, n * sizeof *vd->backend);
AN(vd->backend);
vd->weight = realloc(vd->weight, n * sizeof *vd->weight);
AN(vd->weight);
vd->l_backend = n;
}
void
vdir_new(struct vdir **vdp, const char *vcl_name, vdi_healthy *healthy,
vdi_getfd_f *getfd, void *priv)
{
struct vdir *vd;
AN(vcl_name);
AN(vdp);
AZ(*vdp);
ALLOC_OBJ(vd, VDIR_MAGIC);
AN(vd);
*vdp = vd;
AZ(pthread_mutex_init(&vd->mtx, NULL));
ALLOC_OBJ(vd->dir, DIRECTOR_MAGIC);
AN(vd->dir);
REPLACE(vd->dir->vcl_name, vcl_name);
vd->dir->priv = priv;
vd->dir->healthy = healthy;
vd->dir->getfd = getfd;
}
void
vdir_delete(struct vdir **vdp)
{
struct vdir *vd;
AN(vdp);
vd = *vdp;
*vdp = NULL;
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
free(vd->backend);
free(vd->weight);
AZ(pthread_mutex_destroy(&vd->mtx));
FREE_OBJ(vd->dir);
FREE_OBJ(vd);
}
void
vdir_lock(struct vdir *vd)
{
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
AZ(pthread_mutex_lock(&vd->mtx));
}
void
vdir_unlock(struct vdir *vd)
{
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
AZ(pthread_mutex_unlock(&vd->mtx));
}
unsigned
vdir_add_backend(struct vdir *vd, VCL_BACKEND be, double weight)
{
unsigned u;
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
AN(be);
vdir_lock(vd);
if (vd->n_backend >= vd->l_backend)
vdir_expand(vd, vd->l_backend + 16);
assert(vd->n_backend < vd->l_backend);
u = vd->n_backend++;
vd->backend[u] = be;
vd->weight[u] = weight;
vdir_unlock(vd);
return (u);
}
unsigned
vdir_any_healthy(struct vdir *vd, const struct req *req)
{
unsigned retval = 0;
VCL_BACKEND be;
unsigned u;
CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);
vdir_lock(vd);
for (u = 0; u < vd->n_backend; u++) {
be = vd->backend[u];
CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
if (be->healthy(be, req)) {
retval = 1;
break;
}
}
vdir_unlock(vd);
return (retval);
}
vdir.h (new file)
/*-
* Copyright (c) 2013 Varnish Software AS
* All rights reserved.
*
* Author: Poul-Henning Kamp <phk@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
struct vdir {
    unsigned            magic;
#define VDIR_MAGIC          0x99f4b726
    pthread_mutex_t     mtx;
    unsigned            n_backend;
    unsigned            l_backend;
    VCL_BACKEND         *backend;
    double              *weight;
    struct director     *dir;
};

void vdir_new(struct vdir **vdp, const char *vcl_name, vdi_healthy *healthy,
    vdi_getfd_f *getfd, void *priv);
void vdir_delete(struct vdir **vdp);
void vdir_lock(struct vdir *vd);
void vdir_unlock(struct vdir *vd);
unsigned vdir_add_backend(struct vdir *vd, VCL_BACKEND be, double weight);
unsigned vdir_any_healthy(struct vdir *vd, const struct req *);