Commit 7e977924 authored by Poul-Henning Kamp

Expend a lock on keeping the backend statistics consistent.

Rename the fields to make more sense



git-svn-id: http://www.varnish-cache.org/svn/trunk/varnish-cache@1238 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent 580eebe6
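The race the lock closes is the ordinary lost-update problem: two worker threads executing VSL_stats->backend_conn++ at the same time can drop an increment, so the counters drift. The approach in this commit is to note what happened while no lock is held (the new reuse flag) and then fold all the backend counters for one connection attempt into a single short critical section on the existing vbemtx mutex. Below is a minimal, self-contained sketch of that pattern; the struct, mutex and function names (stats, stats_mtx, count_attempt, worker) are invented for illustration, and only the counter names mirror the VSL_stats fields touched by this commit.

	#include <pthread.h>
	#include <stdio.h>

	/* Invented stand-ins for the shared VSL_stats counters. */
	static struct {
		unsigned backend_conn;		/* connections handed to workers */
		unsigned backend_reuse;		/* ...of which came from the cache */
		unsigned backend_fail;		/* failed connection attempts */
	} stats;

	static pthread_mutex_t stats_mtx = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Account for one connection attempt.  'reuse' was noted while no
	 * lock was held; it is only folded into the shared counters here,
	 * keeping the critical section as small as possible.
	 */
	static void
	count_attempt(int ok, int reuse)
	{

		pthread_mutex_lock(&stats_mtx);
		if (ok) {
			stats.backend_reuse += reuse;
			stats.backend_conn++;
		} else
			stats.backend_fail++;
		pthread_mutex_unlock(&stats_mtx);
	}

	static void *
	worker(void *priv)
	{
		int i;

		(void)priv;
		for (i = 0; i < 100000; i++)
			count_attempt(1, i & 1);	/* every other one counts as a reuse */
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t[4];
		int i;

		for (i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, NULL);
		for (i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		printf("conn %u reuse %u fail %u\n",
		    stats.backend_conn, stats.backend_reuse, stats.backend_fail);
		return (0);
	}

Built with -pthread, the four workers always add up to conn 400000 and reuse 200000; without the mutex around the increments the totals would typically come up short.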
@@ -214,9 +214,12 @@ vbe_connect(struct sess *sp, struct backend *bp)
 
 /* Get a backend connection ------------------------------------------
  *
- * First locate the backend shadow, if necessary by creating one.
- * If there are free connections, use the first, otherwise build a
- * new connection.
+ * Try all cached backend connections for this backend, and use the
+ * first one that looks like it is still connected.
+ * If that fails to get us a connection, create a new one, reusing a
+ * connection from the freelist, if possible.
+ *
+ * This function is slightly complicated by optimizations on vbemtx.
  */
 
 static struct vbe_conn *
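The rewritten comment refers to the two lists the rest of the diff manipulates: a per-backend list of cached, still-open connections (bp->connlist) and a global list of unused vbe_conn structs (vbe_head), both handled under vbemtx. A self-contained sketch of how such a pair of TAILQ lists is typically worked, with invented type and function names (conn, cache_get, cache_put, conn_alloc) and with the locking left out for brevity:

	#include <stdlib.h>
	#include <sys/queue.h>

	/* Invented stand-ins for struct vbe_conn and struct backend. */
	struct conn {
		int			fd;		/* -1 while unused */
		struct backend		*backend;
		TAILQ_ENTRY(conn)	list;
	};

	TAILQ_HEAD(connhead, conn);

	struct backend {
		struct connhead		connlist;	/* cached, open connections */
	};

	static struct connhead freelist = TAILQ_HEAD_INITIALIZER(freelist);

	/* Pop a cached connection for this backend, if there is one. */
	static struct conn *
	cache_get(struct backend *bp)
	{
		struct conn *c;

		c = TAILQ_FIRST(&bp->connlist);
		if (c != NULL)
			TAILQ_REMOVE(&bp->connlist, c, list);
		return (c);
	}

	/* Put a still-open connection back for later reuse. */
	static void
	cache_put(struct backend *bp, struct conn *c)
	{

		TAILQ_INSERT_HEAD(&bp->connlist, c, list);
	}

	/* Get an unused struct, preferring the freelist over a fresh allocation. */
	static struct conn *
	conn_alloc(void)
	{
		struct conn *c;

		c = TAILQ_FIRST(&freelist);
		if (c != NULL) {
			TAILQ_REMOVE(&freelist, c, list);
			return (c);
		}
		c = calloc(1, sizeof *c);
		if (c != NULL)
			c->fd = -1;
		return (c);
	}

	int
	main(void)
	{
		struct backend be;
		struct conn *c;

		TAILQ_INIT(&be.connlist);
		c = cache_get(&be);		/* nothing cached yet -> NULL */
		if (c == NULL)
			c = conn_alloc();	/* falls back to freelist, then calloc */
		if (c == NULL)
			return (1);
		/* ... connect c->fd, use it ... */
		cache_put(&be, c);		/* available for the next request */
		c = cache_get(&be);		/* and it comes straight back */
		free(c);
		return (0);
	}

In the diff itself every TAILQ operation on these lists sits between LOCK(&vbemtx) and UNLOCK(&vbemtx), which is presumably what the comment's remark about "optimizations on vbemtx" alludes to.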
@@ -225,20 +228,17 @@ vbe_nextfd(struct sess *sp)
 	struct vbe_conn *vc, *vc2;
 	struct pollfd pfd;
 	struct backend *bp;
+	int reuse = 0;
 
 	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
 	bp = sp->backend;
 	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
+	vc2 = NULL;
 	while (1) {
-		/*
-		 * Try all connections on this backend until we find one
-		 * that works.  If that fails, grab a free connection
-		 * (if any) while we have the lock anyway.
-		 */
-		vc2 = NULL;
 		LOCK(&vbemtx);
 		vc = TAILQ_FIRST(&bp->connlist);
 		if (vc != NULL) {
+			assert(vc->backend == bp);
 			assert(vc->fd >= 0);
 			TAILQ_REMOVE(&bp->connlist, vc, list);
 		} else {
@@ -256,8 +256,10 @@ vbe_nextfd(struct sess *sp)
 		pfd.fd = vc->fd;
 		pfd.events = POLLIN;
 		pfd.revents = 0;
-		if (!poll(&pfd, 1, 0))
+		if (!poll(&pfd, 1, 0)) {
+			reuse = 1;
 			break;
+		}
 		VBE_ClosedFd(sp->wrk, vc, 0);
 	}
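The zero-timeout poll() is the cheap liveness probe for a cached connection: an idle backend connection should have nothing to read, so any readable event (unexpected data or EOF) means the fd cannot safely be reused and VBE_ClosedFd() retires it; a quiet fd is taken, and with this change the taken case also sets reuse = 1 for the later stats update. A small, self-contained demonstration of the same probe, using a socketpair in place of a real backend (fd_looks_idle is an invented name):

	#include <poll.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* Zero-timeout poll: 0 means nothing pending, so the fd is reusable. */
	static int
	fd_looks_idle(int fd)
	{
		struct pollfd pfd;

		pfd.fd = fd;
		pfd.events = POLLIN;
		pfd.revents = 0;
		return (poll(&pfd, 1, 0) == 0);
	}

	int
	main(void)
	{
		int sv[2];

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
			return (1);
		printf("open peer:   %s\n", fd_looks_idle(sv[0]) ? "reusable" : "stale");
		close(sv[1]);			/* peer goes away... */
		printf("closed peer: %s\n", fd_looks_idle(sv[0]) ? "reusable" : "stale");
		close(sv[0]);
		return (0);
	}

While the peer is still open the probe reports the fd as reusable; after close(sv[1]) the fd becomes readable at EOF and the probe rejects it.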
@@ -266,34 +268,33 @@ vbe_nextfd(struct sess *sp)
 			vc = vbe_new_conn();
 		else
 			vc = vc2;
-		AN(vc);
-		assert(vc->fd == -1);
-		AZ(vc->backend);
-	}
-
-	/* If not connected yet, do so */
-	if (vc->fd < 0) {
-		AZ(vc->backend);
-		vc->fd = vbe_connect(sp, bp);
-		LOCK(&vbemtx);
-		if (vc->fd < 0) {
-			vc->backend = NULL;
-			TAILQ_INSERT_HEAD(&vbe_head, vc, list);
-			VSL_stats->backend_unused++;
-			vc = NULL;
-		} else {
-			vc->backend = bp;
-		}
-		UNLOCK(&vbemtx);
-	} else {
-		assert(vc->fd >= 0);
-		assert(vc->backend == bp);
+		if (vc != NULL) {
+			assert(vc->fd == -1);
+			AZ(vc->backend);
+			vc->fd = vbe_connect(sp, bp);
+			if (vc->fd < 0) {
+				LOCK(&vbemtx);
+				TAILQ_INSERT_HEAD(&vbe_head, vc, list);
+				VSL_stats->backend_unused++;
+				UNLOCK(&vbemtx);
+				vc = NULL;
+			} else {
+				vc->backend = bp;
+			}
+		}
 	}
-	if (vc != NULL ) {
-		assert(vc->fd >= 0);
-		VSL_stats->backend_conn++;
+	LOCK(&vbemtx);
+	if (vc != NULL ) {
+		VSL_stats->backend_reuse += reuse;
+		VSL_stats->backend_conn++;
+	} else {
+		VSL_stats->backend_fail++;
+	}
+	UNLOCK(&vbemtx);
+	if (vc != NULL ) {
 		WSL(sp->wrk, SLT_BackendXID, vc->fd, "%u", sp->xid);
-		AN(vc->backend);
+		assert(vc->fd >= 0);
+		assert(vc->backend == bp);
 	}
 	return (vc);
 }
@@ -348,9 +349,9 @@ VBE_RecycleFd(struct worker *w, struct vbe_conn *vc)
 	CHECK_OBJ_NOTNULL(vc, VBE_CONN_MAGIC);
 	assert(vc->fd >= 0);
 	AN(vc->backend);
-	VSL_stats->backend_recycle++;
 	WSL(w, SLT_BackendReuse, vc->fd, "%s", vc->backend->vcl_name);
 	LOCK(&vbemtx);
+	VSL_stats->backend_recycle++;
 	TAILQ_INSERT_HEAD(&vc->backend->connlist, vc, list);
 	UNLOCK(&vbemtx);
 }
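Moving backend_recycle++ inside the LOCK/UNLOCK pair follows the same rule as the other hunks: shared counters and list manipulation happen under vbemtx, while per-connection work such as the WSL() log line stays in front of the lock so the critical section remains short. A small sketch of that split, with invented names (recycle, mtx, logline); only the counter name mirrors the real field:

	#include <pthread.h>
	#include <stdio.h>

	static unsigned backend_recycle;	/* shared counter, like VSL_stats->backend_recycle */
	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

	/* Hand a still-open connection back to the cache. */
	static void
	recycle(int fd, const char *vcl_name)
	{
		char logline[64];

		/* Per-connection work: format and emit the log line unlocked. */
		snprintf(logline, sizeof logline, "BackendReuse %d %s", fd, vcl_name);
		fprintf(stderr, "%s\n", logline);

		/* Shared state: the counter (and, in the real code, the list insert). */
		pthread_mutex_lock(&mtx);
		backend_recycle++;
		pthread_mutex_unlock(&mtx);
	}

	int
	main(void)
	{
		recycle(7, "default");
		printf("backend_recycle = %u\n", backend_recycle);
		return (0);
	}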
...
@@ -133,13 +133,14 @@ void
 VRT_free_backends(struct VCL_conf *cp)
 {
 
-	(void)cp;
+	(void)cp;	/* XXX */
 }
 
 void
 VRT_fini_backend(struct backend *be)
 {
 
-	(void)be;
+	(void)be;	/* XXX */
 }
 
 /*--------------------------------------------------------------------*/