Commit 70ba79fd authored by Poul-Henning Kamp

Go over the session structure memory management code.

Always zap struct sess to zero before we start using it, but do so in the
recycling worker thread if we can get away with it, to reduce the amount
of work done in the acceptor.

Let worker threads pick up some of the pool-expansion work by pre-creating
sessmem structures, to offload this from the acceptor thread if possible.



git-svn-id: http://www.varnish-cache.org/svn/trunk@4493 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent ab72dc67
......@@ -427,7 +427,6 @@ struct sess {
#endif
};
/* -------------------------------------------------------------------*/
/* Backend connection */
......@@ -609,8 +608,8 @@ void WRK_BgThread(pthread_t *thr, const char *name, bgthread_t *func,
/* cache_session.c [SES] */
void SES_Init(void);
struct sess *SES_New(const struct sockaddr *addr, unsigned len);
struct sess *SES_Alloc(const struct sockaddr *addr, unsigned len);
struct sess *SES_New(void);
struct sess *SES_Alloc(void);
void SES_Delete(struct sess *sp);
void SES_Charge(struct sess *sp);
void SES_ResetBackendTimeouts(struct sess *sp);
......
......@@ -267,7 +267,7 @@ vca_acct(void *arg)
}
continue;
}
sp = SES_New(addr, l);
sp = SES_New();
if (sp == NULL) {
AZ(close(i));
VSL_stats->client_drop++;
......@@ -278,6 +278,9 @@ vca_acct(void *arg)
sp->t_open = now;
sp->t_end = now;
sp->mylsock = ls;
assert(l < sp->sockaddrlen);
memcpy(sp->sockaddr, addr, l);
sp->sockaddrlen = l;
sp->step = STP_FIRST;
WRK_QueueSession(sp);
......
......@@ -549,7 +549,7 @@ wrk_bgthread(void *arg)
CAST_OBJ_NOTNULL(bt, arg, BGTHREAD_MAGIC);
THR_SetName(bt->name);
sp = SES_Alloc(NULL, 0);
sp = SES_Alloc();
XXXAN(sp);
memset(&ww, 0, sizeof ww);
sp->wrk = &ww;
......
......@@ -94,50 +94,75 @@ SES_Charge(struct sess *sp)
#undef ACCT
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* This function allocates a session + assorted peripheral data
* structures in one single malloc operation.
*/
static struct sess *
ses_setup(struct sessmem *sm, const struct sockaddr *addr, unsigned len)
static struct sessmem *
ses_sm_alloc(void)
{
struct sess *sp;
unsigned char *p;
struct sessmem *sm;
unsigned char *p, *q;
volatile unsigned nws;
volatile unsigned nhttp;
unsigned l, hl;
if (sm == NULL) {
if (VSL_stats->n_sess_mem >= params->max_sess)
return (NULL);
/*
* It is not necessary to lock these, but we need to
* cache them locally, to make sure we get a consistent
* view of the value.
*/
nws = params->sess_workspace;
nhttp = params->http_headers;
hl = HTTP_estimate(nhttp);
l = sizeof *sm + nws + 2 * hl;
p = malloc(l);
if (p == NULL)
return (NULL);
if (VSL_stats->n_sess_mem >= params->max_sess)
return (NULL);
/*
* It is not necessary to lock these, but we need to
* cache them locally, to make sure we get a consistent
* view of the value.
*/
nws = params->sess_workspace;
nhttp = params->http_headers;
hl = HTTP_estimate(nhttp);
l = sizeof *sm + nws + 2 * hl;
p = malloc(l);
if (p == NULL)
return (NULL);
q = p + l;
Lck_Lock(&stat_mtx);
VSL_stats->n_sess_mem++;
Lck_Unlock(&stat_mtx);
/* Don't waste time zeroing the workspace */
memset(p, 0, l - nws);
sm = (void*)p;
p += sizeof *sm;
sm->magic = SESSMEM_MAGIC;
sm->workspace = nws;
sm->http[0] = HTTP_create(p, nhttp);
p += hl;
sm->http[1] = HTTP_create(p, nhttp);
p += hl;
sm->wsp = p;
p += nws;
assert(p == q);
return (sm);
}
/*--------------------------------------------------------------------
* This prepares a session for use, based on its sessmem structure.
*/
static void
ses_setup(struct sessmem *sm)
{
struct sess *sp;
/* Don't waste time zeroing the workspace */
memset(p, 0, l - nws);
sm = (void*)p;
p += sizeof *sm;
sm->magic = SESSMEM_MAGIC;
sm->workspace = nws;
VSL_stats->n_sess_mem++;
sm->http[0] = HTTP_create(p, nhttp);
p += hl;
sm->http[1] = HTTP_create(p, nhttp);
p += hl;
sm->wsp = p;
}
CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);
VSL_stats->n_sess++;
sp = &sm->sess;
memset(sp, 0, sizeof *sp);
/* We assume that the sess has been zeroed by the time we get here */
AZ(sp->magic);
sp->magic = SESS_MAGIC;
sp->mem = sm;
sp->sockaddr = (void*)(&sm->sockaddr[0]);
......@@ -150,31 +175,23 @@ ses_setup(struct sessmem *sm, const struct sockaddr *addr, unsigned len)
sp->t_resp = NAN;
sp->t_end = NAN;
sp->grace = NAN;
sp->disable_esi = 0;
assert(len <= sp->sockaddrlen);
if (addr != NULL) {
memcpy(sp->sockaddr, addr, len);
sp->sockaddrlen = len;
}
WS_Init(sp->ws, "sess", sm->wsp, sm->workspace);
sp->http = sm->http[0];
sp->http0 = sm->http[1];
SES_ResetBackendTimeouts(sp);
return (sp);
}
/*--------------------------------------------------------------------
* Try to recycle an existing session.
* Get a new session, preferably by recycling an already ready one
*/
struct sess *
SES_New(const struct sockaddr *addr, unsigned len)
SES_New(void)
{
struct sessmem *sm;
struct sess *sp;
assert(pthread_self() == VCA_thread);
assert(ses_qp <= 1);
......@@ -189,27 +206,52 @@ SES_New(const struct sockaddr *addr, unsigned len)
Lck_Unlock(&ses_mem_mtx);
sm = VTAILQ_FIRST(&ses_free_mem[ses_qp]);
}
if (sm != NULL)
if (sm != NULL) {
VTAILQ_REMOVE(&ses_free_mem[ses_qp], sm, list);
return (ses_setup(sm, addr, len));
sp = &sm->sess;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
} else {
sm = ses_sm_alloc();
if (sm == NULL)
return (NULL);
ses_setup(sm);
sp = &sm->sess;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
}
VSL_stats->n_sess++; /* XXX: locking ? */
return (sp);
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* Allocate a session for use by background threads.
*/
struct sess *
SES_Alloc(const struct sockaddr *addr, unsigned len)
SES_Alloc(void)
{
return (ses_setup(NULL, addr, len));
struct sess *sp;
struct sessmem *sm;
sm = ses_sm_alloc();
AN(sm);
ses_setup(sm);
sp = &sm->sess;
sp->sockaddrlen = 0;
return (sp);
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
 * Recycle a session.  If the workspace has changed, delete it,
 * otherwise wash it, and put it up for adoption.
 */
void
SES_Delete(struct sess *sp)
{
struct acct *b = &sp->acct;
struct sessmem *sm;
// unsigned workspace;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
sm = sp->mem;
......@@ -217,7 +259,7 @@ SES_Delete(struct sess *sp)
AZ(sp->obj);
AZ(sp->vcl);
VSL_stats->n_sess--;
VSL_stats->n_sess--; /* XXX: locking ? */
assert(!isnan(b->first));
assert(!isnan(sp->t_end));
VSL(SLT_StatSess, sp->id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
......@@ -225,28 +267,28 @@ SES_Delete(struct sess *sp)
b->sess, b->req, b->pipe, b->pass,
b->fetch, b->hdrbytes, b->bodybytes);
if (sm->workspace != params->sess_workspace) {
Lck_Lock(&stat_mtx);
VSL_stats->n_sess_mem--;
Lck_Unlock(&stat_mtx);
free(sm);
} else {
/* Clean and prepare for reuse */
// workspace = sm->workspace;
// memset(sm, 0, sizeof *sm);
// sm->magic = SESSMEM_MAGIC;
// sm->workspace = workspace;
/* XXX: cache_waiter_epoll.c evaluates data.ptr. If it's
* XXX: not nulled, things go wrong during load.
* XXX: Should probably find a better method to deal with
* XXX: this scenario...
*/
#if defined(HAVE_EPOLL_CTL)
sm->sess.ev.data.ptr = NULL;
#endif
ses_setup(sm);
Lck_Lock(&ses_mem_mtx);
VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
Lck_Unlock(&ses_mem_mtx);
}
/* Try to precreate some ses-mem so the acceptor will not have to */
if (VSL_stats->n_sess_mem < VSL_stats->n_sess + 10) {
sm = ses_sm_alloc();
if (sm != NULL) {
ses_setup(sm);
Lck_Lock(&ses_mem_mtx);
VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
Lck_Unlock(&ses_mem_mtx);
}
}
}
/*--------------------------------------------------------------------*/
......@@ -259,6 +301,8 @@ SES_Init()
Lck_New(&ses_mem_mtx);
}
/* XXX: We should use NAN as default marker */
void
SES_ResetBackendTimeouts(struct sess *sp)
{
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment