Commit ed02f4f0 authored by Poul-Henning Kamp's avatar Poul-Henning Kamp

Give each -spersistent segment its own lru list, and prevent the

expiry thread from moving objcores from one list to another.



git-svn-id: http://www.varnish-cache.org/svn/trunk/varnish-cache@4218 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent 0f16bc7d
......@@ -283,11 +283,12 @@ struct objcore {
struct object *obj;
struct objhead *objhead;
double timer_when;
unsigned char flags;
unsigned flags;
#define OC_F_ONLRU (1<<0)
#define OC_F_BUSY (1<<1)
#define OC_F_PASS (1<<2)
#define OC_F_PERSISTENT (1<<3)
#define OC_F_LRUDONTMOVE (1<<4)
unsigned timer_idx;
VTAILQ_ENTRY(objcore) list;
VLIST_ENTRY(objcore) lru_list;
......
......@@ -163,21 +163,33 @@ EXP_Insert(struct object *o)
int
EXP_Touch(const struct object *o)
{
int retval = 0;
int retval;
struct objcore *oc;
struct lru *lru;
CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
oc = o->objcore;
if (oc == NULL)
return (retval);
return (0);
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
/* We must have an objhead, otherwise we have no business on a LRU */
CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
/*
* For -spersistent we don't move objects on the lru list. Each
* segment has its own LRU list, and the order on it is not material
* for anything. The code below would move the objects to the
* LRU list of the currently open segment, which would prevent
* the cleaner from doing its job.
*/
if (oc->flags & OC_F_LRUDONTMOVE)
return (0);
if (o->objstore == NULL) /* XXX ?? */
return (retval);
return (0);
lru = STV_lru(o->objstore);
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
retval = 0;
if (Lck_Trylock(&exp_mtx))
return (retval);
if (oc->flags & OC_F_ONLRU) { /* XXX ?? */
......
......@@ -132,6 +132,7 @@ HSH_Object(const struct sess *sp)
CHECK_OBJ_NOTNULL(sp->obj, OBJECT_MAGIC);
CHECK_OBJ_NOTNULL(sp->obj->objstore, STORAGE_MAGIC);
CHECK_OBJ_NOTNULL(sp->obj->objstore->stevedore, STEVEDORE_MAGIC);
AN(ObjIsBusy(sp->obj));
if (sp->obj->objstore->stevedore->object != NULL)
sp->obj->objstore->stevedore->object(sp);
}
......@@ -372,7 +373,10 @@ hsh_testmagic(void *result)
fprintf(stderr, ">\n");
}
/**********************************************************************/
/**********************************************************************
* Insert an object which magically appears out of nowhere or, more likely,
* comes off some persistent storage device.
*/
struct objcore *
HSH_Insert(const struct sess *sp)
......
......@@ -43,7 +43,16 @@ static VTAILQ_HEAD(, stevedore) stevedores =
static const struct stevedore * volatile stv_next;
static struct lru *
/*********************************************************************
* NB! Dirty trick alert:
*
 * We use a captive objcore as tail sentinel for LRU lists, but to
* make sure it does not get into play by accident, we do _not_
* initialize its magic with OBJCORE_MAGIC.
*
*/
struct lru *
LRU_Alloc(void)
{
struct lru *l;
......@@ -55,6 +64,8 @@ LRU_Alloc(void)
return (l);
}
/*********************************************************************/
struct storage *
STV_alloc(struct sess *sp, size_t size)
{
......
......@@ -71,6 +71,7 @@ void STV_open(void);
void STV_close(void);
struct lru *STV_lru(const struct storage *st);
struct lru *LRU_Alloc(void);
int STV_GetFile(const char *fn, int *fdp, const char **fnp, const char *ctx);
uintmax_t STV_FileSize(int fd, const char *size, unsigned *granularity, const char *ctx);
......
......@@ -89,6 +89,7 @@ struct smp_seg {
#define SMP_SEG_MAGIC 0x45c61895
struct smp_sc *sc;
struct lru *lru;
VTAILQ_ENTRY(smp_seg) list; /* on smp_sc.smp_segments */
......@@ -276,12 +277,11 @@ smp_sync_sign(const struct smp_signctx *ctx)
{
int i;
#if 1
/* XXX: round to pages */
i = msync(ctx->ss, ctx->ss->length + SHA256_LEN, MS_SYNC);
if (i)
fprintf(stderr, "SyncSign(%p %s) = %d %s\n",
ctx->ss, ctx->id, i, strerror(errno));
#endif
if (i && 0)
fprintf(stderr, "SyncSign(%p %s) = %d %s\n",
ctx->ss, ctx->id, i, strerror(errno));
}
/*--------------------------------------------------------------------
......@@ -805,7 +805,6 @@ static void
smp_load_seg(struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
{
void *ptr;
uint64_t length;
struct smp_segment *ss;
struct smp_object *so;
struct objcore *oc;
......@@ -820,7 +819,6 @@ smp_load_seg(struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
if (smp_chk_sign(ctx))
return;
ptr = SIGN_DATA(ctx);
length = ctx->ss->length;
ss = ptr;
so = (void*)(sc->ptr + ss->objlist);
no = ss->nalloc;
......@@ -829,7 +827,7 @@ smp_load_seg(struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
continue;
HSH_Prealloc(sp);
oc = sp->wrk->nobjcore;
oc->flags |= OC_F_PERSISTENT;
oc->flags |= OC_F_PERSISTENT | OC_F_LRUDONTMOVE;
oc->flags &= ~OC_F_BUSY;
oc->obj = (void*)so;
oc->smp_seg = sg;
......@@ -837,7 +835,7 @@ smp_load_seg(struct sess *sp, const struct smp_sc *sc, struct smp_seg *sg)
memcpy(sp->wrk->nobjhead->digest, so->hash, SHA256_LEN);
(void)HSH_Insert(sp);
AZ(sp->wrk->nobjcore);
EXP_Inject(oc, sc->parent->lru, so->ttl);
EXP_Inject(oc, sg->lru, so->ttl);
sg->nalloc++;
}
WRK_SumStat(sp->wrk);
......@@ -865,6 +863,7 @@ smp_open_segs(struct smp_sc *sc, struct smp_signctx *ctx)
for(; length > 0; length -= sizeof *ss, ss ++) {
ALLOC_OBJ(sg, SMP_SEG_MAGIC);
AN(sg);
sg->lru = LRU_Alloc();
sg->offset = ss->offset;
sg->length = ss->length;
/* XXX: check that they are inside silo */
......@@ -1081,6 +1080,7 @@ smp_object(const struct sess *sp)
CHECK_OBJ_NOTNULL(sp->obj->objstore->stevedore, STEVEDORE_MAGIC);
CAST_OBJ_NOTNULL(sc, sp->obj->objstore->priv, SMP_SC_MAGIC);
sp->obj->objcore->flags |= OC_F_LRUDONTMOVE;
Lck_Lock(&sc->mtx);
sg = sc->cur_seg;
sc->objreserv += sizeof *so;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment