Commit 12f5a29c authored by Poul-Henning Kamp

Move HSH_Lookup() all the way to the V4 state, compensate backwards
to V3 in cnt_lookup()

Amazingly all the vtc's still pass.
parent ccec288d
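Editorial note on the diff below: HSH_Lookup() now reports its outcome through two objcore out-parameters (the object it found, and any freshly inserted busy objcore) plus an extended enum (HSH_MISS/HSH_BUSY/HSH_HIT/HSH_EXP/HSH_EXPBUSY), and cnt_lookup() folds the new EXP/EXPBUSY cases back into the V3-style hit/miss outcome. The following is a minimal standalone model of that compensation, not Varnish's actual code: the struct, the "healthy" flag (standing in for VDI_Healthy(req->director, req)) and compensate_v3() are simplified, hypothetical stand-ins that only mirror the dispatch logic of the cnt_lookup() switch in the diff.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum lookup_e { HSH_MISS, HSH_BUSY, HSH_HIT, HSH_EXP, HSH_EXPBUSY };

struct objcore { const char *what; };	/* stand-in for the real struct */

/*
 * Fold the V4-style lookup result back to the V3 view: one objcore
 * to act on.  In the real code the objcore that is not chosen gets
 * HSH_Deref()'ed; this model just returns the chosen one.
 */
static struct objcore *
compensate_v3(enum lookup_e lr, struct objcore *oc, struct objcore *boc,
    int healthy)
{
	switch (lr) {
	case HSH_HIT:
	case HSH_EXP:
		/* A usable object, no busy objcore was inserted */
		assert(oc != NULL && boc == NULL);
		return (oc);
	case HSH_MISS:
		/* Only the freshly inserted busy objcore exists */
		assert(oc == NULL && boc != NULL);
		return (boc);
	case HSH_EXPBUSY:
		/* A stale object plus a fresh busy objcore: pick one */
		assert(oc != NULL && boc != NULL);
		return (healthy ? boc /* fetch anew */ : oc /* serve stale */);
	default:
		/* HSH_BUSY: the request went on the waiting list */
		return (NULL);
	}
}

int
main(void)
{
	struct objcore stale = { "stale" }, busy = { "busy" };

	printf("EXPBUSY, healthy backend -> %s\n",
	    compensate_v3(HSH_EXPBUSY, &stale, &busy, 1)->what);
	printf("EXPBUSY, sick backend    -> %s\n",
	    compensate_v3(HSH_EXPBUSY, &stale, &busy, 0)->what);
	return (0);
}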
@@ -304,31 +304,10 @@ hsh_insert_busyobj(struct worker *wrk, struct objhead *oh)
 }
 
 /*---------------------------------------------------------------------
- * XXX: future action:
- *
- *	if (always_miss)
- *		return (insert_busy_obj())
- *	switch (Lookup()) {
- *	case HIT:
- *		return (object);
- *	case BUSY_ONLY:
- *		if (!ignore_body)
- *			return (WAIT)
- *		// fallthrough
- *	case MISS:
- *		return (insert_busy_obj())
- *	case EXPIRED_AND_BUSY:
- *		if (!ignore_body)
- *			return (expired_object)
- *		// fallthrough
- *	case EXPIRED:
- *		return (expired_object + insert_busy_obj())
- *	}
- *
 */
 
 enum lookup_e
-HSH_Lookup(struct req *req, struct objcore **ocp, struct busyobj **bop,
+HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp,
     int wait_for_busy, int always_insert)
 {
 	struct worker *wrk;
@@ -338,11 +317,12 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct busyobj **bop,
 	struct object *o, *exp_o;
 	double exp_entered;
 	int busy_found;
+	enum lookup_e retval;
 
 	AN(ocp);
 	*ocp = NULL;
-	AN(bop);
-	*bop = NULL;
+	AN(bocp);
+	*bocp = NULL;
 	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
 
 	wrk = req->wrk;
@@ -374,7 +354,7 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct busyobj **bop,
 
 	if (always_insert) {
 		/* Insert new objcore in objecthead and release mutex */
-		*ocp = hsh_insert_busyobj(wrk, oh);
+		*bocp = hsh_insert_busyobj(wrk, oh);
 		/* NB: no deref of objhead, new object inherits reference */
 		Lck_Unlock(&oh->mtx);
 		return (HSH_MISS);
@@ -418,8 +398,17 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct busyobj **bop,
 		if (EXP_Ttl(req, o) >= req->t_req) {
 			/* If still valid, use it */
-			break;
-		} else if (o->exp.entered > exp_entered) {
+			assert(oh->refcnt > 1);
+			assert(oc->objhead == oh);
+			oc->refcnt++;
+			Lck_Unlock(&oh->mtx);
+			assert(HSH_DerefObjHead(&wrk->stats, &oh));
+			if (!cache_param->obj_readonly && o->hits < INT_MAX)
+				o->hits++;
+			*ocp = oc;
+			return (HSH_HIT);
+		}
+		if (o->exp.entered > exp_entered) {
 			/* record the newest object */
 			exp_oc = oc;
 			exp_o = o;
@@ -427,74 +416,66 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct busyobj **bop,
 		}
 	}
 
 	/*
 	 * If we have seen a busy object or the backend is unhealthy, and
 	 * we have an object in grace, use it, if req.grace is also
 	 * satisfied.
	 * XXX: VDI_Healthy() call with oh->mtx is not so cool.
 	 */
-	AZ(req->objcore);
-	if (oc == NULL			/* We found no live object */
-	    && exp_oc != NULL		/* There is a grace candidate */
-	    && (busy_found		/* Somebody else is already busy */
-	    || !VDI_Healthy(req->director, req))) {
-					/* Or it is impossible to fetch */
-		oc = exp_oc;
-		o = exp_o;
-	}
-
-	if (oc != NULL) {
-		AN(o);
-		/* We found an object we like */
-		assert(oh->refcnt > 1);
-		assert(oc->objhead == oh);
-		oc->refcnt++;
-		Lck_Unlock(&oh->mtx);
-		assert(HSH_DerefObjHead(&wrk->stats, &oh));
-		if (!cache_param->obj_readonly && o->hits < INT_MAX)
-			o->hits++;
-		*ocp = oc;
-		return (HSH_HIT);
-	}
-
-	if (busy_found) {
-		/* There are one or more busy objects, wait for them */
-		if (wait_for_busy) {
-			CHECK_OBJ_NOTNULL(wrk->nwaitinglist,
-			    WAITINGLIST_MAGIC);
-			if (oh->waitinglist == NULL) {
-				oh->waitinglist = wrk->nwaitinglist;
-				wrk->nwaitinglist = NULL;
-			}
-			VTAILQ_INSERT_TAIL(&oh->waitinglist->list,
-			    req, w_list);
-			if (DO_DEBUG(DBG_WAITINGLIST))
-				VSLb(req->vsl, SLT_Debug,
-				    "on waiting list <%p>", oh);
-		} else {
-			if (DO_DEBUG(DBG_WAITINGLIST))
-				VSLb(req->vsl, SLT_Debug,
-				    "hit busy obj <%p>", oh);
-		}
-		wrk->stats.busy_sleep++;
-		SES_Charge(req->wrk, req);
-		/*
-		 * The objhead reference transfers to the sess, we get it
-		 * back when the sess comes off the waiting list and
-		 * calls us again
-		 */
-		req->hash_objhead = oh;
-		Lck_Unlock(&oh->mtx);
-		return (HSH_BUSY);
-	}
-
-	/* Insert (precreated) objcore in objecthead and release mutex */
-	*ocp = hsh_insert_busyobj(wrk, oh);
-	/* NB: no deref of objhead, new object inherits reference */
-	Lck_Unlock(&oh->mtx);
-	return (HSH_MISS);
+	if (exp_oc != NULL) {
+		AN(exp_o);
+		assert(oh->refcnt > 1);
+		assert(exp_oc->objhead == oh);
+		exp_oc->refcnt++;
+		if (!busy_found) {
+			AZ(req->hash_ignore_busy);
+			*bocp = hsh_insert_busyobj(wrk, oh);
+			retval = HSH_EXPBUSY;
+		} else {
+			retval = HSH_EXP;
+		}
+		Lck_Unlock(&oh->mtx);
+		if (retval == HSH_EXP)
+			assert(HSH_DerefObjHead(&wrk->stats, &oh));
+		if (!cache_param->obj_readonly && exp_o->hits < INT_MAX)
+			exp_o->hits++;
+		*ocp = exp_oc;
+		return (retval);
+	}
+
+	if (!busy_found) {
+		/* Insert objcore in objecthead and release mutex */
+		*bocp = hsh_insert_busyobj(wrk, oh);
+		/* NB: no deref of objhead, new object inherits reference */
+		Lck_Unlock(&oh->mtx);
+		return (HSH_MISS);
+	}
+
+	/* There are one or more busy objects, wait for them */
+	AZ(req->hash_ignore_busy);
+	if (wait_for_busy) {
+		CHECK_OBJ_NOTNULL(wrk->nwaitinglist, WAITINGLIST_MAGIC);
+		if (oh->waitinglist == NULL) {
+			oh->waitinglist = wrk->nwaitinglist;
+			wrk->nwaitinglist = NULL;
+		}
+		VTAILQ_INSERT_TAIL(&oh->waitinglist->list,
+		    req, w_list);
+		if (DO_DEBUG(DBG_WAITINGLIST))
+			VSLb(req->vsl, SLT_Debug, "on waiting list <%p>", oh);
+	} else {
+		if (DO_DEBUG(DBG_WAITINGLIST))
+			VSLb(req->vsl, SLT_Debug, "hit busy obj <%p>", oh);
+	}
+	wrk->stats.busy_sleep++;
+	SES_Charge(req->wrk, req);
+	/*
+	 * The objhead reference transfers to the sess, we get it
+	 * back when the sess comes off the waiting list and
+	 * calls us again
+	 */
+	req->hash_objhead = oh;
+	Lck_Unlock(&oh->mtx);
+	return (HSH_BUSY);
 }
 
 /*---------------------------------------------------------------------
......
@@ -736,7 +736,7 @@ DOT lookup:yes -> pass [style=bold,color=red]
 static enum req_fsm_nxt
 cnt_lookup(struct worker *wrk, struct req *req)
 {
-	struct objcore *oc;
+	struct objcore *oc, *boc;
 	struct object *o;
 	struct objhead *oh;
 	struct busyobj *bo;
@@ -752,7 +752,7 @@ cnt_lookup(struct worker *wrk, struct req *req)
 
 	VRY_Prep(req);
 	AZ(req->objcore);
-	lr = HSH_Lookup(req, &oc, &bo,
+	lr = HSH_Lookup(req, &oc, &boc,
 	    req->esi_level == 0 ? 1 : 0,
 	    req->hash_always_miss ? 1 : 0
 	);
@@ -767,6 +767,45 @@ cnt_lookup(struct worker *wrk, struct req *req)
 	}
 
 	AZ(req->objcore);
+	switch (lr) {
+	case HSH_EXP:
+		VSLb(req->vsl, SLT_Debug, "XXXX EXP\n");
+		AN(oc);
+		AZ(boc);
+		break;
+	case HSH_EXPBUSY:
+		VSLb(req->vsl, SLT_Debug, "XXXX EXPBUSY\n");
+		AN(oc);
+		AN(boc);
+		if (VDI_Healthy(req->director, req)) {
+			VSLb(req->vsl, SLT_Debug, "deref oc\n");
+			(void)HSH_Deref(&wrk->stats, oc, NULL);
+			oc = boc;
+			boc = NULL;
+		} else {
+			VSLb(req->vsl, SLT_Debug, "drop boc\n");
+			(void)HSH_Deref(&wrk->stats, boc, NULL);
+			boc = NULL;
+		}
+		break;
+	case HSH_MISS:
+		VSLb(req->vsl, SLT_Debug, "XXXX MISS\n");
+		AZ(oc);
+		AN(boc);
+		oc = boc;
+		boc = NULL;
+		AN(oc->flags & OC_F_BUSY);
+		break;
+	case HSH_HIT:
+		VSLb(req->vsl, SLT_Debug, "XXXX HIT\n");
+		AN(oc);
+		AZ(boc);
+		break;
+	default:
+		INCOMPL();
+	}
 	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
 	oh = oc->objhead;
 	CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
......
@@ -94,9 +94,20 @@ sub vcl_hash {
 }
 
 sub vcl_lookup {
+	/*
+	if (!obj) {
+		return (deliver);
+	}
+	if (obj.uncacheable) {
+		return (pass);
+	}
+	if (obj.ttl >= 0s) {
+		return (deliver);
+	}
+	if (obj.ttl + obj.grace > 0s) {
+		return (deliver_stale);
+	}
+	*/
 	return (deliver);
 }
@@ -104,6 +115,7 @@ sub vcl_miss {
 	return (fetch);
 }
 
 sub vcl_fetch {
+	return (fetch);
 }
......
@@ -63,7 +63,7 @@ enum lookup_e {
 
 /* cache_hash.c */
 void HSH_Cleanup(struct worker *w);
-enum lookup_e HSH_Lookup(struct req *, struct objcore **, struct busyobj **,
+enum lookup_e HSH_Lookup(struct req *, struct objcore **, struct objcore **,
     int wait_for_busy, int always_insert);
 // struct objcore *HSH_Lookup(struct req *, int wait_for_busy, int always_insert);
 void HSH_Ref(struct objcore *o);
......