Commit e4f08065 authored by Andrew Wiik, committed by Reza Naghibi

Make waitinglist rushes propagate on streaming delivery

This makes waitinglist rushes also happen in HSH_Lookup when encountering
cache hits. This helps get requests on the waitinglist restarted
when doing streaming delivery. Fixes #2977. Backported from @mbgrydeland's
master commits.

Based on: 37368496
parent 42b138ad
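The tests below run with rush_exponent=2, so each request taken off the
waitinglist can in turn rush further waiters when it dereferences the
objhead, and a single wakeup fans out over the whole list instead of
everything waiting for the fetch to complete. A stand-alone toy model of
that fan-out, not Varnish code, just illustrating the arithmetic:

#include <stdio.h>

/*
 * Toy model of waitinglist rush propagation (not Varnish code): each
 * request woken from the waitinglist rushes up to `rush_exponent` more
 * waiters when it in turn dereferences the objhead.
 */
static unsigned
rush_rounds(unsigned waiters, unsigned rush_exponent)
{
    unsigned frontier = 1;    /* the first request to rush the list */
    unsigned rounds = 0;

    while (waiters > 0) {
        unsigned woken = frontier * rush_exponent;

        if (woken > waiters)
            woken = waiters;
        waiters -= woken;
        frontier = woken;     /* these rush again on their own deref */
        rounds++;
    }
    return (rounds);
}

int
main(void)
{
    /* With rush_exponent=2, the five waiters of the hit-for-pass test
     * below drain in two rounds rather than waiting for fetch end. */
    printf("%u rounds\n", rush_rounds(5, 2));
    return (0);
}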
@@ -658,6 +658,7 @@ vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo)
assert(bo->fetch_objcore->boc->state == BOS_STREAM);
else {
assert(bo->fetch_objcore->boc->state == BOS_REQ_DONE);
ObjSetState(wrk, bo->fetch_objcore, BOS_PREP_STREAM);
HSH_Unbusy(wrk, bo->fetch_objcore);
}
@@ -839,6 +840,7 @@ vbf_stp_error(struct worker *wrk, struct busyobj *bo)
}
AZ(ObjSetU64(wrk, bo->fetch_objcore, OA_LEN, o));
VSB_destroy(&synth_body);
ObjSetState(wrk, bo->fetch_objcore, BOS_PREP_STREAM);
HSH_Unbusy(wrk, bo->fetch_objcore);
ObjSetState(wrk, bo->fetch_objcore, BOS_FINISHED);
return (F_STP_DONE);
@@ -1027,11 +1029,10 @@ VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
(void)VRB_Ignore(req);
} else {
ObjWaitState(oc, BOS_STREAM);
if (oc->boc->state == BOS_FAILED) {
if (oc->boc->state == BOS_FAILED)
AN((oc->flags & OC_F_FAILED));
} else {
else
AZ(oc->flags & OC_F_BUSY);
}
}
}
AZ(bo);
......
@@ -77,7 +77,8 @@ static void hsh_rush1(const struct worker *, struct objhead *,
struct rush *, int);
static void hsh_rush2(struct worker *, struct rush *);
static int hsh_deref_objhead(struct worker *wrk, struct objhead **poh);
static int hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh);
static int hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh,
int);
/*---------------------------------------------------------------------*/
@@ -408,11 +409,11 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
continue;
CHECK_OBJ_ORNULL(oc->boc, BOC_MAGIC);
if (oc->boc != NULL && oc->boc->state < BOS_STREAM) {
if (oc->flags & OC_F_BUSY) {
if (req->hash_ignore_busy)
continue;
if (oc->boc->vary != NULL &&
if (oc->boc && oc->boc->vary != NULL &&
!VRY_Match(req, oc->boc->vary))
continue;
@@ -455,7 +456,7 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
if (oc != NULL && oc->flags & OC_F_HFP) {
xid = ObjGetXID(wrk, oc);
dttl = EXP_Dttl(req, oc);
AN(hsh_deref_objhead_unlock(wrk, &oh));
AN(hsh_deref_objhead_unlock(wrk, &oh, HSH_RUSH_POLICY));
wrk->stats->cache_hitpass++;
VSLb(req->vsl, SLT_HitPass, "%u %.6f", xid, dttl);
return (HSH_HITPASS);
@@ -475,7 +476,7 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
}
if (oc->hits < LONG_MAX)
oc->hits++;
AN(hsh_deref_objhead_unlock(wrk, &oh));
AN(hsh_deref_objhead_unlock(wrk, &oh, HSH_RUSH_POLICY));
return (HSH_HIT);
}
@@ -518,7 +519,7 @@ HSH_Lookup(struct req *req, struct objcore **ocp, struct objcore **bocp)
*ocp = exp_oc;
if (exp_oc->hits < LONG_MAX)
exp_oc->hits++;
AN(hsh_deref_objhead_unlock(wrk, &oh));
AN(hsh_deref_objhead_unlock(wrk, &oh, 0));
return (HSH_GRACE);
}
@@ -959,9 +960,11 @@ HSH_DerefObjCore(struct worker *wrk, struct objcore **ocp, int rushmax)
}
static int
hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh)
hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh, int max)
{
struct objhead *oh;
struct rush rush;
int r;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);
@@ -976,11 +979,19 @@ hsh_deref_objhead_unlock(struct worker *wrk, struct objhead **poh)
return(1);
}
INIT_OBJ(&rush, RUSH_MAGIC);
if (!VTAILQ_EMPTY(&oh->waitinglist)) {
assert(oh->refcnt > 1);
hsh_rush1(wrk, oh, &rush, max);
}
if (oh->refcnt == 1)
assert(VTAILQ_EMPTY(&oh->waitinglist));
assert(oh->refcnt > 0);
return (hash->deref(wrk, oh));
r = hash->deref(wrk, oh); /* Unlocks oh->mtx */
hsh_rush2(wrk, &rush);
return (r);
}
static int
@@ -992,7 +1003,7 @@ hsh_deref_objhead(struct worker *wrk, struct objhead **poh)
TAKE_OBJ_NOTNULL(oh, poh, OBJHEAD_MAGIC);
Lck_Lock(&oh->mtx);
return (hsh_deref_objhead_unlock(wrk, &oh));
return (hsh_deref_objhead_unlock(wrk, &oh, 0));
}
void
......
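The hsh_deref_objhead_unlock() change above follows the hsh_rush1()/hsh_rush2()
split already used in this file: waiters are picked off oh->waitinglist while
oh->mtx is still held, and only woken after hash->deref() has released the lock.
A minimal sketch of that collect-then-wake pattern, using hypothetical
simplified types rather than the real Varnish structures:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical, simplified objhead with a waitinglist of parked requests. */
struct parked {
    struct parked   *next;
    pthread_cond_t   cond;
};

struct objhead_s {
    pthread_mutex_t  mtx;
    struct parked   *waitinglist;
};

struct rush_s {
    struct parked   *reqs[8];
    int              n;
};

/* Phase one: under oh->mtx, move up to `max` waiters onto a private list. */
static void
rush_collect(struct objhead_s *oh, struct rush_s *r, int max)
{
    while (oh->waitinglist != NULL && r->n < max && r->n < 8) {
        r->reqs[r->n++] = oh->waitinglist;
        oh->waitinglist = oh->waitinglist->next;
    }
}

/* Phase two: after the lock is dropped, signal the collected waiters.
 * Doing the wakeups outside the lock keeps the critical section short
 * and avoids waking threads that would immediately block on oh->mtx. */
static void
rush_wake(struct rush_s *r)
{
    for (int i = 0; i < r->n; i++)
        pthread_cond_signal(&r->reqs[i]->cond);
}

/* Usage mirroring hsh_deref_objhead_unlock(): collect while locked,
 * unlock, then wake. */
static void
deref_and_rush(struct objhead_s *oh, int max)
{
    struct rush_s r = { .n = 0 };

    pthread_mutex_lock(&oh->mtx);
    rush_collect(oh, &r, max);
    pthread_mutex_unlock(&oh->mtx);   /* hash->deref() unlocks in Varnish */
    rush_wake(&r);
}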
@@ -354,6 +354,8 @@ cnt_transmit(struct worker *wrk, struct req *req)
/* Grab a ref to the bo if there is one (=streaming) */
boc = HSH_RefBoc(req->objcore);
if (boc && boc->state < BOS_STREAM)
ObjWaitState(req->objcore, BOS_STREAM);
clval = http_GetContentLength(req->resp);
/* RFC 7230, 3.3.3 */
status = http_GetStatus(req->resp);
......
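cnt_transmit() now blocks in ObjWaitState() until the busyobj has reached
BOS_STREAM, so a request rushed off the waitinglist does not start delivery
before the fetch side is ready to stream. A rough sketch of that kind of
monotonic state wait, assuming a plain mutex/condvar pair and simplified
state names rather than the real boc internals:

#include <pthread.h>

/* Hypothetical, simplified busyobj states in fetch order. */
enum boc_state_s {
    BOC_REQ_DONE,
    BOC_PREP_STREAM,
    BOC_STREAM,
    BOC_FINISHED,
    BOC_FAILED
};

struct boc_s {
    pthread_mutex_t   mtx;
    pthread_cond_t    cond;
    enum boc_state_s  state;
};

/* Fetch side: states only move forward, and every transition broadcasts
 * so any number of delivery threads can be waiting. */
static void
boc_set_state(struct boc_s *boc, enum boc_state_s st)
{
    pthread_mutex_lock(&boc->mtx);
    if (st > boc->state)
        boc->state = st;
    pthread_cond_broadcast(&boc->cond);
    pthread_mutex_unlock(&boc->mtx);
}

/* Delivery side: block until the object has reached at least `want`,
 * mirroring the new ObjWaitState(req->objcore, BOS_STREAM) call. */
static void
boc_wait_state(struct boc_s *boc, enum boc_state_s want)
{
    pthread_mutex_lock(&boc->mtx);
    while (boc->state < want)
        pthread_cond_wait(&boc->cond, &boc->mtx);
    pthread_mutex_unlock(&boc->mtx);
}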
varnishtest "Streaming delivery and waitinglist rushing"
# Barrier to make sure that c1 connects to s1
barrier b1 cond 2
# Barrier to make sure that all requests are on the waitinglist before
# HSH_Unbusy is called
barrier b2 cond 2
# Barrier to ensure that all requests start streaming before the object
# finishes. This tests that waitinglists are rushed before
# HSH_DerefObjCore().
barrier b3 sock 4
server s1 {
rxreq
barrier b1 sync
barrier b2 sync
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
varnish v1 -arg "-p thread_pools=1" -arg "-p thread_pool_min=20" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -arg "-p debug=+waitinglist" -vcl+backend {
import vtc;
sub vcl_hit {
vtc.barrier_sync("${b3_sock}");
}
} -start
client c1 {
txreq
rxresp
} -start
barrier b1 sync
client c2 {
txreq
rxresp
} -start
client c3 {
txreq
rxresp
} -start
client c4 {
txreq
rxresp
} -start
# Wait until c2-c4 are on the waitinglist
delay 1
varnish v1 -expect busy_sleep == 3
# Release the response headers from s1, which in turn triggers HSH_Unbusy
barrier b2 sync
client c1 -wait
client c2 -wait
client c3 -wait
client c4 -wait
varnishtest "Hit-for-pass and waitinglist rushing"
# Barrier to make sure that s1 is run first
barrier b1 cond 2
# Barrier to make sure that all requests are on the waitinglist before
# HSH_Unbusy is called
barrier b2 cond 2
# Barrier to ensure that all backends are reached before any request
# finishes. This tests that waitinglists are rushed before
# HSH_DerefObjCore().
barrier b3 cond 6
server s1 {
rxreq
barrier b1 sync
barrier b2 sync
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s2 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s3 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s4 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s5 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s6 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
varnish v1 -arg "-p thread_pools=1" -arg "-p thread_pool_min=30" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -arg "-p debug=+waitinglist" -vcl+backend {
sub vcl_backend_fetch {
if (bereq.http.client == "1") {
set bereq.backend = s1;
} else if (bereq.http.client == "2") {
set bereq.backend = s2;
} else if (bereq.http.client == "3") {
set bereq.backend = s3;
} else if (bereq.http.client == "4") {
set bereq.backend = s4;
} else if (bereq.http.client == "5") {
set bereq.backend = s5;
} else if (bereq.http.client == "6") {
set bereq.backend = s6;
}
}
sub vcl_backend_response {
return (pass(1m));
}
} -start
client c1 {
txreq -url /hfp -hdr "Client: 1"
rxresp
} -start
# This makes sure that the c1->s1 request happens first
barrier b1 sync
client c2 {
txreq -url /hfp -hdr "Client: 2"
rxresp
} -start
client c3 {
txreq -url /hfp -hdr "Client: 3"
rxresp
} -start
client c4 {
txreq -url /hfp -hdr "Client: 4"
rxresp
} -start
client c5 {
txreq -url /hfp -hdr "Client: 5"
rxresp
} -start
client c6 {
txreq -url /hfp -hdr "Client: 6"
rxresp
} -start
# Wait until c2-c6 are on the waitinglist
delay 1
varnish v1 -expect busy_sleep == 5
# Release the response headers from s1, which in turn triggers HSH_Unbusy
barrier b2 sync
client c1 -wait
client c2 -wait
client c3 -wait
client c4 -wait
client c5 -wait
client c6 -wait
varnishtest "Hit-for-miss and waitinglist rushing"
# Barrier to make sure that s1 is run first
barrier b1 cond 2
# Barrier to make sure that all requests are on the waitinglist before
# HSH_Unbusy is called
barrier b2 cond 2
# Barrier to ensure that all backends are reached before any request
# finishes. This tests that waitinglists are rushed before
# HSH_DerefObjCore().
barrier b3 cond 6
server s1 {
rxreq
barrier b1 sync
barrier b2 sync
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s2 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s3 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s4 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s5 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
server s6 {
rxreq
txresp -nolen -hdr "Transfer-Encoding: chunked"
chunkedlen 10
barrier b3 sync
chunkedlen 10
chunkedlen 0
} -start
varnish v1 -arg "-p thread_pools=1" -arg "-p thread_pool_min=30" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -arg "-p debug=+waitinglist" -vcl+backend {
sub vcl_backend_fetch {
if (bereq.http.client == "1") {
set bereq.backend = s1;
} else if (bereq.http.client == "2") {
set bereq.backend = s2;
} else if (bereq.http.client == "3") {
set bereq.backend = s3;
} else if (bereq.http.client == "4") {
set bereq.backend = s4;
} else if (bereq.http.client == "5") {
set bereq.backend = s5;
} else if (bereq.http.client == "6") {
set bereq.backend = s6;
}
}
sub vcl_backend_response {
set beresp.uncacheable = true;
}
} -start
client c1 {
txreq -url /hfm -hdr "Client: 1"
rxresp
} -start
# This makes sure that the c1->s1 request happens first
barrier b1 sync
client c2 {
txreq -url /hfm -hdr "Client: 2"
rxresp
} -start
client c3 {
txreq -url /hfm -hdr "Client: 3"
rxresp
} -start
client c4 {
txreq -url /hfm -hdr "Client: 4"
rxresp
} -start
client c5 {
txreq -url /hfm -hdr "Client: 5"
rxresp
} -start
client c6 {
txreq -url /hfm -hdr "Client: 6"
rxresp
} -start
# Wait until c2-c6 are on the waitinglist
delay 1
varnish v1 -expect busy_sleep == 5
# Release the response headers from s1, which in turn triggers HSH_Unbusy
barrier b2 sync
client c1 -wait
client c2 -wait
client c3 -wait
client c4 -wait
client c5 -wait
client c6 -wait