Commit 699dd4bc authored by Nils Goroll

variation in ReqAcct understood

parent 8880e2a3
@@ -85,14 +85,21 @@ client c1 {
}
}
+# Because ReqAcct includes chunk headers, depending on the order of
+# events and whether or not we use (partial) sequential delivery (for
+# example, when no threads are available), ReqAcct adds n x 8 to the
+# net data size.
+#
+# In this test case we see either one or two chunk headers in addition
+# to the end chunk.
client c1 -run
varnish v1 -expect esi_errors == 0
delay 1
logexpect l1 -v v1 -d 1 -g vxid -q "vxid == 1001" {
expect 0 1001 Begin "^req .* rxreq"
# ReqAcct body counts include chunked overhead
-expect * = ReqAcct "^29 0 29 187 104 291$"
+expect * = ReqAcct "^29 0 29 187 (104|96) (291|283)$"
expect 0 = End
} -run
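
The arithmetic behind the relaxed expectation above: per the new comment, each chunk header adds 8 bytes on top of the net body size, so one versus two chunk headers accounts for the (104|96) alternation, and the totals then follow as 187 + 96 = 283 and 187 + 104 = 291. Below is a minimal model of that in plain C, not varnish-cache code; the 83-byte net payload and the 5-byte terminating chunk are assumptions picked only so the numbers line up, while the 8-byte per-chunk figure is the one stated in the comment.

#include <stdio.h>

/* per-chunk framing overhead, as stated in the test comment above */
#define CHUNK_HDR_BYTES 8

static unsigned
framed_body_bytes(unsigned net_bytes, unsigned nchunks, unsigned end_chunk)
{
	/* net payload + one header per data chunk + terminating chunk */
	return (net_bytes + nchunks * CHUNK_HDR_BYTES + end_chunk);
}

int
main(void)
{
	/*
	 * Hypothetical split of the same payload into one or two data
	 * chunks; only the 8-byte step between the two results is the
	 * point, matching the (104|96) alternation in the expectation.
	 */
	printf("%u\n", framed_body_bytes(83, 1, 5));	/* -> 96 */
	printf("%u\n", framed_body_bytes(83, 2, 5));	/* -> 104 */
	return (0);
}
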
@@ -112,12 +119,6 @@ logexpect l4 -v v1 -d 1 -g vxid -q "vxid == 1004" {
expect * = End
} -run
-# XXX
-# sometimes accounting is missing 8 bytes - we would expect:
-# ReqAcct "^29 0 29 192 104 296$
-#
-# because, when this happens, the subreq accounting and the response are
-# correct, I assume this to be an issue in varnish-cache
logexpect l5 -v v1 -d 1 -g vxid -q "vxid == 1005" {
expect * 1005 Begin "^req .* rxreq"
......
@@ -848,7 +848,6 @@ ved_task(struct worker *wrk, void *priv)
* the direct call, otherwise via Pool_Task()
*/
if (wrk->aws->r != NULL) {
-assert(priv == wrk->aws->f);
priv = *(void **)priv;
WS_Release(wrk->aws, 0);
}
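
For context on the retained priv = *(void **)priv;: the include code hands the task the address of a struct req * (the "&req, sizeof req" arguments visible in the next hunk), and on the queued path that pointer value is first copied into the worker's scratch workspace, so the task always receives a pointer to a pointer and one dereference recovers the req. The sketch below illustrates that hand-off with a made-up mini API; queue_task_arg, scratch and job are hypothetical names for illustration only, not varnish-cache functions.

#include <assert.h>
#include <string.h>

struct worker {
	char	scratch[64];	/* stand-in for the worker's workspace */
};

struct req;			/* opaque for the example */

typedef void task_func(struct worker *, void *priv);

/* Copy the argument into scratch and run the task on the copy. */
static void
queue_task_arg(struct worker *wrk, task_func *func, const void *arg, size_t len)
{
	assert(len <= sizeof wrk->scratch);
	memcpy(wrk->scratch, arg, len);
	func(wrk, wrk->scratch);
}

/* Task body: recover the struct req * the same way ved_task() does. */
static void
job(struct worker *wrk, void *priv)
{
	struct req *req;

	(void)wrk;
	req = *(struct req **)priv;	/* same shape as priv = *(void **)priv */
	(void)req;			/* real code would deliver the include here */
}

static void
include(struct worker *wrk, struct req *req)
{
	/* pass the *pointer* by value, mirroring (&req, sizeof req) */
	queue_task_arg(wrk, job, &req, sizeof req);
}

int
main(void)
{
	struct worker wrk;

	include(&wrk, NULL);	/* a NULL req is fine for this demo */
	return (0);
}
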
@@ -1151,9 +1150,8 @@ vped_include(struct req *preq, const char *src, const char *host,
VSLdbgv(preq, "ved_include: attempt new thread req=%p", req);
#if WIP
if (use_thread_blocking == 0) {
-if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ,
+if (0 && Pool_Task_Arg(wrk, TASK_QUEUE_REQ,
ved_task, &req, sizeof req) == 1)
return (1);
/*
@@ -1167,7 +1165,6 @@ vped_include(struct req *preq, const char *src, const char *host,
}
AN(use_thread_blocking);
#endif
/* XXX because of T_FINAL blocking in the subreq thread,
* these threads *have* to run.
*
......
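
The pre-existing comment at the end of the last hunk ("these threads *have* to run") states the dispatch constraint: with T_FINAL blocking in the subreq thread, an include task that is merely queued and never picked up by a worker could, as I read it, stall delivery indefinitely, so the include has to be guaranteed to execute, whether on a worker, inline, or in a thread created just for it. A generic illustration of that constraint follows; every name in it (run_include, try_idle_worker, include_task) is hypothetical, and none of it is varnish-cache or vmod API.

#include <pthread.h>
#include <stdio.h>

typedef void *task_fn(void *);

/* Stand-in for delivering one ESI include. */
static void *
include_task(void *priv)
{
	printf("include of %s delivered\n", (const char *)priv);
	return (NULL);
}

/* Stub: pretend no idle worker can commit to the task right now. */
static int
try_idle_worker(task_fn *fn, void *priv)
{
	(void)fn;
	(void)priv;
	return (0);
}

static void
run_include(task_fn *fn, void *priv)
{
	pthread_t thr;

	if (try_idle_worker(fn, priv))
		return;		/* a worker is committed to running it */

	/*
	 * No worker available: the task may not just sit in a queue,
	 * so create a dedicated thread and wait for it, or as a last
	 * resort run it inline in the parent's own thread.
	 */
	if (pthread_create(&thr, NULL, fn, priv) == 0)
		(void)pthread_join(thr, NULL);
	else
		(void)fn(priv);
}

int
main(void)
{
	run_include(include_task, "body");
	return (0);
}
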