Commit 31790959 authored by Nils Goroll's avatar Nils Goroll

add a switch to enable serial processing

parent 640cd75e
......@@ -65,6 +65,21 @@ missing error handling in varnish-cache. Notice that the VMOD has no
known way to check for this condition, so please address any blame
appropriately.
.. _vmod_pesi.serial:
VOID serial(BOOL serial=0)
--------------------------
To be called from vcl_deliver {} only; the default argument is
``false``.
If called with a ``true`` argument, no new threads will be started
from this request and all ESI subrequests at the next level only will
be processed by the current thread.
In other words, the setting only affects include processing for the
current response body.
.. _vmod_pesi.version:
STRING version()
......@@ -81,6 +96,19 @@ REQUIREMENTS
The VDP currently requires the Varnish master branch. ...
NOTES
=====
XXX @geoff pls reword
Because ReqAcct includes chunk headers, depending on the order of
events and whether or not we use (partial) sequential delivery (for
example, when no threads are available), ReqAcct adds n x 8 to the net
data size.
INSTALLATION
============
......
varnishtest "basic test for serial processing"
# XXX @geoff please add more
server s1 {
rxreq
expect req.http.esi0 == "foo"
txresp -body {
<html>
Before include
<esi:include src="/body" sr="foo"/>
After include
</html>
}
} -start
server s2 {
rxreq
expect req.url == "/body1"
expect req.http.esi0 != "foo"
txresp -body {
Included file
}
} -start
varnish v1 -arg "-p debug=+syncvsl" -vcl+backend {
import ${vmod_pesi};
sub vcl_recv {
if (req.esi_level > 0) {
set req.url = req.url + req.esi_level;
} else {
set req.http.esi0 = "foo";
}
}
sub vcl_backend_fetch {
if (bereq.url == "/") {
set bereq.backend = s1;
}
else {
set bereq.backend = s2;
}
}
sub vcl_backend_response {
if (bereq.url == "/") {
set beresp.do_esi = true;
}
}
sub vcl_deliver {
pesi.activate();
pesi.serial(true);
}
} -start
client c1 {
txreq -hdr "Host: foo"
rxresp
expect resp.status == 200
expect resp.bodylen == 75
expect resp.body == {
<html>
Before include
Included file
After include
</html>
}
delay .1
# test that there is no difference on miss/hit
txreq -hdr "Host: foo"
rxresp
expect resp.status == 200
expect resp.bodylen == 75
expect resp.body == {
<html>
Before include
Included file
After include
</html>
}
}
# Because ReqAcct includes chunk headers, depending on the order of
# events and whether or not we use (partial) sequential delivery (for
# example, when no threads are available), ReqAcct adds n x 8 to the
# net data size.
#
# in this test case we see either one or two chunk headers in addition
# to the end chunk.
client c1 -run
varnish v1 -expect esi_errors == 0
delay 1
logexpect l1 -v v1 -d 1 -g vxid -q "vxid == 1001" {
expect 0 1001 Begin "^req .* rxreq"
expect * = ReqAcct "^29 0 29 187 (104|96) (291|283)$"
expect 0 = End
} -run
logexpect l2 -v v1 -d 1 -g vxid -q "vxid == 1002" {
expect * 1002 Begin "^bereq "
expect * = End
} -run
logexpect l3 -v v1 -d 1 -g vxid -q "vxid == 1003" {
expect * 1003 Begin "^req .* esi"
expect * = ReqAcct "^0 0 0 0 18 18$"
expect 0 = End
} -run
logexpect l4 -v v1 -d 1 -g vxid -q "vxid == 1004" {
expect * 1004 Begin "^bereq "
expect * = End
} -run
logexpect l5 -v v1 -d 1 -g vxid -q "vxid == 1005" {
expect * 1005 Begin "^req .* rxreq"
# ReqAcct body counts include chunked overhead
# Header bytes is 5 larger than in l1 due to two item X-Varnish hdr
expect * = ReqAcct "^29 0 29 192 (104|96) (296|288)$"
expect 0 = End
} -run
logexpect l6 -v v1 -d 1 -g vxid -q "vxid == 1006" {
expect * 1006 Begin "^req .* esi"
expect * = ReqAcct "^0 0 0 0 18 18$"
expect 0 = End
} -run
## HTTP/2
varnish v1 -cliok "param.set feature +http2"
client c1 {
stream 1 {
txreq -hdr host foo
rxresp
expect resp.status == 200
expect resp.bodylen == 75
expect resp.body == {
<html>
Before include
Included file
After include
</html>
}
} -run
} -run
......@@ -303,6 +303,8 @@ fini_subreq(struct req *req, struct node *node);
static void
fini_data(struct req *req, struct node *node);
/* id for PRIV_TASK */
const void * const priv_task_id_serial = &priv_task_id_serial;
/* shared object globals */
static unsigned loadcnt = 0, warmcnt = 0;
......@@ -851,8 +853,8 @@ ved_task(struct worker *wrk, void *priv)
priv = *(void **)priv;
WS_Release(wrk->aws, 0);
}
CAST_OBJ_NOTNULL(req, priv, REQ_MAGIC);
VSLdbgv(req, "ved_task: req=%p", req);
assert(req->esi_level > 0);
......@@ -1025,6 +1027,21 @@ req_fini(struct req **reqp, struct worker *wrk)
Req_Release(req);
}
static int
want_serial(struct req *preq)
{
struct vmod_priv *vclserial;
struct vrt_ctx dummy_ctx[1];
INIT_OBJ(dummy_ctx, VRT_CTX_MAGIC);
dummy_ctx->req = preq;
dummy_ctx->ws = preq->ws;
vclserial = VRT_priv_task(dummy_ctx, priv_task_id_serial);
return (vclserial != NULL && vclserial->priv == (void *)1);
}
static int
vped_include(struct req *preq, const char *src, const char *host,
struct pesi *pesi, struct node *node)
......@@ -1151,15 +1168,23 @@ vped_include(struct req *preq, const char *src, const char *host,
VSLdbgv(preq, "ved_include: attempt new thread req=%p", req);
if (use_thread_blocking == 0) {
if (0 && Pool_Task_Arg(wrk, TASK_QUEUE_REQ,
if (want_serial(preq)) {
ved_task(wrk, req);
return (0);
}
if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ,
ved_task, &req, sizeof req) == 1)
return (1);
/*
* XXX @geoff add counter
*
* we can not use the self-rescheduling facility of
* Pool_Task_Arg because we cannot unschedule ourself
*/
wrk->task.func = NULL;
wrk->task.priv = NULL;
WS_Release(wrk->aws, 0);
ved_task(wrk, req);
return (0);
}
......@@ -2825,6 +2850,34 @@ vmod_activate(VRT_CTX)
req->filter_list = filters;
}
VCL_VOID
vmod_serial(VRT_CTX, VCL_BOOL b)
{
struct vmod_priv *task;
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
if (ctx->method != VCL_MET_DELIVER) {
VRT_fail(ctx, "pesi.serial() may only be called "
"from vcl_deliver{}");
return;
}
task = VRT_priv_task(ctx, priv_task_id_serial);
if (task == NULL) {
VRT_fail(ctx, "no priv_task");
return;
}
/*
* for now, encode the boolean directly, we will probably need to
* WS_Alloc() something if we need more configuration
*/
task->priv = b ? (void *)1 : (void *)0;
}
/* Event function */
/*
......
......@@ -54,6 +54,18 @@ missing error handling in varnish-cache. Notice that the VMOD has no
known way to check for this condition, so please address any blame
appropriately.
$Function VOID serial(BOOL serial=0)
To be called from vcl_deliver {} only; the default argument is
``false``.
If called with a ``true`` argument, no new threads will be started
from this request and all ESI subrequests at the next level only will
be processed by the current thread.
In other words, the setting only affects include processing for the
current response body.
$Function STRING version()
Return the version string for this VDP.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment