Commit 91da0143 authored by Nils Goroll's avatar Nils Goroll

implement PF_CFG_THREAD, add set(thread, ...)

there also was a stupid precedence error in PF_CFG_DEFAULT :(

docs are still WIP
parent e2b0546e
......@@ -67,8 +67,8 @@ appropriately.
.. _vmod_pesi.set:
VOID set(ENUM {serial} parameter, [BOOL bool])
----------------------------------------------
VOID set(ENUM {serial, thread} parameter, [BOOL bool])
------------------------------------------------------
To be called from ``vcl_deliver {}`` only.
......@@ -87,9 +87,50 @@ error.
processing for the current response body.
It is strongly recommended to _not_ use serial mode from ESI level 0
because the ESI level 0 thread is responsible for sending available
data to the client and thus should be running concurrently to other
parallel ESI threads.
because the ESI level 0 thread can send available data to the client
concurrently to other parallel ESI threads.
* ``thread``, requires *bool* argument
Whether we always request a new thread for includes, default is
``true``.
- ``false``
Only use a new thread if immediately available, process the
include in the same thread otherwise.
- ``true``
Request a new thread, potentially waiting for one to become
available.
For parallel ESI to work as efficiently as possible, it should
traverse the ESI tree *breadth first*, processing any ESI object
completely, with new threads scheduled for any includes
encountered. Completing processing of an ESI object allows for data
from the subtree (the ESI object and anything below) to be sent to
the client concurrently. As soon as ESI object processing is
complete, the respective thread will be returned to the thread pool
and become available for any other varnish task (except for the
request for esi_level 0, which will wait for completion of the
entire ESI request, sending data to the client in parallel).
With this setting at ``true`` (the default), this is always what
happens, but a thread for processing of includes may not be
immediately available, so processing may have to wait if the thread
pool is not sufficiently large.
With this setting at ``false``, include processing happens in the
same thread as if ``serial`` mode had been activated, but only when
no new thread is immediately available. While this may sound like
the more sensible option at first, we did not make this the default
for two reasons:
- except for ESI level 0, the current thread will become available
as soon as ESI processing has completed
*XXX slink to continue*
.. _vmod_pesi.version:
......
VMODENUM(serial, PF_CFG_SERIAL)
VMODENUM(thread, PF_CFG_THREAD)
#undef VMODENUM
......@@ -290,8 +290,8 @@ struct pesi_tree {
/*
 * Default per-request configuration flags: thread mode on, plus
 * block_final / front_push as configured at VMOD load time.
 *
 * NB: the ternary operands must be parenthesized. `|' binds tighter
 * than `?:', so without the parentheses the whole expression would
 * collapse into a single conditional and yield the wrong flag set
 * (the precedence error fixed by this commit).
 */
#define PF_CFG_DEFAULT \
	( PF_CFG_THREAD					\
	| (block_final ? PF_CFG_BLOCK_FINAL : 0)	\
	| (front_push ? PF_CFG_FRONT_PUSH : 0)		\
	)
#define PF_MASK_CFG \
......@@ -1245,14 +1245,39 @@ vped_include(struct req *preq, const char *src, const char *host,
VSLdbgv(preq, "ved_include: attempt new thread req=%p", req);
if ((pesi->flags & PF_CFG_BLOCK_FINAL) == 0) {
if (pesi->flags & PF_CFG_BLOCK_FINAL) {
/* XXX because of T_FINAL blocking in the subreq thread, these
* threads *have* to run.
*
* ordering should ensure that we run top->bottom left-right in
* the ESI tree, so delivery should be able to let T_FINAL
* threads run, and anything but T_FINAL should terminate
* eventually, but if we spend all available threads on T_FINAL,
* we're doomed.
*
* -- find a way to reserve or check for T_FINAL
*
* for now, force the other flags:
* - serial mode off
* - thread mode on
*/
pesi->flags &= ~PF_CFG_SERIAL;
pesi->flags |= PF_CFG_THREAD;
}
if (pesi->flags & PF_CFG_SERIAL) {
ved_task(wrk, req);
return (0);
}
if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ,
ved_task, &req, sizeof req) == 1)
if (pesi->flags & PF_CFG_THREAD) {
req->task.func = ved_task;
req->task.priv = req;
AZ(Pool_Task(wrk->pool, &req->task, TASK_QUEUE_RUSH));
return (1);
}
if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ, ved_task, &req, sizeof req) == 1)
return (1);
pesi->no_thread++;
......@@ -1261,31 +1286,14 @@ vped_include(struct req *preq, const char *src, const char *host,
VXID(req->vsl->wid));
/*
* we can not use the self-rescheduling facility of
* Pool_Task_Arg because we cannot unschedule ourself
* we can not use the self-rescheduling facility of Pool_Task_Arg
* because we cannot unschedule ourself
*/
wrk->task.func = NULL;
wrk->task.priv = NULL;
WS_Release(wrk->aws, 0);
ved_task(wrk, req);
return (0);
}
AN(pesi->flags & PF_CFG_BLOCK_FINAL);
/* XXX because of T_FINAL blocking in the subreq thread,
* these threads *have* to run.
*
* ordering should ensure that we run top->bottom left-right in the ESI
* tree, so delivery should be able to let T_FINAL threads run, and
* anything but T_FINAL should terminate eventually, but if we spend all
* available threads on T_FINAL, we're doomed.
*
* -- find a way to reserve or check for T_FINAL
*/
req->task.func = ved_task;
req->task.priv = req;
AZ(Pool_Task(wrk->pool, &req->task, TASK_QUEUE_RUSH));
return (1);
}
/*--------------------------------------------------------------------*/
......
......@@ -54,7 +54,7 @@ missing error handling in varnish-cache. Notice that the VMOD has no
known way to check for this condition, so please address any blame
appropriately.
$Function VOID set(ENUM { serial } parameter, [BOOL bool])
$Function VOID set(ENUM { serial, thread } parameter, [BOOL bool])
To be called from ``vcl_deliver {}`` only.
......@@ -73,9 +73,50 @@ error.
processing for the current response body.
It is strongly recommended to _not_ use serial mode from ESI level 0
because the ESI level 0 thread is responsible for sending available
data to the client and thus should be running concurrently to other
parallel ESI threads.
because the ESI level 0 thread can send available data to the client
concurrently to other parallel ESI threads.
* ``thread``, requires *bool* argument
Whether we always request a new thread for includes, default is
``true``.
- ``false``
Only use a new thread if immediately available, process the
include in the same thread otherwise.
- ``true``
Request a new thread, potentially waiting for one to become
available.
For parallel ESI to work as efficiently as possible, it should
traverse the ESI tree *breadth first*, processing any ESI object
completely, with new threads scheduled for any includes
encountered. Completing processing of an ESI object allows for data
from the subtree (the ESI object and anything below) to be sent to
the client concurrently. As soon as ESI object processing is
complete, the respective thread will be returned to the thread pool
and become available for any other varnish task (except for the
request for esi_level 0, which will wait for completion of the
entire ESI request, sending data to the client in parallel).
With this setting at ``true`` (the default), this is always what
happens, but a thread for processing of includes may not be
immediately available, so processing may have to wait if the thread
pool is not sufficiently large.
With this setting at ``false``, include processing happens in the
same thread as if ``serial`` mode had been activated, but only when
no new thread is immediately available. While this may sound like
the more sensible option at first, we did not make this the default
for two reasons:
- except for ESI level 0, the current thread will become available
as soon as ESI processing has completed
*XXX slink to continue*
$Function STRING version()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment