Commit 91da0143 authored by Nils Goroll's avatar Nils Goroll

implement PF_CFG_THREAD, add set(thread, ...)

there also was a stupid precedence error in PF_CFG_DEFAULT :(

docs are still WIP
parent e2b0546e
...@@ -67,8 +67,8 @@ appropriately. ...@@ -67,8 +67,8 @@ appropriately.
.. _vmod_pesi.set: .. _vmod_pesi.set:
VOID set(ENUM {serial} parameter, [BOOL bool]) VOID set(ENUM {serial, thread} parameter, [BOOL bool])
---------------------------------------------- ------------------------------------------------------
To be called from ``vcl_deliver {}`` only. To be called from ``vcl_deliver {}`` only.
...@@ -87,9 +87,50 @@ error. ...@@ -87,9 +87,50 @@ error.
processing for the current response body. processing for the current response body.
It is strongly recommended to _not_ use serial mode from ESI level 0 It is strongly recommended to _not_ use serial mode from ESI level 0
because the ESI level 0 thread is responsible for sending available because the ESI level 0 thread can send available data to the client
data to the client and thus should be running concurrently to other concurrently to other parallel ESI threads.
parallel ESI threads.
* ``thread``, requires *bool* argument
Whether we always request a new thread for includes, default is
``true``.
- ``false``
Only use a new thread if immediately available, process the
include in the same thread otherwise.
- ``true``
Request a new thread, potentially waiting for one to become
available.
For parallel ESI to work as efficiently as possible, it should
traverse the ESI tree *breadth first*, processing any ESI object
completely, with new threads scheduled for any includes
encountered. Completing processing of an ESI object allows for data
from the subtree (the ESI object and anything below) to be sent to
the client concurrently. As soon as ESI object processing is
complete, the respective thread will be returned to the thread pool
and become available for any other varnish task (except for the
request for esi_level 0, which will wait for completion of the
entire ESI request, sending data to the client in parallel).
With this setting set to ``true`` (the default), this is always
what happens, but a thread for processing of includes may not be
immediately available and thus may have to wait if the thread pool
is not sufficiently large.
With this setting set to ``false``, include processing happens in the
same thread as if ``serial`` mode had been activated, but only in
the case when there is no new thread available. While this may sound
like the more sensible option at first, we did not make this the
default for two reasons:
- except for ESI level 0, the current thread will become available
as soon as ESI processing has completed
*XXX slink to continue*
.. _vmod_pesi.version: .. _vmod_pesi.version:
......
VMODENUM(serial, PF_CFG_SERIAL) VMODENUM(serial, PF_CFG_SERIAL)
VMODENUM(thread, PF_CFG_THREAD)
#undef VMODENUM #undef VMODENUM
...@@ -288,10 +288,10 @@ struct pesi_tree { ...@@ -288,10 +288,10 @@ struct pesi_tree {
#define PF_CFG_BLOCK_FINAL (1U<<3) #define PF_CFG_BLOCK_FINAL (1U<<3)
#define PF_CFG_FRONT_PUSH (1U<<4) #define PF_CFG_FRONT_PUSH (1U<<4)
#define PF_CFG_DEFAULT \ #define PF_CFG_DEFAULT \
( PF_CFG_THREAD \ ( PF_CFG_THREAD \
| block_final ? PF_CFG_BLOCK_FINAL : 0 \ | (block_final ? PF_CFG_BLOCK_FINAL : 0) \
| front_push ? PF_CFG_FRONT_PUSH : 0 \ | (front_push ? PF_CFG_FRONT_PUSH : 0) \
) )
#define PF_MASK_CFG \ #define PF_MASK_CFG \
...@@ -1245,47 +1245,55 @@ vped_include(struct req *preq, const char *src, const char *host, ...@@ -1245,47 +1245,55 @@ vped_include(struct req *preq, const char *src, const char *host,
VSLdbgv(preq, "ved_include: attempt new thread req=%p", req); VSLdbgv(preq, "ved_include: attempt new thread req=%p", req);
if ((pesi->flags & PF_CFG_BLOCK_FINAL) == 0) { if (pesi->flags & PF_CFG_BLOCK_FINAL) {
if (pesi->flags & PF_CFG_SERIAL) { /* XXX because of T_FINAL blocking in the subreq thread, these
ved_task(wrk, req); * threads *have* to run.
return (0); *
} * ordering should ensure that we run top->bottom left-right in
* the ESI tree, so delivery should be able to let T_FINAL
if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ, * threads run, and anything but T_FINAL should terminate
ved_task, &req, sizeof req) == 1) * eventually, but if we spend all available threads on T_FINAL,
return (1); * we're doomed.
*
pesi->no_thread++; * -- find a way to reserve or check for T_FINAL
VSLb(preq->vsl, SLT_Error, "vdp pesi: No thread available " *
"for ESI subrequest %u, continuing in serial", * for now, force the other flags:
VXID(req->vsl->wid)); * - serial mode off
* - thread mode on
/*
* we can not use the self-rescheduling facility of
* Pool_Task_Arg because we cannot unschedule ourself
*/ */
wrk->task.func = NULL; pesi->flags &= ~PF_CFG_SERIAL;
wrk->task.priv = NULL; pesi->flags |= PF_CFG_THREAD;
WS_Release(wrk->aws, 0); }
if (pesi->flags & PF_CFG_SERIAL) {
ved_task(wrk, req); ved_task(wrk, req);
return (0); return (0);
} }
AN(pesi->flags & PF_CFG_BLOCK_FINAL); if (pesi->flags & PF_CFG_THREAD) {
/* XXX because of T_FINAL blocking in the subreq thread, req->task.func = ved_task;
* these threads *have* to run. req->task.priv = req;
* AZ(Pool_Task(wrk->pool, &req->task, TASK_QUEUE_RUSH));
* ordering should ensure that we run top->bottom left-right in the ESI return (1);
* tree, so delivery should be able to let T_FINAL threads run, and }
* anything but T_FINAL should terminate eventually, but if we spend all
* available threads on T_FINAL, we're doomed. if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ, ved_task, &req, sizeof req) == 1)
* return (1);
* -- find a way to reserve or check for T_FINAL
pesi->no_thread++;
VSLb(preq->vsl, SLT_Error, "vdp pesi: No thread available "
"for ESI subrequest %u, continuing in serial",
VXID(req->vsl->wid));
/*
* we can not use the self-rescheduling facility of Pool_Task_Arg
* because we cannot unschedule ourself
*/ */
req->task.func = ved_task; wrk->task.func = NULL;
req->task.priv = req; wrk->task.priv = NULL;
AZ(Pool_Task(wrk->pool, &req->task, TASK_QUEUE_RUSH)); WS_Release(wrk->aws, 0);
return (1); ved_task(wrk, req);
return (0);
} }
/*--------------------------------------------------------------------*/ /*--------------------------------------------------------------------*/
......
...@@ -54,7 +54,7 @@ missing error handling in varnish-cache. Notice that the VMOD has no ...@@ -54,7 +54,7 @@ missing error handling in varnish-cache. Notice that the VMOD has no
known way to check for this condition, so please address any blame known way to check for this condition, so please address any blame
appropriately. appropriately.
$Function VOID set(ENUM { serial } parameter, [BOOL bool]) $Function VOID set(ENUM { serial, thread } parameter, [BOOL bool])
To be called from ``vcl_deliver {}`` only. To be called from ``vcl_deliver {}`` only.
...@@ -73,9 +73,50 @@ error. ...@@ -73,9 +73,50 @@ error.
processing for the current response body. processing for the current response body.
It is strongly recommended to _not_ use serial mode from ESI level 0 It is strongly recommended to _not_ use serial mode from ESI level 0
because the ESI level 0 thread is responsible for sending available because the ESI level 0 thread can send available data to the client
data to the client and thus should be running concurrently to other concurrently to other parallel ESI threads.
parallel ESI threads.
* ``thread``, requires *bool* argument
Whether we always request a new thread for includes, default is
``true``.
- ``false``
Only use a new thread if immediately available, process the
include in the same thread otherwise.
- ``true``
Request a new thread, potentially waiting for one to become
available.
For parallel ESI to work as efficiently as possible, it should
traverse the ESI tree *breadth first*, processing any ESI object
completely, with new threads scheduled for any includes
encountered. Completing processing of an ESI object allows for data
from the subtree (the ESI object and anything below) to be sent to
the client concurrently. As soon as ESI object processing is
complete, the respective thread will be returned to the thread pool
and become available for any other varnish task (except for the
request for esi_level 0, which will wait for completion of the
entire ESI request, sending data to the client in parallel).
With this setting set to ``true`` (the default), this is always
what happens, but a thread for processing of includes may not be
immediately available and thus may have to wait if the thread pool
is not sufficiently large.
With this setting set to ``false``, include processing happens in the
same thread as if ``serial`` mode had been activated, but only in
the case when there is no new thread available. While this may sound
like the more sensible option at first, we did not make this the
default for two reasons:
- except for ESI level 0, the current thread will become available
as soon as ESI processing has completed
*XXX slink to continue*
$Function STRING version() $Function STRING version()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment