Commit 097e8f92 authored by Nils Goroll's avatar Nils Goroll

introduce an unsigned flags field, some cleanup and prep

this should preserve existing functionality, but is not yet complete with
regard to the configuration from vcl.
parent fb12f204
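
The core of the change is to replace the one-bit has_task bitfield and the
module-level booleans with a single unsigned flags word plus per-bit defines.
A minimal, self-contained sketch of that pattern, reusing the flag values from
the diff below (the struct and function names here are illustrative only, not
part of the module):

/* minimal sketch of the flags-word pattern used in this commit */
#include <assert.h>

#define PF_HAS_TASK	1U		/* state bit, as defined in the diff */
#define PF_CFG_THREAD	(1U<<2)		/* vcl-controlled bit, as in the diff */

struct pesi_sketch {			/* illustrative stand-in for struct pesi */
	unsigned flags;
};

static void
flags_sketch(struct pesi_sketch *p)
{
	p->flags = PF_HAS_TASK | PF_CFG_THREAD;	/* seed defaults */
	assert(p->flags & PF_HAS_TASK);		/* test a bit */
	p->flags &= ~PF_HAS_TASK;		/* clear a bit */
	assert((p->flags & PF_HAS_TASK) == 0);
}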
......@@ -61,16 +61,14 @@
VRT_fail((ctx), "vdp pesi failure: " fmt, __VA_ARGS__)
/*
* whether to use blocking threads for pass (private/hfm/fpm) objects
* whether to use blocking threads for final (private/hfm/fpm) objects
*
* if false, buffering will be used
* XXX make configurable per req/task
*/
static int use_thread_blocking = 0;
static int block_final = 0;
/*
* whether to push bytes early
*
* XXX make configurable per req/task
*/
static int front_push = 0;
......@@ -281,6 +279,26 @@ struct pesi_tree {
int task_finishing;
};
#define PF_HAS_TASK 1U
/* vcl-controlled flags */
#define PF_CFG_SERIAL (1U<<1)
#define PF_CFG_THREAD (1U<<2)
/* undocumented for now */
#define PF_CFG_BLOCK_FINAL (1U<<3)
#define PF_CFG_FRONT_PUSH (1U<<4)
#define PF_CFG_DEFAULT \
( PF_CFG_THREAD \
| (block_final ? PF_CFG_BLOCK_FINAL : 0) \
| (front_push ? PF_CFG_FRONT_PUSH : 0) \
)
#define PF_MASK_CFG \
( PF_CFG_SERIAL \
| PF_CFG_THREAD \
| PF_CFG_BLOCK_FINAL \
| PF_CFG_FRONT_PUSH \
)
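/*
 * Sketch of the intended use, inferred from the rest of this diff (the
 * assignment below is the one from pesi_new()): PF_CFG_DEFAULT seeds the
 * module-wide defaults into a new request's flags,
 *
 *	pesi->flags = PF_HAS_TASK | PF_CFG_DEFAULT;
 *
 * while PF_MASK_CFG names the subset of bits which vcl is allowed to
 * override.
 */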
/*
* per request state
*
......@@ -301,7 +319,7 @@ struct pecx {
struct pesi {
unsigned magic;
#define PESI_MAGIC 0xa6ba54a0
unsigned has_task:1;
unsigned flags;
struct pesi_tree *pesi_tree;
struct worker *wrk;
......@@ -397,7 +415,7 @@ pesi_new(struct ws *ws, struct pesi_tree *pesi_tree)
INIT_OBJ(pesi, PESI_MAGIC);
pesi->pecx->magic = PECX_MAGIC;
pesi->pesi_tree = pesi_tree;
pesi->has_task = 1;
pesi->flags = PF_HAS_TASK | PF_CFG_DEFAULT;
Lck_Lock(&pesi_tree->task_lock);
VTAILQ_INSERT_TAIL(&pesi_tree->task_head, pesi, list);
......@@ -446,7 +464,7 @@ pesi_destroy(struct pesi **pesip)
assert(pesi_tree->task_running >= 0);
if (pesi->has_task == 1)
if (pesi->flags & PF_HAS_TASK)
pesi_tree->task_finishing++;
Lck_Unlock(&pesi_tree->task_lock);
......@@ -464,8 +482,8 @@ task_fini(struct pesi_tree *pesi_tree, struct pesi *pesi)
pesi_tree->task_finishing--;
}
else {
AN(pesi->has_task);
pesi->has_task = 0;
AN(pesi->flags & PF_HAS_TASK);
pesi->flags &= ~PF_HAS_TASK;
}
pesi_tree->task_running--;
if (pesi_tree->task_running == 0) {
......@@ -1113,7 +1131,7 @@ want_serial(struct req *preq)
vclserial = VRT_priv_task(dummy_ctx, priv_task_id_serial);
return (vclserial != NULL && vclserial->priv == (void *)1);
return (vclserial != NULL && vclserial->priv == (void *)PF_CFG_SERIAL);
}
static int
......@@ -1237,7 +1255,7 @@ vped_include(struct req *preq, const char *src, const char *host,
VSLdbgv(preq, "ved_include: attempt new thread req=%p", req);
if (use_thread_blocking == 0) {
if ((pesi->flags & PF_CFG_BLOCK_FINAL) == 0) {
if (want_serial(preq)) {
ved_task(wrk, req);
return (0);
......@@ -1259,7 +1277,7 @@ vped_include(struct req *preq, const char *src, const char *host,
return (0);
}
AN(use_thread_blocking);
AN(pesi->flags & PF_CFG_BLOCK_FINAL);
/* XXX because of T_FINAL blocking in the subreq thread,
* these threads *have* to run.
*
......@@ -2050,7 +2068,7 @@ pesi_buf_bytes(struct req *req, enum vdp_action act, void **priv,
* check unlocked if an attempt to unpend makes any sense at all
*/
AN(tree->front);
if (front_push &&
if ((pesi->flags & PF_CFG_FRONT_PUSH) &&
tree->unpend_owner != NULL &&
(tree->front->parent == NULL ||
tree->front == parent ||
......@@ -2709,7 +2727,7 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
}
}
if (is_final && !is_esi && !use_thread_blocking) {
if (is_final && !is_esi && (pesi->flags & PF_CFG_BLOCK_FINAL) == 0) {
VSLdbg(req, "vped_deliver: T_NEXUS buffering no ESI");
VDP_Push(req, &VDP_pesi_buf, pesi);
......@@ -2973,12 +2991,9 @@ vmod_serial(VRT_CTX, VCL_BOOL b)
return;
}
/*
* for now, encode the boolean directly, we will probably need to
* WS_Alloc() something if we need more configuration
*/
task->priv = b ? (void *)1 : (void *)0;
/* for now, just put the flags in the priv */
assert(sizeof task->priv >= sizeof(unsigned));
task->priv = b ? (void *)PF_CFG_SERIAL : (void *)0;
}
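/*
 * Possible follow-up, not part of this commit (the commit message notes
 * that the configuration from vcl is not complete yet): the bits stored in
 * task->priv could be folded back into pesi->flags through the config mask,
 * roughly
 *
 *	task = VRT_priv_task(ctx, priv_task_id_serial);
 *	if (task != NULL)
 *		pesi->flags = (pesi->flags & ~PF_MASK_CFG) |
 *		    ((unsigned)(uintptr_t)task->priv & PF_MASK_CFG);
 *
 * with names following vmod_serial() and want_serial() above.
 */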
/* Event function */
......