Commit f1254975 authored by Nils Goroll

we cannot run ved_tasks in a parent's thread

... because of T_FINAL.

Because the parent's thread (except for the root) will eventually
terminate, this is otherwise all good, except for the case where we
spend all available threads on T_FINAL.

Leave the hard problems for later, *sigh*
parent 6632dc5d
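
In short: Pool_Task_Arg() falls back to the caller when no worker thread
is idle, while Pool_Task() only ever queues. A minimal sketch of the
dispatch change (a fragment, not compilable on its own; it assumes the
Varnish-internal Pool_Task()/Pool_Task_Arg() semantics as used in this
diff):

	/* Before: when no idle worker is available, Pool_Task_Arg()
	 * returns 0 and the caller runs the subrequest itself -- in the
	 * parent's thread, where a T_FINAL subtree can block forever. */
	if (!Pool_Task_Arg(wrk, TASK_QUEUE_REQ, ved_task, &req, sizeof(req)))
		ved_task(wrk, &req);

	/* After: always queue the subrequest for its own worker thread;
	 * AZ() asserts that queueing succeeded. */
	req->task.func = ved_task;
	req->task.priv = req;
	AZ(Pool_Task(wrk->pool, &req->task, TASK_QUEUE_RUSH));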
@@ -822,8 +822,7 @@ ved_task(struct worker *wrk, void *priv)
 	enum req_fsm_nxt s;
 
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
-	req = *((struct req **)priv);
-	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+	CAST_OBJ_NOTNULL(req, priv, REQ_MAGIC);
 	VSLdbgv(req, "ved_task: req=%p", req);
 	assert(req->esi_level > 0);
@@ -840,7 +839,6 @@ ved_task(struct worker *wrk, void *priv)
 	CHECK_OBJ_NOTNULL(node, NODE_MAGIC);
 	CHECK_OBJ_NOTNULL(pesi_tree->tree, BYTES_TREE_MAGIC);
 
-	WS_Release(wrk->aws, sizeof(req));
 	THR_SetRequest(req);
 	VSLb_ts_req(req, "Start", W_TIM_real(wrk));
@@ -944,7 +942,6 @@ vped_include(struct req *preq, const char *src, const char *host,
 	struct req *req;
 	struct bytes_tree *tree;
 	struct pesi *pesi2;
-	int parallel = 1;
 
 	VSLdbg(preq, "ved_include: enter");
 	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
@@ -1058,23 +1055,26 @@ vped_include(struct req *preq, const char *src, const char *host,
 	assert(node->type == T_NEXUS);
 	node->req = req;
 
 	// XXX still needed?
 	preq->wrk->task.func = NULL;
 
 	if (wrk->aws->r != NULL)
 		WS_ReleaseP(wrk->aws, wrk->aws->r);
 
-	VSLdbgv(preq, "ved_include: attempt new thread req=%p", req);
-	if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ, ved_task, &req, sizeof(req)))
-		VSLdbgv(preq, "Parallel ESI request started on a new thread "
-		    "xid=%d", VXID(req->vsl->wid));
-	else {
-		parallel = 0;
-		VSLb(preq->vsl, SLT_VCL_Error, "No idle threads available for "
-		    "parallel ESI request xid=%d, continued in the same "
-		    "thread", VXID(req->vsl->wid));
-		ved_task(wrk, &req);
-		VSLdbgv(preq, "ved_include: done in same thread req=%p", req);
-	}
-
-	VSLdbgv(preq, "ved_include: exit parallel=%d", parallel);
-	return (parallel);
+	/* XXX because of T_FINAL blocking in the subreq thread,
+	 * these threads *have* to run.
+	 *
+	 * ordering should ensure that we run top->bottom left-right in the ESI
+	 * tree, so delivery should be able to let T_FINAL threads run, and
+	 * anything but T_FINAL should terminate eventually, but if we spend all
+	 * available threads on T_FINAL, we're doomed.
+	 *
+	 * -- find a way to reserve or check for T_FINAL
+	 */
+	req->task.func = ved_task;
+	req->task.priv = req;
+	AZ(Pool_Task(wrk->pool, &req->task, TASK_QUEUE_RUSH));
+
+	return (1);
 }
 
 /*--------------------------------------------------------------------*/
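
The "find a way to reserve or check for T_FINAL" note above could, for
instance, become a capped count of in-flight T_FINAL tasks, so they can
never consume every worker thread. A hypothetical sketch, not part of
this commit; tfinal_running, tfinal_limit, tfinal_reserve() and
tfinal_release() are all made-up names:

	#include <assert.h>
	#include <pthread.h>

	/* Hypothetical: cap concurrent T_FINAL tasks below the pool size. */
	static pthread_mutex_t tfinal_mtx = PTHREAD_MUTEX_INITIALIZER;
	static unsigned tfinal_running = 0;
	static unsigned tfinal_limit = 0;	/* set from pool size at startup */

	static int
	tfinal_reserve(void)
	{
		int ok;

		pthread_mutex_lock(&tfinal_mtx);
		ok = (tfinal_running < tfinal_limit);
		if (ok)
			tfinal_running++;
		pthread_mutex_unlock(&tfinal_mtx);
		return (ok);	/* 0: do not start another T_FINAL task */
	}

	static void
	tfinal_release(void)
	{
		pthread_mutex_lock(&tfinal_mtx);
		assert(tfinal_running > 0);
		tfinal_running--;
		pthread_mutex_unlock(&tfinal_mtx);
	}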
@@ -2233,18 +2233,7 @@ vdp_pesi_bytes(struct req *req, enum vdp_action act, void **priv,
 				VSLdbgv(req, "ved_vdp: vped_include()=%d",
 				    parallel);
 				/* XXX error if parallel < 0 */
-#if 0
-				// slink: shouldn't ved_task work irrespective
-				// of being in a different thread?
-				if (!parallel) {
-					VSLdbgv(req,
-					    "ved_vdp: pushing VDP tree=%p",
-					    tree);
-					XXXAZ(push_vdps_esi(req));
-				}
-#else
-				AN(parallel);
-#endif
+				assert(parallel == 1);
 				Debug("INCL [%s][%s] END\n", q, pecx->p);
 				pecx->p = r + 1;
 				break;
@@ -2322,7 +2311,8 @@ vdp_pesi_bytes(struct req *req, enum vdp_action act, void **priv,
 #else
 			(void)(Lck_CondWait(&tree->cond,
 			    &tree->tree_lock,
-			    VTIM_real() + 0.1));
+			    VTIM_real() + 2));
+			AZ(errno);
 #endif
 			bytes_unpend(req, tree);
 		}
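
On the Lck_CondWait() change above: pthread condition functions report
errors via their return value (0 or ETIMEDOUT), not via errno, so
AZ(errno) may pass or fail for unrelated reasons. A sketch of checking
the status directly, assuming Lck_CondWait() forwards
pthread_cond_timedwait()'s return code:

	/* needs <errno.h> for ETIMEDOUT */
	int r = Lck_CondWait(&tree->cond, &tree->tree_lock,
	    VTIM_real() + 2);
	assert(r == 0 || r == ETIMEDOUT);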