Commit 178fcb8e authored by Nils Goroll

WIP push bytes up one parent only

For nexus nodes (= ESI subrequests), we fini the pesi vdp (and, conceptually, the buf vdp as well, but the details of that still need to be worked out) once parsing is done, so that any other vdps remain in place.

We also push a vdp which pushes bytes up one parent, like ved_ved in varnish-cache.

So we should now have a VDP chain for each subrequest that basically matches that of varnish-cache.
parent 33a3d782
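
For orientation, the V2P filter added below follows the same forwarding pattern as ved_ved / ved_bytes in varnish-cache's built-in ESI delivery: the parent request is handed over as the VDP's private pointer when the filter is pushed, and the bytes callback accounts the bytes on the subrequest before passing them into the parent's VDP chain. A minimal sketch of that pattern, assuming the standard cache/cache.h and cache/cache_filter.h API (names here are illustrative; the committed code is in the diff):

#include "cache/cache.h"
#include "cache/cache_filter.h"

/* forward every byte produced by a subrequest into its parent's chain */
static int v_matchproto_(vdp_bytes_f)
toparent_bytes(struct req *req, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct req *preq;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CAST_OBJ_NOTNULL(preq, *priv, REQ_MAGIC);	/* priv is the parent req */
	req->acct.resp_bodybytes += len;		/* account on the subrequest */
	return (VDP_bytes(preq, act, ptr, len));	/* hand off one level up */
}

static const struct vdp toparent_vdp = {
	.name =		"to-parent",
	.bytes =	toparent_bytes,
};

/* pushed on the subrequest with the parent request as private state:
 *	AZ(VDP_Push(subreq, &toparent_vdp, parent_req));
 */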
@@ -291,6 +291,39 @@ static struct vsc_seg *vsc_seg = NULL;
static struct mempool *mempool = NULL;
static unsigned node_alloc_sz;
/* ------------------------------------------------------------
* basically ved_ved / ved_bytes without the ecx
* (may be an intermediate solution)
*/
static int v_matchproto_(vdp_fini_f)
vped_toparent_fini(struct req *req, void **priv)
{
(void)req;
*priv = NULL;
return (0);
}
static int v_matchproto_(vdp_bytes_f)
vped_toparent_bytes(struct req *req, enum vdp_action act, void **priv,
const void *ptr, ssize_t len)
{
struct req *preq;
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CAST_OBJ_NOTNULL(preq, *priv, REQ_MAGIC);
req->acct.resp_bodybytes += len;
return (VDP_bytes(preq, act, ptr, len));
}
static const struct vdp vped_toparent = {
.name = "V2P",
.bytes = vped_toparent_bytes,
.fini = vped_toparent_fini,
};
/* ------------------------------------------------------------
* pesi tasks
*
@@ -642,13 +675,22 @@ set_delivered(struct bytes_tree *tree, struct node *node)
node->state == ST_CLOSED ||
node->state == ST_UNPENDING);
if (node->type == T_NEXUS &&
node->nexus.oc != NULL) {
assert(node->state == ST_CLOSED);
if (node->type == T_NEXUS) {
req = tree->root->req;
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
CHECK_OBJ_NOTNULL(req->wrk, WORKER_MAGIC);
(void) HSH_DerefObjCore(req->wrk, &node->nexus.oc, 0);
if (node->nexus.oc != NULL) {
assert(node->state == ST_CLOSED);
(void) HSH_DerefObjCore(req->wrk, &node->nexus.oc, 0);
}
CHECK_OBJ_NOTNULL(node->req, REQ_MAGIC);
if (node->parent != NULL) {
VDP_close(node->req);
req_fini(node->req, req->wrk);
node->req = NULL;
}
}
node->state = ST_DELIVERED;
@@ -846,10 +888,14 @@ ved_task(struct worker *wrk, void *priv)
AZ(pthread_cond_signal(&node->subreq.cond));
Lck_Unlock(&pesi_tree->tree->tree_lock);
break;
default:
req_fini(req, wrk);
case T_NEXUS:
// moved to set_delivered
// req_fini(req, wrk);
pesi = NULL;
task_fini(pesi_tree, req->transport_priv);
break;
default:
INCOMPL();
}
wrk->task.func = NULL;
@@ -987,6 +1033,7 @@ vped_include(struct req *preq, const char *src, const char *host,
req->ws_req = WS_Snapshot(req->ws);
assert(node->type == T_NEXUS);
node->req = req;
preq->wrk->task.func = NULL;
@@ -1456,6 +1503,8 @@ push_data(struct req *req, struct bytes_tree *tree,
}
/* pesi_buf is bypassed while we run. */
// XXX TODO return (VDP_bytes(node->req, act, p, node->data.len));
// does not work yet due to CRC
return (VDP_bytes(req, act, p, node->data.len));
}
@@ -2033,8 +2082,11 @@ vdp_pesi_fini(struct req *req, void **priv)
node = pecx->node;
CHECK_OBJ_NOTNULL(node, NODE_MAGIC);
assert(pesi == req->transport_priv);
if (req->esi_level > 0) {
*priv = NULL;
req->transport_priv = NULL;
if (node->state == ST_OPEN || node->state == ST_PRIVATE) {
/* node had no esi */
@@ -2323,6 +2375,8 @@ vdp_pesi_bytes(struct req *req, enum vdp_action act, void **priv,
* we need to wait for completion here because at vdp
* fini time, the V1D is already closed
*/
// XXX TODO vped_close_pesi_vdps(req);
pesi->bypass = 1;
bytes_unpend(req, tree);
while (!tree->retval
@@ -2390,6 +2444,36 @@ vped_reembark(struct worker *wrk, struct req *req)
Lck_Unlock(&req->sp->mtx);
}
static void
vped_close_pesi_vdps(struct req *req)
{
struct vdp_entry *vdpe;
struct vdp_ctx *vdc;
vdc = req->vdc;
vdpe = VTAILQ_FIRST(&vdc->vdp);
CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
assert(vdpe->vdp == &VDP_pesi);
AN(vdpe->vdp->fini);
AZ(vdpe->vdp->fini(req, &vdpe->priv));
AZ(vdpe->priv);
VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
vdpe = VTAILQ_FIRST(&vdc->vdp);
CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
assert(vdpe->vdp == &VDP_pesi_buf);
#if XXXTODO_WHEN_REMOVED_GZIP_MAGIC_FROM_BUF_BYTES
AN(vdpe->vdp->fini);
AZ(vdpe->vdp->fini(req, &vdpe->priv));
AZ(vdpe->priv);
VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
vdpe = VTAILQ_FIRST(&vdc->vdp);
CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
#endif
}
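
As an aside on vped_close_pesi_vdps() above: it pops only the pesi entry (and, once the gzip handling is out of the buffer VDP, the pesi_buf entry) from the head of the request's VDP list, running each entry's fini callback while leaving the remaining filters in place for topreq delivery. The repeated pop step could be factored into a small helper along these lines (hypothetical helper, sketched against the same vdp_entry / VTAILQ structures used above; not part of the commit):

static void
pop_head_vdp(struct req *req, const struct vdp *expect)
{
	struct vdp_entry *vdpe;

	vdpe = VTAILQ_FIRST(&req->vdc->vdp);
	CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
	assert(vdpe->vdp == expect);	/* only ever pop our own filters */
	if (vdpe->vdp->fini != NULL)
		AZ(vdpe->vdp->fini(req, &vdpe->priv));
	AZ(vdpe->priv);
	VTAILQ_REMOVE(&req->vdc->vdp, vdpe, list);
}

With such a helper, vped_close_pesi_vdps() reduces to pop_head_vdp(req, &VDP_pesi) plus, eventually, pop_head_vdp(req, &VDP_pesi_buf).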
static void v_matchproto_(vtr_deliver_f)
vped_deliver(struct req *req, struct boc *boc, int wantbody)
{
@@ -2410,12 +2494,14 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
tree = pesi->pesi_tree->tree;
if (wantbody == 0)
return;
goto clean; // XXX
VSLdbgv(req, "vped_deliver: ObjGetLen=%lu",
ObjGetLen(req->wrk, req->objcore));
if (boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0)
return;
goto clean; // XXX
node = pesi->pecx->node;
if ((req->objcore->flags & OC_F_FINAL) != 0 &&
!ObjHasAttr(req->wrk, req->objcore, OA_ESIDATA)) {
@@ -2424,8 +2510,6 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
/* XXX WIP / STILL HACKY */
XXXAZ(push_vdps_NOesi(req));
node = pesi->pecx->node;
/* XXX TODO CHANGE NODE TYPES -- this is a NEXUS atm */
assert(node->type == T_NEXUS);
assert(node->state == ST_PRIVATE);
@@ -2471,8 +2555,6 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
AZ(req->objcore->flags & OC_F_FINAL);
XXXAZ(push_vdps_NOesi(req));
node = pesi->pecx->node;
/* XXX TODO CHANGE NODE TYPES -- this is a NEXUS atm */
assert(node->type == T_NEXUS);
assert(node->state == ST_PRIVATE);
@@ -2524,12 +2606,24 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
if (req->objcore->flags & OC_F_FAILED) {
/* No way of signalling errors in the middle of
the ESI body. Omit this ESI fragment. */
return;
goto clean; // XXX
}
}
XXXAZ(push_vdps_esi(req));
AN(node->parent);
AZ(VDP_Push(req, &vped_toparent, node->parent->req));
(void)VDP_DeliverObj(req);
VDP_close(req);
/*
* was: VDP_close(req);
*
* only close the pesi VDPs, the others are to run from topreq delivery
*
* XXX cleanup
*/
clean:
vped_close_pesi_vdps(req);
}
static const struct transport VPED_transport = {
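Taken together, the intended byte path is roughly the following, one V2P hop per ESI level, analogous to ved_ved / ved_bytes in varnish-cache (a rough illustration only; the exact chain depends on VCL and on the response being delivered):

/*
 * subreq, esi_level N:    [subreq VDPs ...] -> V2P
 *                                               |
 * parent, esi_level N-1:  [parent VDPs ...] -> V2P
 *                                               |
 * topreq, esi_level 0:    [topreq VDPs ...] -> transport (V1D / H2)
 *
 * each V2P accounts the bytes on its own request and hands them to its
 * immediate parent's chain via VDP_bytes(parent_req, ...), while the
 * pesi VDPs are closed via vped_close_pesi_vdps() once parsing is done.
 */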