Add VDP_END action for delivery processors

VDP_END marks the end of successful processing. It is issued by
VDP_DeliverObj() and may also be sent downstream by processors ending
the stream (by returning a value != 0).

VDP_END must be received at most once per processor, so any VDP sending
it downstream must itself not forward it a second time.
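
For a delivery processor author, the contract is simple. A minimal
sketch of a conforming pass-through VDP (the function name is
hypothetical, the signature follows the existing vdp bytes callbacks):

    static int
    xmpl_passthru_bytes(struct req *req, enum vdp_action act, void **priv,
        const void *ptr, ssize_t len)
    {
            (void)priv;
            /* Forward whatever arrives unchanged: since VDP_END can only
             * be received once, it is also sent downstream only once. */
            return (VDP_bytes(req, act, ptr, len));
    }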

* VDP_bytes()

The only relevant change is the assertion that VDP_END must not be
received by any VDP more than once. This comes with a minor
restructuring of the handling of the three actions.
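
Read without diff markers, the new action handling in VDP_bytes()
amounts to the following (simplified from the hunk below):

    /* at most one VDP_END call per entry */
    assert(vdpe->end == VDP_NULL);
    if (act == VDP_NULL)
            assert(len > 0);
    else if (act == VDP_END)
            vdpe->end = VDP_END;    /* latch: this entry has seen VDP_END */
    else
            assert(act == VDP_FLUSH);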

* VDP_DeliverObj()

Here, VDP_END is pushed down after successful iteration over the
object. We could also send the last storage segment with VDP_END, but
this would require a change in all stevedores. Unless there is a good
case for it, I do not see much value in this minor optimization.
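
One consequence worth spelling out: a processor that wants to append
data after the object body (a trailer, a checksum, ...) can now do so
from its bytes function when it sees VDP_END. A hypothetical sketch
(names and trailer bytes invented for illustration):

    static int
    xmpl_trailer_bytes(struct req *req, enum vdp_action act, void **priv,
        const void *ptr, ssize_t len)
    {
            static const char trailer[] = "trailer\n";
            int retval;

            (void)priv;
            if (act != VDP_END)
                    return (VDP_bytes(req, act, ptr, len));
            /* last call: flush any remaining payload, then let the
             * trailer carry the single VDP_END downstream */
            retval = len > 0 ? VDP_bytes(req, VDP_FLUSH, ptr, len) : 0;
            if (retval == 0)
                    retval = VDP_bytes(req, VDP_END, trailer,
                        sizeof trailer - 1);
            return (retval);
    }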

* ESI:

In ved_vdp_esi_bytes we now push down VDP_END whenever we are done
with an ESI object at esi_level == 0.

ved_bytes() handles the pushes from higher esi levels downwards, so
it now changes VDP_END into VDP_FLUSH. We need to remove the
additional VDP_FLUSH from ved_deliver because the VDP_END from
VDP_DeliverObj() needs to be the last bit sent.
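
For reference, the resulting ved_bytes() boils down to this
(simplified from the hunk below):

    static int
    ved_bytes(struct req *req, struct ecx *ecx, enum vdp_action act,
        const void *ptr, ssize_t len)
    {
            req->acct.resp_bodybytes += len;
            /* includes run at esi_level > 0 and must not terminate the
             * parent request's chain, so VDP_END is demoted to VDP_FLUSH */
            if (act == VDP_END)
                    act = VDP_FLUSH;
            return (VDP_bytes(ecx->preq, act, ptr, len));
    }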

The gzgz VDP actually does something that would be impossible for an
esi_level == 0 VDP: push bytes from _fini. This could be reworked, but
there is no need to.

* range VDP

Here we now send VDP_END with the last segment, the one with which the
end of the range is hit.

We also take the opportunity to eliminate null VDP_bytes() calls before
the range is reached.
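
Pieced together from the hunk below, the tail of vrg_range_bytes() now
reads roughly:

    vrg_priv->range_off += len;
    if (vrg_priv->range_off >= vrg_priv->range_high)
            act = VDP_END;          /* this push covers the end of the range */
    if (l > 0)
            retval = VDP_bytes(req, act, p, l);
    else if (l == 0 && act > VDP_NULL)
            retval = VDP_bytes(req, act, p, 0);
    /* a non-zero return stops the object iteration once the range is done */
    return (retval || act == VDP_END ? 1 : 0);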

* rot13 debug VDP

Here we need to ensure that we pass on VDP_END.
parent c5e9cf1c
@@ -45,6 +45,13 @@
* VDP_bytes will return that value directly without calling the next
* processor.
*
* VDP_END marks the end of successful processing, it is issued by
* VDP_DeliverObj() and may also be sent downstream by processors ending the
* stream (for return value != 0)
*
* VDP_END must at most be received once per processor, so any VDP sending it
* downstream must itself not forward it a second time.
*
* Valid return values (of VDP_bytes and any VDP function):
* r < 0: Error, breaks out early on an error condition
* r == 0: Continue
@@ -59,15 +66,23 @@ VDP_bytes(struct req *req, enum vdp_action act, const void *ptr, ssize_t len)
CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
vdc = req->vdc;
assert(act == VDP_NULL || act == VDP_FLUSH);
if (vdc->retval)
return (vdc->retval);
vdpe = vdc->nxt;
CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
vdc->nxt = VTAILQ_NEXT(vdpe, list);
assert(act > VDP_NULL || len > 0);
/* at most one VDP_END call */
assert(vdpe->end == VDP_NULL);
if (act == VDP_NULL)
assert(len > 0);
else if (act == VDP_END)
vdpe->end = VDP_END;
else
assert(act == VDP_FLUSH);
/* Call the present layer, while pointing to the next layer down */
vdc->nxt = VTAILQ_NEXT(vdpe, list);
retval = vdpe->vdp->bytes(req, act, &vdpe->priv, ptr, len);
if (retval && (vdc->retval == 0 || retval < vdc->retval))
vdc->retval = retval; /* Latch error value */
@@ -158,6 +173,8 @@ VDP_DeliverObj(struct req *req)
final = req->objcore->flags & (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP)
? 1 : 0;
r = ObjIterate(req->wrk, req->objcore, req, vdp_objiterator, final);
if (r == 0)
r = VDP_bytes(req, VDP_END, NULL, 0);
if (r < 0)
return (r);
return (0);
......
@@ -413,17 +413,20 @@ ved_vdp_esi_bytes(struct req *req, enum vdp_action act, void **priv,
/* MOD(2^32) length */
vle32enc(tailbuf + 9, ecx->l_crc);
(void)VDP_bytes(req, VDP_NULL, tailbuf, 13);
retval = VDP_bytes(req, VDP_END, tailbuf, 13);
} else if (ecx->pecx != NULL) {
ecx->pecx->crc = crc32_combine(ecx->pecx->crc,
ecx->crc, ecx->l_crc);
ecx->pecx->l_crc += ecx->l_crc;
retval = VDP_bytes(req, VDP_FLUSH, NULL, 0);
} else {
retval = VDP_bytes(req, VDP_END, NULL, 0);
}
retval = VDP_bytes(req, VDP_FLUSH, NULL, 0);
ecx->state = 99;
return (retval);
case 3:
case 4:
assert(act != VDP_END);
/*
* There is no guarantee that the 'l' bytes are all
* in the same storage segment, so loop over storage
@@ -473,6 +476,8 @@ ved_bytes(struct req *req, struct ecx *ecx, enum vdp_action act,
const void *ptr, ssize_t len)
{
req->acct.resp_bodybytes += len;
if (act == VDP_END)
act = VDP_FLUSH;
return (VDP_bytes(ecx->preq, act, ptr, len));
}
@@ -757,6 +762,12 @@ ved_gzgz_fini(struct req *req, void **priv)
CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
*priv = NULL;
/* XXX
* this works due to the esi layering, a VDP pushing bytes from _fini
* will otherwise have its own _bytes method called.
*
* Could rewrite to use VDP_END
*/
(void)ved_bytes(req, foo->ecx, VDP_FLUSH, NULL, 0);
icrc = vle32dec(foo->tailbuf);
@@ -857,9 +868,6 @@ ved_deliver(struct req *req, struct boc *boc, int wantbody)
req->doclose = SC_OVERLOAD;
}
if (i == 0)
i = VDP_bytes(req, VDP_FLUSH, NULL, 0);
if (i && req->doclose == SC_NULL)
req->doclose = SC_REM_CLOSE;
......
@@ -99,6 +99,7 @@ void VRT_RemoveVFP(VRT_CTX, const struct vfp *);
enum vdp_action {
VDP_NULL, /* Input buffer valid after call */
VDP_FLUSH, /* Input buffer will be invalidated */
VDP_END, /* Last buffer or after, implies VDP_FLUSH */
};
typedef int vdp_init_f(struct req *, void **priv);
@@ -123,6 +124,7 @@ struct vdp {
struct vdp_entry {
unsigned magic;
#define VDP_ENTRY_MAGIC 0x353eb781
enum vdp_action end; // VDP_NULL or VDP_END
const struct vdp *vdp;
void *priv;
VTAILQ_ENTRY(vdp_entry) list;
......
@@ -84,13 +84,14 @@ vrg_range_bytes(struct req *req, enum vdp_action act, void **priv,
l = vrg_priv->range_high - vrg_priv->range_off;
if (l > len)
l = len;
vrg_priv->range_off += len;
if (vrg_priv->range_off >= vrg_priv->range_high)
act = VDP_END;
if (l > 0)
retval = VDP_bytes(req, act, p, l);
else if (act > VDP_NULL)
else if (l == 0 && act > VDP_NULL)
retval = VDP_bytes(req, act, p, 0);
vrg_priv->range_off += len;
return (retval ||
vrg_priv->range_off >= vrg_priv->range_high ? 1 : 0);
return (retval || act == VDP_END ? 1 : 0);
}
/*--------------------------------------------------------------------*/
......
@@ -117,12 +117,13 @@ xyzzy_rot13_bytes(struct req *req, enum vdp_action act, void **priv,
const char *pp;
int i, j, retval = 0;
(void)act;
AN(priv);
AN(*priv);
AN(ptr);
if (len <= 0)
return (0);
return (VDP_bytes(req, act, ptr, len));
AN(ptr);
if (act != VDP_END)
act = VDP_FLUSH;
q = *priv;
pp = ptr;
@@ -134,14 +135,14 @@ xyzzy_rot13_bytes(struct req *req, enum vdp_action act, void **priv,
else
q[i] = pp[j];
if (i == ROT13_BUFSZ - 1) {
retval = VDP_bytes(req, VDP_FLUSH, q, ROT13_BUFSZ);
retval = VDP_bytes(req, act, q, ROT13_BUFSZ);
if (retval != 0)
return (retval);
i = -1;
}
}
if (i >= 0)
retval = VDP_bytes(req, VDP_FLUSH, q, i + 1L);
retval = VDP_bytes(req, act, q, i + 1L);
return (retval);
}
......