Finish implementation of onerror=continue

This concludes the fix for !11

Some bits were already implemented in previous commits.

See https://github.com/varnishcache/varnish-cache/issues/3938 for why
this took so long.

Also implemented a fix for https://github.com/varnishcache/varnish-cache/issues/3939
(which has not yet been fixed in varnish-cache).
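
For reference, a minimal sketch of the semantics being implemented
(server name, URL and test skeleton are illustrative only; the feature
flag and the onerror attribute are taken from the test changes below):
with -p feature=+esi_include_onerror, an <esi:include> carrying
onerror="continue" makes a failing include non-fatal, while without
the attribute the error is latched and delivery of the parent object
is aborted. Because pESI processes includes in parallel, the error may
be detected earlier, so less of the surrounding content can be
delivered than with serial ESI (see the changelog entry below).

	# hypothetical VTC fragment; /broken stands in for an include
	# that fails during delivery
	server s1 {
		rxreq
		txresp -body {
		  before <esi:include src="/broken" onerror="continue"/> after
		}
	} -start

	# the onerror attribute is only honored with the feature flag set
	varnish v1 -arg "-p feature=+esi_include_onerror" -vcl+backend {
		import ${vmod_pesi};
		include "debug.inc.vcl";
	} -start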
@@ -22,6 +22,11 @@ NEXT
 
 .. !13 was a regression introduced since 7.2, so it does not qualify
    as a change herein
 
+* Implement ``onerror`` attribute like varnish-cache (`!11`_), except
+  for one difference: Because pESI works in parallel, it runs into
+  errors earlier and thus may output less data than serial ESI in
+  Varnish-Cache.
+
 * Achieved an "all green" coverage report (>95% by lines, 100% by
   functions)
@@ -43,6 +48,7 @@ NEXT
 
 * Adjusted to vxid and other changes in varnish-cache
 
+.. _`!11`: https://gitlab.com/uplex/varnish/libvdp-pesi/-/issues/11
 .. _`!14`: https://gitlab.com/uplex/varnish/libvdp-pesi/-/issues/14
 
 1097f6f48a8ea89fed89227965d94f630fb93c1f / 7.2 branch
@@ -7,8 +7,8 @@ server s1 {
 	txresp -body {
 	  <html>
 	  Before include
-	  <esi:include src="/bodyA" sr="foo"/>
-	  <esi:include src="/bodyB"/>
+	  <esi:include src="/bodyA" sr="foo" onerror="continue"/>
+	  <esi:include src="/bodyB" onerror="continue"/>
 	  After include
 	  </html>
 	}
@@ -34,7 +34,7 @@ server s3 {
 	expect_close
 } -start
 
-varnish v1 -vcl+backend {
+varnish v1 -arg "-p feature=+esi_include_onerror" -vcl+backend {
 	import ${vmod_pesi};
 	import ${vmod_pesi_debug};
 	include "debug.inc.vcl";
@@ -40,6 +40,7 @@ client c1 {
 	rxresphdrs
 	expect resp.status == 200
 	rxchunk
+	rxchunk
 	expect_close
 	expect resp.body == "before "
 } -run
@@ -52,6 +53,7 @@ client c1 {
 	rxresphdrs
 	expect resp.status == 200
 	rxchunk
+	rxchunk
 	expect_close
 	expect resp.body ~ "^(before )?$"
 } -run
@@ -283,7 +283,7 @@ vped_task(struct worker *wrk, void *priv)
 
 static int
 vped_include(struct req *preq, const char *src, const char *host,
-    struct pesi *pesi, struct node *node, int gzip)
+    struct pesi *pesi, struct node *node, int gzip, int incl_cont)
 {
 	struct worker *wrk;
 	struct sess *sp;
@@ -325,6 +325,7 @@ vped_include(struct req *preq, const char *src, const char *host,
 		tree_latch_error(tree, -1);
 		return (-1);
 	}
 
+	pesi2->pecx->incl_cont = incl_cont;
 	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);
 	VSLdbgv(preq, "vped_include: new xid=%ju", VXID(req->vsl->wid));
@@ -990,14 +991,14 @@ vdp_pesi_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
 			    T_NEXUS, ST_PRIVATE);
 			CHECK_OBJ_NOTNULL(child, NODE_MAGIC);
-			pesi->pecx->incl_cont = incl_cont;
 
 			VSLdbgv(vdx, "vped_vdp: call vped_include "
 			    "incl_cont=%d", incl_cont);
 			parallel =
 			    vped_include(req, (const char*)q,
 				(const char*)pecx->p,
 				pesi, child,
-				node->nexus.gzip.is
+				node->nexus.gzip.is,
+				incl_cont
 				);
 			VSLdbgv(vdx, "vped_vdp: vped_include()=%d",
 			    parallel);
@@ -1242,6 +1243,13 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
 	    req, boc, wantbody, pesi->pecx->incl_cont);
 	assert(parent->type == T_NEXUS);
 
+	if (!pesi->pecx->incl_cont &&
+	    req->resp->status != 200 &&
+	    req->resp->status != 204) {
+		tree_latch_error(tree, -1);
+		return;
+	}
+
 	if (wantbody == 0) {
 		(void) VDP_Close(req->vdc, req->objcore, boc);
 		return;
@@ -1268,8 +1276,8 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
 
 	ObjWaitState(req->objcore, BOS_FINISHED);
 	if (req->objcore->flags & OC_F_FAILED) {
-		/* No way of signalling errors in the middle of
-		   the ESI body. Omit this ESI fragment. */
+		if (!pesi->pecx->incl_cont)
+			tree_latch_error(tree, -1);
 		(void) VDP_Close(req->vdc, req->objcore, boc);
 		return;
 	}
@@ -1278,6 +1286,8 @@ vped_deliver(struct req *req, struct boc *boc, int wantbody)
 		if (vgzgz == NULL) {
 			VSLb(req->vsl, SLT_Error,
 			    "Insufficient workspace for ESI gzip data");
+			if (!pesi->pecx->incl_cont)
+				tree_latch_error(tree, -1);
 			(void) VDP_Close(req->vdc, req->objcore, boc);
 			return;
 		}
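
Taken together, the vped_deliver() changes above follow one pattern:
every failure path consults the per-include flag before latching an
error on the delivery tree. Schematically (a condensed paraphrase of
the diff above, not additional code; all names as in the diff):

	/* pesi->pecx->incl_cont is set from the parsed onerror="continue"
	 * attribute and propagated to the subrequest via vped_include() */
	if (!pesi->pecx->incl_cont &&
	    req->resp->status != 200 && req->resp->status != 204) {
		tree_latch_error(tree, -1);	/* abort on non-200/204 */
		return;
	}
	/* ... later, if the fragment fetch failed mid-stream ... */
	if (req->objcore->flags & OC_F_FAILED) {
		if (!pesi->pecx->incl_cont)
			tree_latch_error(tree, -1);
		(void) VDP_Close(req->vdc, req->objcore, boc);
		return;
	}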