Commit c0e25e6a authored by Nils Goroll's avatar Nils Goroll Committed by Dridi Boukelmoune

Complement VDP error handling

* remember push errors in the context
* also fail additional pushes once we saw an error

Try to fail requests as early as possible; otherwise, calling VDP
functions after an error is a no-op.

Fixes #2618
parent 5300ba5e
......@@ -73,7 +73,7 @@ VDP_bytes(struct req *req, enum vdp_action act, const void *ptr, ssize_t len)
return (vdc->retval);
}
void
int
VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
{
struct vdp_entry *vdpe;
......@@ -85,12 +85,18 @@ VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
AN(vdp->name);
AN(vdp->func);
if (vdc->retval)
return (vdc->retval);
if (DO_DEBUG(DBG_PROCESSORS))
VSLb(req->vsl, SLT_Debug, "VDP_push(%s)", vdp->name);
vdpe = WS_Alloc(req->ws, sizeof *vdpe);
if (vdpe == NULL)
return;
if (vdpe == NULL) {
AZ(vdc->retval);
vdc->retval = -1;
return (vdc->retval);
}
INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
vdpe->vdp = vdp;
vdpe->priv = priv;
......@@ -100,7 +106,9 @@ VDP_push(struct req *req, const struct vdp *vdp, void *priv, int bottom)
VTAILQ_INSERT_HEAD(&vdc->vdp, vdpe, list);
vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
AZ(vdpe->vdp->func(req, VDP_INIT, &vdpe->priv, NULL, 0));
AZ(vdc->retval);
vdc->retval = vdpe->vdp->func(req, VDP_INIT, &vdpe->priv, NULL, 0);
return (vdc->retval);
}
void
......@@ -113,10 +121,15 @@ VDP_close(struct req *req)
vdc = req->vdc;
while (!VTAILQ_EMPTY(&vdc->vdp)) {
vdpe = VTAILQ_FIRST(&vdc->vdp);
CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
AZ(vdpe->vdp->func(req, VDP_FINI, &vdpe->priv, NULL, 0));
AZ(vdpe->priv);
if (vdc->retval >= 0)
AN(vdpe);
if (vdpe != NULL) {
CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
AZ(vdpe->vdp->func(req, VDP_FINI, &vdpe->priv,
NULL, 0));
AZ(vdpe->priv);
}
vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
}
}
......
......@@ -815,9 +815,9 @@ ved_deliver(struct req *req, struct boc *boc, int wantbody)
ved_stripgzip(req, boc);
} else {
if (ecx->isgzip && !i)
VDP_push(req, &ved_vdp_pgz, ecx, 1);
(void)VDP_push(req, &ved_vdp_pgz, ecx, 1);
else
VDP_push(req, &ved_ved, ecx->preq, 1);
(void)VDP_push(req, &ved_ved, ecx->preq, 1);
(void)VDP_DeliverObj(req);
(void)VDP_bytes(req, VDP_FLUSH, NULL, 0);
}
......
......@@ -121,8 +121,8 @@ struct vdp_ctx {
#define VDP_CTX_MAGIC 0xee501df7
struct vdp_entry_s vdp;
struct vdp_entry *nxt;
unsigned retval;
int retval;
};
int VDP_bytes(struct req *, enum vdp_action act, const void *ptr, ssize_t len);
void VDP_push(struct req *, const struct vdp *, void *priv, int bottom);
int VDP_push(struct req *, const struct vdp *, void *priv, int bottom);
......@@ -175,7 +175,8 @@ vrg_dorange(struct req *req, const char *r)
vrg_priv->range_off = 0;
vrg_priv->range_low = low;
vrg_priv->range_high = high + 1;
VDP_push(req, &vrg_vdp, vrg_priv, 1);
if (VDP_push(req, &vrg_vdp, vrg_priv, 1))
return ("WS too small");
http_PutResponse(req->resp, "HTTP/1.1", 206, NULL);
return (NULL);
}
......
......@@ -342,7 +342,7 @@ cnt_transmit(struct worker *wrk, struct req *req)
struct boc *boc;
const char *r;
uint16_t status;
int sendbody;
int err, sendbody;
intmax_t clval;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
......@@ -375,15 +375,18 @@ cnt_transmit(struct worker *wrk, struct req *req)
} else
sendbody = 1;
err = 0;
if (sendbody >= 0) {
if (!req->disable_esi && req->resp_len != 0 &&
ObjHasAttr(wrk, req->objcore, OA_ESIDATA))
VDP_push(req, &VDP_esi, NULL, 0);
ObjHasAttr(wrk, req->objcore, OA_ESIDATA) &&
VDP_push(req, &VDP_esi, NULL, 0) < 0)
err++;
if (cache_param->http_gzip_support &&
ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED) &&
!RFC2616_Req_Gzip(req->http))
VDP_push(req, &VDP_gunzip, NULL, 1);
!RFC2616_Req_Gzip(req->http) &&
VDP_push(req, &VDP_gunzip, NULL, 1) < 0)
err++;
if (cache_param->http_range_support &&
http_IsStatus(req->resp, 200)) {
......@@ -405,7 +408,12 @@ cnt_transmit(struct worker *wrk, struct req *req)
"Content-Length: %jd", req->resp_len);
}
req->transport->deliver(req, boc, sendbody);
if (err == 0)
req->transport->deliver(req, boc, sendbody);
else {
VSLb(req->vsl, SLT_Error, "Failure to push processors");
req->doclose = SC_OVERLOAD;
}
VSLb_ts_req(req, "Resp", W_TIM_real(wrk));
......
......@@ -128,8 +128,11 @@ V1D_Deliver(struct req *req, struct boc *boc, int sendbody)
if (req->resp_len == 0)
sendbody = 0;
if (sendbody)
VDP_push(req, &v1d_vdp, NULL, 1);
if (sendbody && VDP_push(req, &v1d_vdp, NULL, 1)) {
v1d_error(req, "workspace_thread overflow");
AZ(req->wrk->v1l);
return;
}
AZ(req->wrk->v1l);
V1L_Open(req->wrk, req->wrk->aws,
......
......@@ -265,10 +265,13 @@ h2_deliver(struct req *req, struct boc *boc, int sendbody)
WS_Release(req->ws, 0);
if (sendbody) {
VDP_push(req, &h2_vdp, NULL, 1);
/* XXX someone into H2 please add appropriate error handling */
while (sendbody) {
err = VDP_push(req, &h2_vdp, NULL, 1);
if (err)
break;
err = VDP_DeliverObj(req);
/*XXX*/(void)err;
break;
}
AZ(req->wrk->v1l);
......
# Exercise the VDP_push() error paths by sweeping the client workspace
# through progressively tighter sizes during delivery (regression test
# for #2618: a workspace overflow while pushing a delivery processor
# must produce a failed request, not a panic).
varnishtest "sweep through tight client workspace conditions in deliver"

server s1 {
	rxreq
	# "max-age" (fixed from the original "mag-age" typo) so the
	# object is cacheable and all 100 client requests hit the same
	# stored object.
	txresp -hdr "Cache-Control: max-age=3600" -bodylen 1024
} -start

varnish v1 -vcl+backend {
	import vtc;
	import std;

	sub vcl_recv {
		return (hash);
	}

	sub vcl_deliver {
		# Consume a little more of the client workspace on each
		# request: xids advance by 2 per request, so this eats
		# 0, 4, 8, ... extra bytes, eventually starving
		# VDP_push() of workspace.
		vtc.workspace_alloc(client, -4 *
		    (std.integer(req.xid, 1001) - 1001) / 2);
	}
} -start

client c1 -repeat 100 {
	txreq -url "/"
	# some responses will fail (503), some won't. All we care
	# about here is the fact that we don't panic
	rxresp
} -run
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment