Commit 729e69be authored by Dridi Boukelmoune's avatar Dridi Boukelmoune

vsl: Give up when it's too late to buffer records

When a VUT is slow enough, it might very well be overrun while it is
scanning logs. For our built-in VUTs like varnishncsa or varnishlog
this can happen when writing the output blocks on IO operations, or
when the output is piped to a slow consumer.
parent eba8c51d
......@@ -431,7 +431,7 @@ chunk_shm_to_buf(struct VSLQ *vslq, struct chunk *chunk)
}
/* Append a set of records to a vtx structure */
static void
static enum vsl_status
vtx_append(struct VSLQ *vslq, struct vtx *vtx, const struct VSLC_ptr *start,
size_t len)
{
......@@ -443,6 +443,9 @@ vtx_append(struct VSLQ *vslq, struct vtx *vtx, const struct VSLC_ptr *start,
AN(start);
i = VSL_Check(vslq->c, start);
if (i == vsl_check_e_inval)
return (vsl_e_overrun);
if (i == vsl_check_valid && !VTAILQ_EMPTY(&vtx->shmchunks_free)) {
/* Shmref it */
chunk = VTAILQ_FIRST(&vtx->shmchunks_free);
......@@ -457,7 +460,6 @@ vtx_append(struct VSLQ *vslq, struct vtx *vtx, const struct VSLC_ptr *start,
/* Append to shmref list */
VTAILQ_INSERT_TAIL(&vslq->shmrefs, chunk, shm.shmref);
} else {
assert(i != vsl_check_e_inval);
/* Buffer it */
chunk = VTAILQ_LAST(&vtx->chunks, chunkhead);
CHECK_OBJ_ORNULL(chunk, CHUNK_MAGIC);
......@@ -472,6 +474,7 @@ vtx_append(struct VSLQ *vslq, struct vtx *vtx, const struct VSLC_ptr *start,
}
}
vtx->len += len;
return (vsl_more);
}
/* Allocate a new vtx structure */
......@@ -1314,8 +1317,9 @@ vslq_next(struct VSLQ *vslq)
AN(vtx);
}
if (vtx != NULL) {
vtx_append(vslq, vtx, &c->rec, len);
vtx_scan(vslq, vtx);
r = vtx_append(vslq, vtx, &c->rec, len);
if (r == vsl_more)
vtx_scan(vslq, vtx);
}
return (r);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment