Commit bcd8e84b authored by Geoff Simmons's avatar Geoff Simmons

Add the PESI.no_thread counter, and a corresponding log error message.

parent 094feed0
......@@ -32,6 +32,8 @@ Makefile.in
/src/vcc_if.c
/src/vcc_if.h
/src/vmod_*rst
/src/VSC_pesi.c
/src/VSC_pesi.h
/src/tests/*.log
/src/tests/*.trs
......
......@@ -39,6 +39,7 @@ m4_ifndef([VARNISH_PREREQ], AC_MSG_ERROR([Need varnish.m4 -- see README.rst]))
VARNISH_PREREQ([trunk])
VARNISH_VMODS([pesi])
VARNISH_COUNTERS([pesi])
VMOD_TESTS="$(cd $srcdir/src && echo tests/*.vtc)"
AC_SUBST(VMOD_TESTS)
......
......@@ -16,11 +16,15 @@ libvmod_pesi_la_SOURCES = \
nodist_libvmod_pesi_la_SOURCES = \
vcc_if.c \
vcc_if.h
vcc_if.h \
VSC_pesi.c \
VSC_pesi.h
dist_man_MANS = vdp_pesi.3
vdp_pesi.lo: vcc_if.h
@BUILD_VSC_PESI@
vdp_pesi.lo: vcc_if.h VSC_pesi.c VSC_pesi.h
vcc_if.h vmod_pesi.rst vmod_pesi.man.rst: vcc_if.c
......@@ -45,6 +49,7 @@ TESTS = @VMOD_TESTS@
EXTRA_DIST = \
vdp_pesi.vcc \
pesi.vsc \
$(VMOD_TESTS)
CLEANFILES = \
......@@ -52,4 +57,6 @@ CLEANFILES = \
$(builddir)/vcc_if.h \
$(builddir)/vmod_pesi.rst \
$(builddir)/vmod_pesi.man.rst \
$(builddir)/vdp_pesi.3
$(builddir)/vdp_pesi.3 \
$(builddir)/VSC_pesi.c \
$(builddir)/VSC_pesi.h
..
This is *NOT* a RST file but the syntax has been chosen so
that it may become an RST file at some later date.
.. varnish_vsc_begin:: pesi
:oneliner: VDP pesi stats
:order: 90
.. varnish_vsc:: no_thread
:type: counter
:oneliner: No threads available for parallel ESI
Number of times no threads were available for parallel ESI.
.. varnish_vsc_end:: pesi
......@@ -12,6 +12,13 @@ varnish v1 -expect MEMPOOL.pesi.pool > 0
varnish v1 -expect MEMPOOL.pesi.sz_wanted > 0
varnish v1 -expect MEMPOOL.pesi.sz_actual > 0
varnish v1 -vsc PESI.*
varnish v1 -expect PESI.no_thread == 0
varnish v1 -vsc LCK.pesi.*
varnish v1 -expect LCK.pesi.stats.creat == 1
varnish v1 -expect LCK.pesi.stats.locks == 0
varnish v1 -vcl {backend b { .host = "${bad_ip}"; }}
varnish v1 -vsc MEMPOOL.pesi.*
......@@ -42,6 +49,9 @@ varnish v1 -expect MEMPOOL.pesi.pool > 0
varnish v1 -expect MEMPOOL.pesi.sz_wanted > 0
varnish v1 -expect MEMPOOL.pesi.sz_actual > 0
varnish v1 -vsc PESI.*
varnish v1 -expect PESI.no_thread == 0
varnish v1 -cli "vcl.state vcl2 warm"
varnish v1 -vsc MEMPOOL.pesi.*
......@@ -49,6 +59,13 @@ varnish v1 -expect MEMPOOL.pesi.pool > 0
varnish v1 -expect MEMPOOL.pesi.sz_wanted > 0
varnish v1 -expect MEMPOOL.pesi.sz_actual > 0
varnish v1 -vsc PESI.*
varnish v1 -expect PESI.no_thread == 0
varnish v1 -vsc LCK.pesi.*
varnish v1 -expect LCK.pesi.stats.creat == 1
varnish v1 -expect LCK.pesi.stats.locks == 0
varnish v1 -cli "vcl.show vcl1"
varnish v1 -cli "vcl.use vcl2"
varnish v1 -cli "vcl.discard vcl1"
......
This diff is collapsed.
......@@ -56,6 +56,7 @@
#include "vqueue.h"
#include "vcc_if.h"
#include "VSC_pesi.h"
#define VFAIL(ctx, fmt, ...) \
VRT_fail((ctx), "vdp pesi failure: " fmt, __VA_ARGS__)
......@@ -328,6 +329,8 @@ struct pesi {
struct pecx pecx[1];
VTAILQ_ENTRY(pesi) list;
unsigned no_thread;
};
/* Forward declarations */
......@@ -348,10 +351,12 @@ const void * const priv_task_id_cfg = &priv_task_id_cfg;
/* shared object globals */
static unsigned loadcnt = 0, warmcnt = 0;
static struct VSC_lck *lck_bytes_tree, *lck_pesi_tree;
static struct vsc_seg *vsc_seg = NULL;
static struct VSC_lck *lck_bytes_tree, *lck_pesi_tree, *lck_stats;
static struct vsc_seg *vsc_seg = NULL, *pesi_vsc_seg = NULL;
static struct mempool *mempool = NULL;
static unsigned node_alloc_sz;
static struct VSC_pesi *stats;
static struct lock stats_lock;
/* also used by our version of cache_esi_deliver.c */
......@@ -1249,9 +1254,13 @@ vped_include(struct req *preq, const char *src, const char *host,
if (Pool_Task_Arg(wrk, TASK_QUEUE_REQ,
ved_task, &req, sizeof req) == 1)
return (1);
pesi->no_thread++;
VSLb(preq->vsl, SLT_Error, "vdp pesi: No thread available "
"for ESI subrequest %u, continuing in serial",
VXID(req->vsl->wid));
/*
 * we cannot use the self-rescheduling facility of
 * Pool_Task_Arg because we cannot unschedule ourselves
 */
......@@ -2299,6 +2308,12 @@ vdp_pesi_fini(struct req *req, void **priv)
CHECK_OBJ_NOTNULL(bytes_tree, BYTES_TREE_MAGIC);
assert(bytes_tree == pesi_tree->tree);
if (pesi->no_thread != 0) {
Lck_Lock(&stats_lock);
stats->no_thread += pesi->no_thread;
Lck_Unlock(&stats_lock);
}
pesi_destroy(&pesi);
task_fini(pesi_tree, pesi);
......@@ -3104,8 +3119,15 @@ vmod_event(VRT_CTX, struct vmod_priv *priv, enum vcl_event_e e)
"pesi.bytes_tree");
lck_pesi_tree = Lck_CreateClass(&vsc_seg,
"pesi.pesi_tree");
lck_stats = Lck_CreateClass(&vsc_seg,
"pesi.stats");
AN(lck_bytes_tree);
AN(lck_pesi_tree);
AZ(pesi_vsc_seg);
stats = VSC_pesi_New(NULL, &pesi_vsc_seg, "");
AN(stats);
AN(pesi_vsc_seg);
Lck_New(&stats_lock, lck_stats);
}
VRT_AddVDP(ctx, &VDP_pesi);
......@@ -3114,14 +3136,18 @@ vmod_event(VRT_CTX, struct vmod_priv *priv, enum vcl_event_e e)
VRT_RemoveVDP(ctx, &VDP_pesi);
AN(loadcnt);
if (--loadcnt == 0)
if (--loadcnt == 0) {
Lck_Delete(&stats_lock);
Lck_DestroyClass(&vsc_seg);
VSC_pesi_Destroy(&pesi_vsc_seg);
}
break;
case VCL_EVENT_WARM:
if (warmcnt++ == 0) {
AZ(mempool);
mempool = mpl_init();
AN(mempool);
VRT_VSC_Reveal(pesi_vsc_seg);
}
break;
case VCL_EVENT_COLD:
......@@ -3130,6 +3156,7 @@ vmod_event(VRT_CTX, struct vmod_priv *priv, enum vcl_event_e e)
AN(mempool);
mpl_fini(&mempool);
AZ(mempool);
VRT_VSC_Hide(pesi_vsc_seg);
}
break;
default:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment