Commit 10590c10 authored by Poul-Henning Kamp

Go over the generation of VSL records with some spit & polish

parent 20f83016
@@ -50,6 +50,10 @@ static uint32_t *vsl_ptr;
struct VSC_C_main *VSC_C_main;
/*--------------------------------------------------------------------
* Check if the VSL tag is masked by the parameter bitmap
*/
static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
@@ -62,28 +66,27 @@ vsl_tag_is_masked(enum VSL_tag_e tag)
return (*bm & b);
}
static inline uint32_t
vsl_w0(uint32_t type, uint32_t length)
{
assert(length < 0x10000);
return (((type & 0xff) << 24) | length);
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* Lay down a record header, and return a pointer to the next record
*/
static inline void
static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, uint32_t vxid)
{
assert(((uintptr_t)p & 0x3) == 0);
assert(tag > SLT_Bogus);
assert(tag < SLT_Reserved);
AZ(len & ~VSL_LENMASK);
p[1] = vxid;
VMB();
p[0] = vsl_w0(tag, len);
p[0] = ((((unsigned)tag & 0xff) << 24) | len);
return (VSL_END(p, len));
}
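For illustration, the packed header word can be decoded with the inverse shifts and masks. This small sketch is not part of the commit, and the function name is hypothetical:

/* Hypothetical reader-side decode of the header laid down by vsl_hdr():
 * the tag sits in the top byte of word 0, the length in the low 16 bits,
 * and the vxid occupies word 1. */
static inline void
example_decode_hdr(const uint32_t *p, unsigned *tag, unsigned *len,
    uint32_t *vxid)
{
	*tag = p[0] >> 24;
	*len = p[0] & 0xffff;
	*vxid = p[1];
}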
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* Wrap the VSL buffer
*/
static void
vsl_wrap(void)
@@ -140,8 +143,7 @@ vsl_get(unsigned len, unsigned records, unsigned flushes)
}
/*--------------------------------------------------------------------
* This variant copies a byte-range directly to the log, without
* taking the detour over sprintf()
* Stick a finished record into VSL.
*/
static void
@@ -159,10 +161,22 @@ vslr(enum VSL_tag_e tag, uint32_t vxid, const char *b, unsigned len)
p = vsl_get(len, 1, 0);
memcpy(p + 2, b, len);
vsl_hdr(tag, p, len, vxid);
/*
* vsl_hdr() writes p[1] again, but we want to make sure it
* has hit memory because we work on the live buffer here.
*/
p[1] = vxid;
VWMB();
(void)vsl_hdr(tag, p, len, vxid);
}
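This ordering matters to readers of the live buffer: word 0 is the last word written, after the barrier, so a record is only valid once its tag/length word is in place. A reader-side sketch under that assumption, with hypothetical names; a matching read barrier would also be needed on weakly ordered hardware:

/* Hypothetical polling reader: trust p[1] and the payload only after
 * observing a non-zero word 0, which vslr() publishes last. */
static int
example_record_ready(const uint32_t *p, uint32_t *vxid)
{
	if (p[0] == 0)
		return (0);	/* record not published yet */
	/* a read barrier would be needed here on real hardware */
	*vxid = p[1];
	return (1);
}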
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* Add an unbuffered record to the VSL
*
* NB: This variant should be used sparingly and only for low-volume
* records, since it significantly adds to the mutex load on the VSL.
*/
void
VSL(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, ...)
@@ -171,20 +185,21 @@ VSL(enum VSL_tag_e tag, uint32_t vxid, const char *fmt, ...)
unsigned n, mlen = cache_param->shm_reclen;
char buf[mlen];
AN(fmt);
if (vsl_tag_is_masked(tag))
return;
AN(fmt);
va_start(ap, fmt);
if (strchr(fmt, '%') == NULL) {
vslr(tag, vxid, fmt, strlen(fmt));
} else {
va_start(ap, fmt);
n = vsnprintf(buf, mlen, fmt, ap);
va_end(ap);
if (n > mlen)
n = mlen;
vslr(tag, vxid, buf, n);
}
va_end(ap);
}
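As a usage sketch, assuming the SLT_Debug tag and an illustrative variable: a format string without '%' is copied verbatim via vslr(), while one with conversions is expanded into the stack buffer first:

/* Fast path: no conversions, the string is logged as-is. */
VSL(SLT_Debug, 0, "cache_shmlog reinitialized");
/* Slow path: formatted through vsnprintf(), truncated to shm_reclen. */
VSL(SLT_Debug, 0, "vsl buffer wrapped %u times", wraps);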
/*--------------------------------------------------------------------*/
@@ -210,24 +225,24 @@ VSL_Flush(struct vsl_log *vsl, int overflow)
vsl->wlr = 0;
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* VSL-buffered-txt
*/
static void
wslr(struct vsl_log *vsl, enum VSL_tag_e tag, int id, txt t)
void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{
unsigned l, mlen;
Tcheck(t);
if (id == -1)
id = vsl->wid;
if (vsl_tag_is_masked(tag))
return;
mlen = cache_param->shm_reclen;
/* Truncate */
l = Tlen(t);
if (l > mlen) {
if (l > mlen)
l = mlen;
t.e = t.b + l;
}
assert(vsl->wlp < vsl->wle);
@@ -236,51 +251,10 @@ wslr(struct vsl_log *vsl, enum VSL_tag_e tag, int id, txt t)
VSL_Flush(vsl, 1);
assert(VSL_END(vsl->wlp, l) < vsl->wle);
memcpy(VSL_DATA(vsl->wlp), t.b, l);
vsl_hdr(tag, vsl->wlp, l, id);
vsl->wlp = VSL_END(vsl->wlp, l);
vsl->wlp = vsl_hdr(tag, vsl->wlp, l, vsl->wid);
assert(vsl->wlp < vsl->wle);
vsl->wlr++;
if (DO_DEBUG(DBG_SYNCVSL))
VSL_Flush(vsl, 0);
}
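A caller-side sketch for the buffered-txt entry point: txt is Varnish's begin/end pointer pair, so a counted byte range can be logged without NUL termination. The buffer names here are illustrative:

txt t;

t.b = hdr_start;		/* first byte to log (hypothetical buffer) */
t.e = hdr_start + hdr_len;	/* one past the last byte; no NUL needed */
VSLbt(vsl, SLT_Debug, t);	/* truncated to shm_reclen if oversized */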
/*--------------------------------------------------------------------*/
static void
wsl(struct vsl_log *, enum VSL_tag_e tag, int id, const char *fmt, va_list ap)
__printflike(4, 0);
static void
wsl(struct vsl_log *vsl, enum VSL_tag_e tag, int id, const char *fmt,
va_list ap)
{
char *p;
unsigned n, mlen;
txt t;
AN(fmt);
mlen = cache_param->shm_reclen;
if (strchr(fmt, '%') == NULL) {
t.b = TRUST_ME(fmt);
t.e = strchr(t.b, '\0');
wslr(vsl, tag, id, t);
} else {
assert(vsl->wlp < vsl->wle);
/* Wrap if we cannot fit a full size record */
if (VSL_END(vsl->wlp, mlen) >= vsl->wle)
VSL_Flush(vsl, 1);
p = VSL_DATA(vsl->wlp);
n = vsnprintf(p, mlen, fmt, ap);
if (n > mlen)
n = mlen; /* we truncate long fields */
vsl_hdr(tag, vsl->wlp, n, id);
vsl->wlp = VSL_END(vsl->wlp, n);
assert(vsl->wlp < vsl->wle);
vsl->wlr++;
}
if (DO_DEBUG(DBG_SYNCVSL))
VSL_Flush(vsl, 0);
}
@@ -292,31 +266,54 @@ wsl(struct vsl_log *vsl, enum VSL_tag_e tag, int id, const char *fmt,
void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
char *p;
const char *u, *f;
unsigned n, mlen;
va_list ap;
txt t;
AN(fmt);
if (vsl_tag_is_masked(tag))
return;
/*
* If there are no printf-expansions, don't waste time expanding them
*/
f = NULL;
for (u = fmt; *u != '\0'; u++)
if (*u == '%')
f = u;
if (f == NULL) {
t.b = TRUST_ME(fmt);
t.e = TRUST_ME(u);
VSLbt(vsl, tag, t);
return;
}
mlen = cache_param->shm_reclen;
/* Wrap if we cannot fit a full size record */
if (VSL_END(vsl->wlp, mlen) >= vsl->wle)
VSL_Flush(vsl, 1);
p = VSL_DATA(vsl->wlp);
va_start(ap, fmt);
wsl(vsl, tag, vsl->wid, fmt, ap);
n = vsnprintf(p, mlen, fmt, ap);
va_end(ap);
if (n > mlen)
n = mlen; /* we truncate long fields */
vsl->wlp = vsl_hdr(tag, vsl->wlp, n, vsl->wid);
assert(vsl->wlp < vsl->wle);
vsl->wlr++;
if (DO_DEBUG(DBG_SYNCVSL))
VSL_Flush(vsl, 0);
}
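A buffered counterpart to the VSL() sketch above; the explicit scan for '%' means constant strings skip vsnprintf() entirely and are handed to VSLbt(). The vsl handle, tag and variables are illustrative:

VSLb(vsl, SLT_Debug, "connection reused");		/* fast path */
VSLb(vsl, SLT_Debug, "body %ju bytes", (uintmax_t)cl);	/* formatted */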
/*--------------------------------------------------------------------
* VSL-buffered-txt
* Allocate a VSL buffer
*/
void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{
if (vsl_tag_is_masked(tag))
return;
wslr(vsl, tag, -1, t);
}
/*--------------------------------------------------------------------*/
void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{
......
@@ -46,18 +46,22 @@
*
* Each log record consists of:
* [n] = ((type & 0xff) << 24) | (length & 0xffff)
* [n + 1] = identifier
* [n + 1] = ((marker & 0x03) << 30) | (identifier & 0x3fffffff)
* [n + 2] ... [m] = content
*
* Notice that the constants in these macros cannot be changed without
* changing corresponding magic numbers in varnishd/cache/cache_shmlog.c
*/
#define VSL_CLIENTMARKER (1U<<30)
#define VSL_BACKENDMARKER (1U<<31)
#define VSL_IDENTMASK (~(3U<<30))
#define VSL_LENMASK 0xffff
#define VSL_WORDS(len) (((len) + 3) / 4)
#define VSL_END(ptr, len) ((ptr) + 2 + VSL_WORDS(len))
#define VSL_NEXT(ptr) VSL_END(ptr, VSL_LEN(ptr))
#define VSL_LEN(ptr) ((ptr)[0] & 0xffff)
#define VSL_LEN(ptr) ((ptr)[0] & VSL_LENMASK)
#define VSL_TAG(ptr) ((ptr)[0] >> 24)
#define VSL_ID(ptr) (((ptr)[1]) & VSL_IDENTMASK)
#define VSL_CLIENT(ptr) (((ptr)[1]) & VSL_CLIENTMARKER)
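A minimal sketch of walking a run of records with these macros, assuming p starts at the first record and that unwritten space is zeroed so a zero word 0 terminates the scan; the starting pointer and handler are hypothetical:

const uint32_t *p = start;	/* hypothetical: first record in a segment */

while (p[0] != 0) {		/* assumed sentinel: unwritten space is zero */
	unsigned tag = VSL_TAG(p);
	unsigned len = VSL_LEN(p);
	uint32_t vxid = VSL_ID(p);
	const char *payload = (const char *)(p + 2);

	if (VSL_CLIENT(p))
		handle_client(tag, vxid, payload, len);	/* hypothetical */
	p = VSL_NEXT(p);
}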
......