Commit 77e7172e authored by Poul-Henning Kamp

First part of shmlog rewrite:

Handle shmlog in 32-bit, 4-byte-aligned words; this saves wear and tear
on CPU write combining and makes things a bit faster.



git-svn-id: http://www.varnish-cache.org/svn/trunk/varnish-cache@4913 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent 3efd84bd
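
The core of the change: log record headers, previously written as a run of individual bytes (tag, two length bytes, four id bytes), become two 32-bit words, with the tag and length packed into the first word and the identifier in the second. A minimal sketch of that packing, assuming nothing beyond what vsl_w0() and the VSL_TAG/VSL_LEN macros in this diff show (the demo_ names are illustrative, not part of the tree):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Pack tag and length into the first header word, as vsl_w0() does. */
    static uint32_t
    demo_w0(uint32_t type, uint32_t length)
    {
            assert(length < 0x10000);
            return (((type & 0xff) << 24) | length);
    }

    int
    main(void)
    {
            uint32_t hdr = demo_w0(42 /* tag */, 13 /* payload bytes */);

            /* Unpack with the same shifts/masks as VSL_TAG()/VSL_LEN(). */
            printf("tag=%u len=%u\n",
                (unsigned)(hdr >> 24), (unsigned)(hdr & 0xffff));
            return (0);
    }
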
......@@ -225,7 +225,7 @@ struct worker {
struct VCL_conf *vcl;
unsigned char *wlb, *wlp, *wle;
uint32_t *wlb, *wlp, *wle;
unsigned wlr;
struct SHA256Context *sha256ctx;
......
......@@ -121,7 +121,7 @@ wrk_thread_real(struct wq *qp, unsigned shm_workspace, unsigned sess_workspace,
unsigned nhttp, unsigned http_space, unsigned siov)
{
struct worker *w, ww;
unsigned char wlog[shm_workspace];
uint32_t wlog[shm_workspace / 4];
unsigned char ws[sess_workspace];
unsigned char http0[http_space];
unsigned char http1[http_space];
......@@ -136,7 +136,7 @@ wrk_thread_real(struct wq *qp, unsigned shm_workspace, unsigned sess_workspace,
w->magic = WORKER_MAGIC;
w->lastused = NAN;
w->wlb = w->wlp = wlog;
w->wle = wlog + sizeof wlog;
w->wle = wlog + (sizeof wlog) / 4;
w->sha256ctx = &sha256;
w->http[0] = HTTP_create(http0, nhttp);
w->http[1] = HTTP_create(http1, nhttp);
......@@ -557,7 +557,7 @@ wrk_bgthread(void *arg)
struct bgthread *bt;
struct worker ww;
struct sess *sp;
unsigned char logbuf[1024]; /* XXX: size ? */
uint32_t logbuf[1024]; /* XXX: size ? */
CAST_OBJ_NOTNULL(bt, arg, BGTHREAD_MAGIC);
THR_SetName(bt->name);
......@@ -567,7 +567,7 @@ wrk_bgthread(void *arg)
sp->wrk = &ww;
ww.magic = WORKER_MAGIC;
ww.wlp = ww.wlb = logbuf;
ww.wle = logbuf + sizeof logbuf;
ww.wle = logbuf + (sizeof logbuf) / 4;
(void)bt->func(sp, bt->priv);
......
......@@ -38,9 +38,18 @@ SVNID("$Id$")
#include "shmlog.h"
#include "cache.h"
#include "vmb.h"
static pthread_mutex_t vsl_mtx;
static inline uint32_t
vsl_w0(uint32_t type, uint32_t length)
{
assert(length < 0x10000);
return (((type & 0xff) << 24) | length);
}
#define LOCKSHM(foo) \
do { \
if (pthread_mutex_trylock(foo)) { \
......@@ -56,47 +65,58 @@ vsl_wrap(void)
{
assert(vsl_log_nxt < vsl_log_end);
vsl_log_start[1] = SLT_ENDMARKER;
MEMORY_BARRIER();
*vsl_log_nxt = SLT_WRAPMARKER;
MEMORY_BARRIER();
vsl_log_start[0]++;
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
vsl_log_start[1] = vsl_w0(SLT_ENDMARKER, 0);
do
vsl_log_start[0]++;
while (vsl_log_start[0] == 0);
VWMB();
*vsl_log_nxt = vsl_w0(SLT_WRAPMARKER, 0);
vsl_log_nxt = vsl_log_start + 1;
VSL_stats->shm_cycles++;
}
static void
vsl_hdr(enum shmlogtag tag, unsigned char *p, unsigned len, unsigned id)
/*--------------------------------------------------------------------*/
static inline void
vsl_hdr(enum shmlogtag tag, uint32_t *p, unsigned len, unsigned id)
{
assert(vsl_log_nxt + SHMLOG_NEXTTAG + len < vsl_log_end);
assert(len < 0x10000);
p[__SHMLOG_LEN_HIGH] = (len >> 8) & 0xff;
p[__SHMLOG_LEN_LOW] = len & 0xff;
p[__SHMLOG_ID_HIGH] = (id >> 24) & 0xff;
p[__SHMLOG_ID_MEDHIGH] = (id >> 16) & 0xff;
p[__SHMLOG_ID_MEDLOW] = (id >> 8) & 0xff;
p[__SHMLOG_ID_LOW] = id & 0xff;
p[SHMLOG_DATA + len] = '\0';
MEMORY_BARRIER();
p[SHMLOG_TAG] = tag;
assert(((uintptr_t)p & 0x3) == 0);
p[1] = id;
VMB();
p[0] = vsl_w0(tag, len);
}
static uint8_t *
/*--------------------------------------------------------------------
* Reserve bytes for a record, wrap if necessary
*/
static uint32_t *
vsl_get(unsigned len)
{
uint8_t *p;
uint32_t *p;
uint32_t u;
assert(vsl_log_nxt < vsl_log_end);
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
u = VSL_WORDS(len);
/* Wrap if necessary */
if (vsl_log_nxt + SHMLOG_NEXTTAG + len + 1 >= vsl_log_end) /* XXX: + 1 ?? */
if (VSL_NEXT(vsl_log_nxt, len) >= vsl_log_end)
vsl_wrap();
p = vsl_log_nxt;
vsl_log_nxt = VSL_NEXT(vsl_log_nxt, len);
vsl_log_nxt += SHMLOG_NEXTTAG + len;
assert(vsl_log_nxt < vsl_log_end);
*vsl_log_nxt = SLT_ENDMARKER;
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
*vsl_log_nxt = vsl_w0(SLT_ENDMARKER, 0);
printf("GET %p -> %p\n", p, vsl_log_nxt);
return (p);
}
......@@ -106,30 +126,26 @@ vsl_get(unsigned len)
*/
static void
VSLR(enum shmlogtag tag, int id, txt t)
VSLR(enum shmlogtag tag, int id, const char *b, unsigned len)
{
unsigned char *p;
unsigned l, mlen;
uint32_t *p;
unsigned mlen;
Tcheck(t);
mlen = params->shm_reclen;
/* Truncate */
l = Tlen(t);
if (l > mlen) {
l = mlen;
t.e = t.b + l;
}
if (len > mlen)
len = mlen;
/* Only hold the lock while we find our space */
LOCKSHM(&vsl_mtx);
VSL_stats->shm_writes++;
VSL_stats->shm_records++;
p = vsl_get(l);
p = vsl_get(len);
UNLOCKSHM(&vsl_mtx);
memcpy(p + SHMLOG_DATA, t.b, l);
vsl_hdr(tag, p, l, id);
memcpy(p + 2, b, len);
vsl_hdr(tag, p, len, id);
}
/*--------------------------------------------------------------------*/
......@@ -138,9 +154,8 @@ void
VSL(enum shmlogtag tag, int id, const char *fmt, ...)
{
va_list ap;
unsigned char *p;
unsigned n, mlen;
txt t;
unsigned n, mlen = params->shm_reclen;
char buf[mlen];
/*
* XXX: consider formatting into a stack buffer then move into
......@@ -148,35 +163,14 @@ VSL(enum shmlogtag tag, int id, const char *fmt, ...)
*/
AN(fmt);
va_start(ap, fmt);
mlen = params->shm_reclen;
if (strchr(fmt, '%') == NULL) {
t.b = TRUST_ME(fmt);
t.e = strchr(t.b, '\0');
VSLR(tag, id, t);
VSLR(tag, id, fmt, strlen(fmt));
} else {
LOCKSHM(&vsl_mtx);
VSL_stats->shm_writes++;
VSL_stats->shm_records++;
assert(vsl_log_nxt < vsl_log_end);
/* Wrap if we cannot fit a full size record */
if (vsl_log_nxt + SHMLOG_NEXTTAG + mlen + 1 >= vsl_log_end)
vsl_wrap();
p = vsl_log_nxt;
/* +1 for the NUL */
n = vsnprintf((char *)(p + SHMLOG_DATA), mlen + 1L, fmt, ap);
n = vsnprintf(buf, mlen, fmt, ap);
if (n > mlen)
n = mlen; /* we truncate long fields */
vsl_log_nxt += SHMLOG_NEXTTAG + n;
assert(vsl_log_nxt < vsl_log_end);
*vsl_log_nxt = SLT_ENDMARKER;
UNLOCKSHM(&vsl_mtx);
vsl_hdr(tag, p, n, id);
n = mlen;
VSLR(tag, id, buf, n);
}
va_end(ap);
}
......@@ -186,21 +180,24 @@ VSL(enum shmlogtag tag, int id, const char *fmt, ...)
void
WSL_Flush(struct worker *w, int overflow)
{
uint8_t *p;
uint32_t *p;
unsigned l;
l = pdiff(w->wlb, w->wlp);
if (l == 0)
return;
assert(l >= 8);
LOCKSHM(&vsl_mtx);
VSL_stats->shm_flushes += overflow;
VSL_stats->shm_writes++;
VSL_stats->shm_records += w->wlr;
p = vsl_get(l);
p = vsl_get(l - 8);
UNLOCKSHM(&vsl_mtx);
memcpy(p + 1, w->wlb + 1, l - 1);
MEMORY_BARRIER();
memcpy(p + 1, w->wlb + 1, l - 4);
VWMB();
p[0] = w->wlb[0];
w->wlp = w->wlb;
w->wlr = 0;
......@@ -211,7 +208,6 @@ WSL_Flush(struct worker *w, int overflow)
void
WSLR(struct worker *w, enum shmlogtag tag, int id, txt t)
{
unsigned char *p;
unsigned l, mlen;
Tcheck(t);
......@@ -227,13 +223,13 @@ WSLR(struct worker *w, enum shmlogtag tag, int id, txt t)
assert(w->wlp < w->wle);
/* Wrap if necessary */
if (w->wlp + SHMLOG_NEXTTAG + l + 1 >= w->wle)
if (VSL_NEXT(w->wlp, l) >= w->wle)
WSL_Flush(w, 1);
p = w->wlp;
w->wlp += SHMLOG_NEXTTAG + l;
assert (VSL_NEXT(w->wlp, l) < w->wle);
memcpy(VSL_DATA(w->wlp), t.b, l);
vsl_hdr(tag, w->wlp, l, id);
w->wlp = VSL_NEXT(w->wlp, l);
assert(w->wlp < w->wle);
memcpy(p + SHMLOG_DATA, t.b, l);
vsl_hdr(tag, p, l, id);
w->wlr++;
if (params->diag_bitmap & 0x10000)
WSL_Flush(w, 0);
......@@ -245,7 +241,7 @@ void
WSL(struct worker *w, enum shmlogtag tag, int id, const char *fmt, ...)
{
va_list ap;
unsigned char *p;
char *p;
unsigned n, mlen;
txt t;
......@@ -261,16 +257,15 @@ WSL(struct worker *w, enum shmlogtag tag, int id, const char *fmt, ...)
assert(w->wlp < w->wle);
/* Wrap if we cannot fit a full size record */
if (w->wlp + SHMLOG_NEXTTAG + mlen + 1 >= w->wle)
if (VSL_NEXT(w->wlp, mlen) >= w->wle)
WSL_Flush(w, 1);
p = w->wlp;
/* +1 for the NUL */
n = vsnprintf((char *)(p + SHMLOG_DATA), mlen + 1L, fmt, ap);
p = VSL_DATA(w->wlp);
n = vsnprintf(p, mlen, fmt, ap);
if (n > mlen)
n = mlen; /* we truncate long fields */
vsl_hdr(tag, p, n, id);
w->wlp += SHMLOG_NEXTTAG + n;
vsl_hdr(tag, w->wlp, n, id);
w->wlp = VSL_NEXT(w->wlp, n);
assert(w->wlp < w->wle);
w->wlr++;
}
......
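
The hunks above also establish the writer-side publish order: payload and identifier are stored first, then a write memory barrier, then the tag/length word last, so a reader polling word [0] never sees a half-written record. A hedged sketch of that pattern, using C11 atomics in place of the VMB()/VWMB() macros from vmb.h (demo_emit is an illustrative name, not part of the source):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    static void
    demo_emit(uint32_t *p, uint32_t tag, uint32_t id,
        const char *data, uint32_t len)
    {
            assert(len < 0x10000);
            memcpy(p + 2, data, len);       /* [n + 2]... : content     */
            p[1] = id;                      /* [n + 1]    : identifier  */
            atomic_thread_fence(memory_order_release);
            p[0] = ((tag & 0xff) << 24) | len; /* [n]: publish last     */
    }
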
......@@ -42,9 +42,9 @@ void VCA_tweak_waiter(struct cli *cli, const char *arg);
void *mgt_SHM_Alloc(unsigned size, const char *class, const char *type, const char *ident);
extern struct varnish_stats *VSL_stats;
extern struct shmloghead *loghead;
extern uint8_t *vsl_log_start;
extern uint8_t *vsl_log_end;
extern uint8_t *vsl_log_nxt;
extern uint32_t *vsl_log_start;
extern uint32_t *vsl_log_end;
extern uint32_t *vsl_log_nxt;
/* varnishd.c */
struct vsb;
......
......@@ -203,7 +203,7 @@ struct params {
* We declare this a volatile pointer, so that reads of parameters
* become atomic, leaving the CLI thread lattitude to change the values
*/
extern volatile struct params *params;
extern volatile struct params * volatile params;
extern struct heritage heritage;
void child_main(void);
......@@ -56,9 +56,9 @@ SVNID("$Id$")
struct varnish_stats *VSL_stats;
struct shmloghead *loghead;
uint8_t *vsl_log_start;
uint8_t *vsl_log_end;
uint8_t *vsl_log_nxt;
uint32_t *vsl_log_start;
uint32_t *vsl_log_end;
uint32_t *vsl_log_nxt;
static int vsl_fd = -1;
......@@ -264,7 +264,6 @@ mgt_SHM_Init(const char *fn, const char *l_arg)
vsl_n_check(i);
(void)close(i);
}
fprintf(stderr, "Creating new SHMFILE\n");
(void)close(i);
vsl_buildnew(fn, size, fill);
......@@ -276,10 +275,6 @@ mgt_SHM_Init(const char *fn, const char *l_arg)
xxxassert(loghead != MAP_FAILED);
(void)mlock((void*)loghead, size);
/* Initialize pool */
loghead->alloc_seq = 0; /* Zero means "inconsistent" */
VWMB();
memset(&loghead->head, 0, sizeof loghead->head);
loghead->head.magic = SHMALLOC_MAGIC;
loghead->head.len =
......@@ -298,15 +293,16 @@ mgt_SHM_Init(const char *fn, const char *l_arg)
vsl_log_start = mgt_SHM_Alloc(s1, VSL_CLASS_LOG, "", "");
AN(vsl_log_start);
vsl_log_end = vsl_log_start + s1;
vsl_log_end = (void*)((uint8_t *)vsl_log_start + s1);
vsl_log_nxt = vsl_log_start + 1;
*vsl_log_nxt = SLT_ENDMARKER;
VWMB();
*vsl_log_nxt = (SLT_ENDMARKER << 24);
do
*vsl_log_start = random();
*vsl_log_start = random() & 0xffff;
while (*vsl_log_start == 0);
VWMB();
do
loghead->alloc_seq = random();
while (loghead->alloc_seq == 0);
......
......@@ -78,7 +78,7 @@ SVNID("$Id$")
#endif
struct heritage heritage;
volatile struct params *params;
volatile struct params * volatile params;
unsigned d_flag = 0;
pid_t mgt_pid;
struct vev_base *mgt_evb;
......
......@@ -271,7 +271,7 @@ static void
do_write(struct VSL_data *vd, const char *w_arg, int a_flag)
{
int fd, i, l;
unsigned char *p;
uint32_t *p;
fd = open_log(w_arg, a_flag);
signal(SIGHUP, sighup);
......@@ -280,8 +280,8 @@ do_write(struct VSL_data *vd, const char *w_arg, int a_flag)
if (i < 0)
break;
if (i > 0) {
l = SHMLOG_LEN(p);
i = write(fd, p, SHMLOG_NEXTTAG + l);
l = VSL_LEN(p);
i = write(fd, p, 8 + VSL_WORDS(l) * 4);
if (i < 0) {
perror(w_arg);
exit(1);
......
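
With word-based records, varnishlog's -w path above writes each record as the two header words plus the payload rounded up to whole words. A small sketch of that size computation, assuming only the VSL_WORDS() macro from this commit (demo_record_bytes is an illustrative helper):

    #include <stddef.h>
    #include <stdint.h>

    #define VSL_WORDS(len)  (((len) + 3) / 4)

    /* Total number of bytes one record occupies in the raw -w output. */
    static size_t
    demo_record_bytes(uint32_t len)
    {
            return (8 + VSL_WORDS(len) * 4);
    }
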
......@@ -55,8 +55,8 @@ SVNID("$Id$")
#include "varnishapi.h"
struct top {
unsigned char rec[4];
unsigned char *rec_data;
uint8_t tag;
char *rec_data;
unsigned clen;
unsigned hash;
VTAILQ_ENTRY(top) list;
......@@ -76,33 +76,36 @@ static int f_flag = 0;
static unsigned maxfieldlen = 0;
static void
accumulate(const unsigned char *p)
accumulate(uint32_t *p)
{
struct top *tp, *tp2;
const unsigned char *q;
const char *q;
unsigned int u, l;
uint8_t t;
int i;
// fprintf(stderr, "%*.*s\n", p[1], p[1], p + 4);
// fprintf(stderr, "%p %08x %08x\n", p, p[0], p[1]);
u = 0;
q = p + SHMLOG_DATA;
l = SHMLOG_LEN(p);
q = VSL_DATA(p);
l = VSL_LEN(p);
t = VSL_TAG(p);
for (i = 0; i < l; i++, q++) {
if (f_flag && (*q == ':' || isspace(*q)))
if (f_flag && (*q == ':' || isspace(*q))) {
l = q - VSL_DATA(p);
break;
}
u += *q;
}
VTAILQ_FOREACH(tp, &top_head, list) {
if (tp->hash != u)
continue;
if (tp->rec[SHMLOG_TAG] != p[SHMLOG_TAG])
if (tp->tag != t)
continue;
if (tp->clen != q - p)
if (tp->clen != l)
continue;
if (memcmp(p + SHMLOG_DATA, tp->rec_data,
q - (p + SHMLOG_DATA)))
if (memcmp(VSL_DATA(p), tp->rec_data, l))
continue;
tp->count += 1.0;
break;
......@@ -111,15 +114,16 @@ accumulate(const unsigned char *p)
ntop++;
tp = calloc(sizeof *tp, 1);
assert(tp != NULL);
tp->rec_data = calloc(l, 1);
tp->rec_data = calloc(l + 1, 1);
assert(tp->rec_data != NULL);
tp->hash = u;
tp->count = 1.0;
tp->clen = q - p;
tp->clen = l;
tp->tag = t;
memcpy(tp->rec_data, VSL_DATA(p), l);
tp->rec_data[l] = '\0';
VTAILQ_INSERT_TAIL(&top_head, tp, list);
}
memcpy(tp->rec, p, SHMLOG_DATA - 1);
memcpy(tp->rec_data, p + SHMLOG_DATA, l);
while (1) {
tp2 = VTAILQ_PREV(tp, tophead, list);
if (tp2 == NULL || tp2->count >= tp->count)
......@@ -156,12 +160,12 @@ update(struct VSL_data *vd)
mvprintw(0, 0, "list length %u", ntop);
VTAILQ_FOREACH_SAFE(tp, &top_head, list, tp2) {
if (++l < LINES) {
len = SHMLOG_LEN(tp->rec);
len = tp->clen;
if (len > COLS - 20)
len = COLS - 20;
mvprintw(l, 0, "%9.2f %-*.*s %*.*s\n",
tp->count, maxfieldlen, maxfieldlen,
VSL_tags[tp->rec[SHMLOG_TAG]],
VSL_tags[tp->tag],
len, len, tp->rec_data);
t = tp->count;
}
......@@ -180,10 +184,10 @@ static void *
accumulate_thread(void *arg)
{
struct VSL_data *vd = arg;
uint32_t *p;
int i;
for (;;) {
unsigned char *p;
int i;
i = VSL_NextLog(vd, &p);
if (i < 0)
......@@ -268,22 +272,20 @@ static void
dump(void)
{
struct top *tp, *tp2;
int len;
VTAILQ_FOREACH_SAFE(tp, &top_head, list, tp2) {
if (tp->count <= 1.0)
break;
len = SHMLOG_LEN(tp->rec);
printf("%9.2f %s %*.*s\n",
tp->count, VSL_tags[tp->rec[SHMLOG_TAG]],
len, len, tp->rec_data);
tp->count, VSL_tags[tp->tag],
tp->clen, tp->clen, tp->rec_data);
}
}
static void
do_once(struct VSL_data *vd)
{
unsigned char *p;
uint32_t *p;
while (VSL_NextLog(vd, &p) > 0)
accumulate(p);
......
......@@ -60,7 +60,7 @@ struct shmalloc {
#define SHA_PTR(sha) ((void*)((uintptr_t)((sha) + 1)))
struct shmloghead {
#define SHMLOGHEAD_MAGIC 4185512501U /* From /dev/random */
#define SHMLOGHEAD_MAGIC 4185512502U /* From /dev/random */
unsigned magic;
unsigned hdrsize;
......@@ -83,30 +83,25 @@ struct shmloghead {
#define VSL_CLASS_STAT "Stat"
/*
* Record format is as follows:
* Shared memory log format
*
* 1 byte field type (enum shmlogtag)
* 2 bytes length of contents
* 4 bytes record identifier
* n bytes field contents (isgraph(c) || isspace(c)) allowed.
* The log is structured as an array of 32bit unsigned integers.
*
* The first integer contains a non-zero serial number, which changes
* whenever writing the log starts from the front.
*
* Each logrecord consist of:
* [n] = ((type & 0xff) << 24) | (length & 0xffff)
* [n + 1] = identifier
* [n + 2] ... [m] = content
*/
#define SHMLOG_TAG 0
#define __SHMLOG_LEN_HIGH 1
#define __SHMLOG_LEN_LOW 2
#define __SHMLOG_ID_HIGH 3
#define __SHMLOG_ID_MEDHIGH 4
#define __SHMLOG_ID_MEDLOW 5
#define __SHMLOG_ID_LOW 6
#define SHMLOG_DATA 7
#define SHMLOG_NEXTTAG 8 /* ... + len */
#define SHMLOG_LEN(p) (((p)[__SHMLOG_LEN_HIGH] << 8) | (p)[__SHMLOG_LEN_LOW])
#define SHMLOG_ID(p) ( \
((p)[__SHMLOG_ID_HIGH] << 24) | \
((p)[__SHMLOG_ID_MEDHIGH] << 16) | \
((p)[__SHMLOG_ID_MEDLOW] << 8) | \
(p)[__SHMLOG_ID_LOW])
#define VSL_WORDS(len) (((len) + 3) / 4)
#define VSL_NEXT(ptr, len) ((ptr) + 2 + VSL_WORDS(len))
#define VSL_LEN(ptr) ((ptr)[0] & 0xffff)
#define VSL_TAG(ptr) ((ptr)[0] >> 24)
#define VSL_ID(ptr) ((ptr)[1])
#define VSL_DATA(ptr) ((char*)((ptr)+2))
/*
* The identifiers in shmlogtag are "SLT_" + XML tag. A script may be run
......@@ -117,7 +112,7 @@ enum shmlogtag {
#define SLTM(foo) SLT_##foo,
#include "shmlog_tags.h"
#undef SLTM
SLT_WRAPMARKER = 255
SLT_WRAPMARKER = 255U
};
/* This function lives in both libvarnish and libvarnishapi */
......
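
Given the macros above, walking the in-memory log reduces to reading word [0] of each record and stepping by VSL_NEXT(). A minimal sketch under those definitions (demo_walk and its parameters are illustrative; the blocking and wrap handling done by vsl_nextlog() is omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the shmlog.h hunk above. */
    #define VSL_WORDS(len)          (((len) + 3) / 4)
    #define VSL_NEXT(ptr, len)      ((ptr) + 2 + VSL_WORDS(len))
    #define VSL_LEN(ptr)            ((ptr)[0] & 0xffff)
    #define VSL_TAG(ptr)            ((ptr)[0] >> 24)
    #define VSL_ID(ptr)             ((ptr)[1])
    #define VSL_DATA(ptr)           ((char*)((ptr)+2))

    static void
    demo_walk(uint32_t *log_start, uint32_t endmarker)
    {
            /* Word [0] holds the serial number; records start at [1]. */
            uint32_t *p = log_start + 1;

            while (VSL_TAG(p) != endmarker) {
                    printf("tag=%u id=%u len=%u data=%.*s\n",
                        (unsigned)VSL_TAG(p), (unsigned)VSL_ID(p),
                        (unsigned)VSL_LEN(p),
                        (int)VSL_LEN(p), VSL_DATA(p));
                    p = VSL_NEXT(p, VSL_LEN(p));
            }
    }
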
......@@ -54,7 +54,7 @@ void VSL_Select(struct VSL_data *vd, unsigned tag);
int VSL_OpenLog(struct VSL_data *vd);
void VSL_NonBlocking(struct VSL_data *vd, int nb);
int VSL_Dispatch(struct VSL_data *vd, vsl_handler *func, void *priv);
int VSL_NextLog(struct VSL_data *lh, unsigned char **pp);
int VSL_NextLog(struct VSL_data *lh, uint32_t **pp);
int VSL_Log_Arg(struct VSL_data *vd, int arg, const char *opt);
int VSL_Stat_Arg(struct VSL_data *vd, int arg, const char *opt);
void VSL_Close(struct VSL_data *vd);
......
......@@ -78,8 +78,8 @@ VSL_New(void)
vd->r_fd = -1;
/* XXX: Allocate only if -r option given ? */
vd->rbuflen = SHMLOG_NEXTTAG + 256;
vd->rbuf = malloc(vd->rbuflen);
vd->rbuflen = 256; /* XXX ?? */
vd->rbuf = malloc(vd->rbuflen * 4);
assert(vd->rbuf != NULL);
VTAILQ_INIT(&vd->sf_list);
......@@ -293,32 +293,6 @@ VSL_Find_Alloc(struct VSL_data *vd, const char *class, const char *type, const c
/*--------------------------------------------------------------------*/
int
VSL_OpenLog(struct VSL_data *vd)
{
unsigned char *p;
struct shmalloc *sha;
CHECK_OBJ_NOTNULL(vd, VSL_MAGIC);
if (VSL_Open(vd))
return (-1);
sha = vsl_find_alloc(vd, VSL_CLASS_LOG, VSL_TYPE_STAT_SMA, "");
assert(sha != NULL);
vd->log_start = SHA_PTR(sha);
vd->log_end = vd->log_start + sha->len - sizeof *sha;
vd->log_ptr = vd->log_start + 1;
if (!vd->d_opt && vd->r_fd == -1) {
for (p = vd->log_ptr; *p != SLT_ENDMARKER; )
p += SHMLOG_LEN(p) + SHMLOG_NEXTTAG;
vd->log_ptr = p;
}
return (0);
}
/*--------------------------------------------------------------------*/
const char *
VSL_Name(const struct VSL_data *vd)
{
......
......@@ -66,14 +66,14 @@ struct VSL_data {
/* Stuff relating the log records below here */
unsigned char *log_start;
unsigned char *log_end;
unsigned char *log_ptr;
uint32_t *log_start;
uint32_t *log_end;
uint32_t *log_ptr;
/* for -r option */
int r_fd;
unsigned rbuflen;
unsigned char *rbuf;
uint32_t *rbuf;
unsigned L_opt;
char *n_opt;
......
......@@ -50,7 +50,7 @@ SVNID("$Id$")
#include "vsl.h"
static int vsl_nextlog(struct VSL_data *vd, unsigned char **pp);
static int vsl_nextlog(struct VSL_data *vd, uint32_t **pp);
/*--------------------------------------------------------------------*/
......@@ -85,59 +85,58 @@ VSL_NonBlocking(struct VSL_data *vd, int nb)
/*--------------------------------------------------------------------*/
static int
vsl_nextlog(struct VSL_data *vd, unsigned char **pp)
vsl_nextlog(struct VSL_data *vd, uint32_t **pp)
{
unsigned char *p;
unsigned w, l;
uint8_t t;
int i;
CHECK_OBJ_NOTNULL(vd, VSL_MAGIC);
if (vd->r_fd != -1) {
assert(vd->rbuflen >= SHMLOG_DATA);
i = read(vd->r_fd, vd->rbuf, SHMLOG_DATA);
if (i != SHMLOG_DATA)
assert(vd->rbuflen >= 8);
i = read(vd->r_fd, vd->rbuf, 8);
if (i != 8)
return (-1);
l = SHMLOG_LEN(vd->rbuf) + SHMLOG_NEXTTAG;
l = 2 + VSL_WORDS(VSL_LEN(vd->rbuf));
if (vd->rbuflen < l) {
l += 200;
vd->rbuf = realloc(vd->rbuf, l);
l += 256;
vd->rbuf = realloc(vd->rbuf, l * 4);
assert(vd->rbuf != NULL);
vd->rbuflen = l;
}
l = SHMLOG_LEN(vd->rbuf) + 1;
i = read(vd->r_fd, vd->rbuf + SHMLOG_DATA, l);
i = read(vd->r_fd, vd->rbuf + 2, l * 4 - 8);
if (i != l)
return (-1);
*pp = vd->rbuf;
return (1);
}
p = vd->log_ptr;
for (w = 0; w < TIMEOUT_USEC;) {
if (*p == SLT_WRAPMARKER) {
p = vd->log_start + 1;
t = VSL_TAG(vd->log_ptr);
if (t == SLT_WRAPMARKER) {
vd->log_ptr = vd->log_start + 1;
continue;
}
if (*p == SLT_ENDMARKER) {
if (t == SLT_ENDMARKER) {
/* XXX: check log_start[0] */
if (vd->flags & F_NON_BLOCKING)
return (-1);
w += SLEEP_USEC;
usleep(SLEEP_USEC);
continue;
}
l = SHMLOG_LEN(p);
vd->log_ptr = p + l + SHMLOG_NEXTTAG;
*pp = p;
*pp = vd->log_ptr;
vd->log_ptr = VSL_NEXT(vd->log_ptr, VSL_LEN(vd->log_ptr));
return (1);
}
vd->log_ptr = p;
*pp = NULL;
return (0);
}
int
VSL_NextLog(struct VSL_data *vd, unsigned char **pp)
VSL_NextLog(struct VSL_data *vd, uint32_t **pp)
{
unsigned char *p, t;
uint32_t *p;
unsigned char t;
unsigned u, l;
int i;
......@@ -146,9 +145,10 @@ VSL_NextLog(struct VSL_data *vd, unsigned char **pp)
i = vsl_nextlog(vd, &p);
if (i != 1)
return (i);
u = SHMLOG_ID(p);
l = SHMLOG_LEN(p);
switch(p[SHMLOG_TAG]) {
u = VSL_ID(p);
l = VSL_LEN(p);
t = VSL_TAG(p);
switch(t) {
case SLT_SessionOpen:
case SLT_ReqStart:
vbit_set(vd->vbm_client, u);
......@@ -169,7 +169,7 @@ VSL_NextLog(struct VSL_data *vd, unsigned char **pp)
if (--vd->keep == 0)
return (-1);
}
t = p[SHMLOG_TAG];
if (vbit_test(vd->vbm_select, t)) {
*pp = p;
return (1);
......@@ -182,16 +182,16 @@ VSL_NextLog(struct VSL_data *vd, unsigned char **pp)
continue;
if (vd->regincl != NULL) {
i = VRE_exec(vd->regincl,
(char *)p + SHMLOG_DATA,
SHMLOG_LEN(p) - SHMLOG_DATA, /* Length */
VSL_DATA(p),
VSL_LEN(p), /* Length */
0, 0, NULL, 0);
if (i == VRE_ERROR_NOMATCH)
continue;
}
if (vd->regexcl != NULL) {
i = VRE_exec(vd->regincl,
(char *)p + SHMLOG_DATA,
SHMLOG_LEN(p) - SHMLOG_DATA, /* Length */
VSL_DATA(p),
VSL_LEN(p), /* Length */
0, 0, NULL, 0);
if (i != VRE_ERROR_NOMATCH)
continue;
......@@ -208,22 +208,21 @@ VSL_Dispatch(struct VSL_data *vd, vsl_handler *func, void *priv)
{
int i;
unsigned u, l, s;
unsigned char *p;
uint32_t *p;
CHECK_OBJ_NOTNULL(vd, VSL_MAGIC);
while (1) {
i = VSL_NextLog(vd, &p);
if (i <= 0)
if (i != 1)
return (i);
u = SHMLOG_ID(p);
l = SHMLOG_LEN(p);
u = VSL_ID(p);
l = VSL_LEN(p);
s = 0;
if (vbit_test(vd->vbm_backend, u))
s |= VSL_S_BACKEND;
if (vbit_test(vd->vbm_client, u))
s |= VSL_S_CLIENT;
if (func(priv,
p[SHMLOG_TAG], u, l, s, (char *)p + SHMLOG_DATA))
if (func(priv, VSL_TAG(p), u, l, s, VSL_DATA(p)))
return (1);
}
}
......@@ -257,3 +256,28 @@ VSL_H_Print(void *priv, enum shmlogtag tag, unsigned fd, unsigned len,
fprintf(fo, "%5d %-12s %c %.*s\n", fd, VSL_tags[tag], type, len, ptr);
return (0);
}
/*--------------------------------------------------------------------*/
int
VSL_OpenLog(struct VSL_data *vd)
{
struct shmalloc *sha;
CHECK_OBJ_NOTNULL(vd, VSL_MAGIC);
if (VSL_Open(vd))
return (-1);
sha = vsl_find_alloc(vd, VSL_CLASS_LOG, "", "");
assert(sha != NULL);
vd->log_start = SHA_PTR(sha);
vd->log_end = (void*)((char *)vd->log_start + sha->len - sizeof *sha);
vd->log_ptr = vd->log_start + 1;
if (!vd->d_opt && vd->r_fd == -1) {
while (VSL_TAG(vd->log_ptr) != SLT_ENDMARKER)
vd->log_ptr = VSL_NEXT(vd->log_ptr, VSL_LEN(vd->log_ptr));
}
return (0);
}
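
For completeness, the live-tail protocol the reader follows in vsl_nextlog() above can be sketched as: poll on SLT_ENDMARKER, restart from log_start + 1 on SLT_WRAPMARKER, otherwise hand back the record and advance. A hedged sketch; the demo_ names, the numeric tag values and the poll interval are assumptions, not taken from the source, and the serial-number check (log_start[0]) is omitted:

    #include <stdint.h>
    #include <unistd.h>

    #define VSL_WORDS(len)          (((len) + 3) / 4)
    #define VSL_NEXT(ptr, len)      ((ptr) + 2 + VSL_WORDS(len))
    #define VSL_LEN(ptr)            ((ptr)[0] & 0xffff)
    #define VSL_TAG(ptr)            ((ptr)[0] >> 24)

    /* Assumed tag values for illustration only. */
    enum { DEMO_ENDMARKER = 254, DEMO_WRAPMARKER = 255 };

    static uint32_t *
    demo_next(uint32_t *log_start, uint32_t **ptr)
    {
            for (;;) {
                    uint32_t t = VSL_TAG(*ptr);

                    if (t == DEMO_WRAPMARKER) {     /* writer wrapped */
                            *ptr = log_start + 1;
                            continue;
                    }
                    if (t == DEMO_ENDMARKER) {      /* nothing new yet */
                            usleep(10000);
                            continue;
                    }
                    uint32_t *rec = *ptr;
                    *ptr = VSL_NEXT(*ptr, VSL_LEN(*ptr));
                    return (rec);
            }
    }
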