Commit a0308b8a authored by Poul-Henning Kamp

Clean up and unify shmlog writing in the worker process.

Always wrap the log on worker process startup.

Detect such wraps in libvarnishapi.



git-svn-id: http://www.varnish-cache.org/svn/trunk/varnish-cache@4914 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent 77e7172e
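
For context on the record format this diff manipulates: vsl_w0() packs a record's tag into the top byte of the first header word and its length into the low bits, and vsl_hdr() stores the id in the second word. Below is a minimal standalone sketch of that encoding; the decode helpers are hypothetical stand-ins consistent with vsl_w0()'s packing, not the actual VSL_TAG()/VSL_LEN() macros.

#include <assert.h>
#include <stdint.h>

/* Same packing as vsl_w0(): tag in the top byte, length below. */
static uint32_t
w0(uint32_t type, uint32_t length)
{
	return (((type & 0xff) << 24) | length);
}

/* Hypothetical decoders, simply the inverse of the packing above. */
static uint8_t  w0_tag(uint32_t w) { return ((w >> 24) & 0xff); }
static uint32_t w0_len(uint32_t w) { return (w & 0x00ffffff); }

int
main(void)
{
	uint32_t hdr = w0(31, 100);	/* e.g. tag 31, 100-byte payload */

	assert(w0_tag(hdr) == 31);
	assert(w0_len(hdr) == 100);
	return (0);
}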
@@ -50,23 +50,25 @@ vsl_w0(uint32_t type, uint32_t length)
 	return (((type & 0xff) << 24) | length);
 }
 
-#define LOCKSHM(foo)					\
-	do {						\
-		if (pthread_mutex_trylock(foo)) {	\
-			AZ(pthread_mutex_lock(foo));	\
-			VSL_stats->shm_cont++;		\
-		}					\
-	} while (0);
+/*--------------------------------------------------------------------*/
+
+static inline void
+vsl_hdr(enum shmlogtag tag, uint32_t *p, unsigned len, unsigned id)
+{
+
+	assert(((uintptr_t)p & 0x3) == 0);
+	p[1] = id;
+	VMB();
+	p[0] = vsl_w0(tag, len);
+}
 
-#define UNLOCKSHM(foo)	AZ(pthread_mutex_unlock(foo))
+/*--------------------------------------------------------------------*/
 
 static void
 vsl_wrap(void)
 {
 
+	assert(vsl_log_nxt < vsl_log_end);
+	assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
 	vsl_log_start[1] = vsl_w0(SLT_ENDMARKER, 0);
 	do
 		vsl_log_start[0]++;
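
The store ordering inside vsl_hdr() above is the heart of the lock-free read side: the id word is written first, then a write memory barrier, then the tag/length word. Because vsl_get() leaves an SLT_ENDMARKER header at the next free slot, a reader that observes any other tag is guaranteed to observe a fully written record. A compressed sketch of that publication pattern, using a GCC builtin as a stand-in for Varnish's VMB() macro from vmb.h:

#include <stdint.h>

/* Stand-in for Varnish's VMB() write barrier. */
#define VMB()	__sync_synchronize()

/*
 * Publish a record the way vsl_hdr() does: id word first, barrier,
 * then the header word, which flips the slot from "end of log" to a
 * complete, readable record in a single 32-bit store.
 */
static void
publish(uint32_t *p, uint32_t tag, uint32_t len, uint32_t id)
{
	p[1] = id;				/* id word first */
	VMB();					/* order the two stores */
	p[0] = (((tag & 0xff) << 24) | len);	/* header word last */
}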
@@ -77,46 +79,39 @@ vsl_wrap(void)
 	VSL_stats->shm_cycles++;
 }
 
-/*--------------------------------------------------------------------*/
-
-static inline void
-vsl_hdr(enum shmlogtag tag, uint32_t *p, unsigned len, unsigned id)
-{
-
-	assert(((uintptr_t)p & 0x3) == 0);
-	p[1] = id;
-	VMB();
-	p[0] = vsl_w0(tag, len);
-}
-
 /*--------------------------------------------------------------------
  * Reserve bytes for a record, wrap if necessary
  */
 
 static uint32_t *
-vsl_get(unsigned len)
+vsl_get(unsigned len, unsigned records, unsigned flushes)
 {
 	uint32_t *p;
-	uint32_t u;
 
+	if (pthread_mutex_trylock(&vsl_mtx)) {
+		AZ(pthread_mutex_lock(&vsl_mtx));
+		VSL_stats->shm_cont++;
+	}
 	assert(vsl_log_nxt < vsl_log_end);
 	assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
 
-	u = VSL_WORDS(len);
+	VSL_stats->shm_writes++;
+	VSL_stats->shm_flushes += flushes;
+	VSL_stats->shm_records += records;
 
 	/* Wrap if necessary */
 	if (VSL_NEXT(vsl_log_nxt, len) >= vsl_log_end)
 		vsl_wrap();
 	p = vsl_log_nxt;
 	vsl_log_nxt = VSL_NEXT(vsl_log_nxt, len);
-	*vsl_log_nxt = vsl_w0(SLT_ENDMARKER, 0);
 	assert(vsl_log_nxt < vsl_log_end);
 	assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
+	AZ(pthread_mutex_unlock(&vsl_mtx));
+	*vsl_log_nxt = vsl_w0(SLT_ENDMARKER, 0);
+	printf("GET %p -> %p\n", p, vsl_log_nxt);
 	return (p);
 }
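
With the LOCKSHM/UNLOCKSHM macros gone, the trylock-then-block idiom now lives directly in vsl_get(), so every caller gets contention accounting for free. The idiom in isolation (the counter name is illustrative, standing in for VSL_stats->shm_cont):

#include <assert.h>
#include <pthread.h>

#define AZ(x)	assert((x) == 0)

static pthread_mutex_t	vsl_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned		shm_cont;	/* contention counter */

static void
lock_and_count(void)
{
	/* Fast path: an uncontended trylock avoids the blocking call. */
	if (pthread_mutex_trylock(&vsl_mtx)) {
		AZ(pthread_mutex_lock(&vsl_mtx));
		shm_cont++;		/* somebody else held the lock */
	}
}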
@@ -137,12 +132,7 @@ VSLR(enum shmlogtag tag, int id, const char *b, unsigned len)
 	if (len > mlen)
 		len = mlen;
 
-	/* Only hold the lock while we find our space */
-	LOCKSHM(&vsl_mtx);
-	VSL_stats->shm_writes++;
-	VSL_stats->shm_records++;
-	p = vsl_get(len);
-	UNLOCKSHM(&vsl_mtx);
+	p = vsl_get(len, 1, 0);
 
 	memcpy(p + 2, b, len);
 	vsl_hdr(tag, p, len, id);
@@ -189,12 +179,7 @@ WSL_Flush(struct worker *w, int overflow)
 	assert(l >= 8);
 
-	LOCKSHM(&vsl_mtx);
-	VSL_stats->shm_flushes += overflow;
-	VSL_stats->shm_writes++;
-	VSL_stats->shm_records += w->wlr;
-	p = vsl_get(l - 8);
-	UNLOCKSHM(&vsl_mtx);
+	p = vsl_get(l - 8, w->wlr, overflow);
 
 	memcpy(p + 1, w->wlb + 1, l - 4);
 	VWMB();
@@ -281,7 +266,8 @@ VSL_Init(void)
 {
 
 	AZ(pthread_mutex_init(&vsl_mtx, NULL));
-	loghead->starttime = TIM_real();
+	vsl_wrap();
+	loghead->starttime = (intmax_t)TIM_real();
 	loghead->panicstr[0] = '\0';
 	memset(VSL_stats, 0, sizeof *VSL_stats);
 	loghead->child_pid = getpid();
...
@@ -90,6 +90,7 @@ vsl_nextlog(struct VSL_data *vd, uint32_t **pp)
 	unsigned w, l;
 	uint8_t t;
 	int i;
+	uint32_t seq;
 
 	CHECK_OBJ_NOTNULL(vd, VSL_MAGIC);
 	if (vd->r_fd != -1) {
@@ -110,23 +111,28 @@ vsl_nextlog(struct VSL_data *vd, uint32_t **pp)
 		*pp = vd->rbuf;
 		return (1);
 	}
 
+	seq = vd->log_start[0];
 	for (w = 0; w < TIMEOUT_USEC;) {
 		t = VSL_TAG(vd->log_ptr);
-		if (t == SLT_WRAPMARKER) {
-			vd->log_ptr = vd->log_start + 1;
-			continue;
+
+		if (t != SLT_ENDMARKER) {
+			*pp = vd->log_ptr;
+			vd->log_ptr = VSL_NEXT(vd->log_ptr, VSL_LEN(vd->log_ptr));
+			return (1);
 		}
-		if (t == SLT_ENDMARKER) {
-			/* XXX: check log_start[0] */
-			if (vd->flags & F_NON_BLOCKING)
-				return (-1);
-			w += SLEEP_USEC;
-			usleep(SLEEP_USEC);
+
+		if (t == SLT_WRAPMARKER || vd->log_start[0] != seq) {
+			vd->log_ptr = vd->log_start + 1;
+			seq = vd->log_start[0];
 			continue;
 		}
-		*pp = vd->log_ptr;
-		vd->log_ptr = VSL_NEXT(vd->log_ptr, VSL_LEN(vd->log_ptr));
-		return (1);
+
+		/* XXX: check log_start[0] */
+		if (vd->flags & F_NON_BLOCKING)
+			return (-1);
+		w += SLEEP_USEC;
+		usleep(SLEEP_USEC);
 	}
 	*pp = NULL;
 	return (0);
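
The libvarnishapi side now snapshots log_start[0], the cycle counter that vsl_wrap() increments, and since VSL_Init() calls vsl_wrap() on every worker startup, a restarted child also shows up as a counter change. A hedged sketch of the detection logic on its own, with hypothetical struct and function names mirroring the fields used in vsl_nextlog():

#include <stdint.h>

/* Hypothetical reader state. */
struct log_reader {
	const volatile uint32_t	*log_start;	/* [0] holds the cycle counter */
	const volatile uint32_t	*log_ptr;	/* current read position */
	uint32_t		 seq;		/* counter value we last saw */
};

/*
 * Return 1 and rewind if the writer wrapped (or restarted) behind us,
 * i.e. our cached cycle counter no longer matches the live one.
 */
static int
check_wrap(struct log_reader *r)
{
	if (r->log_start[0] == r->seq)
		return (0);
	r->seq = r->log_start[0];
	r->log_ptr = r->log_start + 1;
	return (1);
}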