Commit 4e379223 authored by Poul-Henning Kamp's avatar Poul-Henning Kamp

Optimize shmlog writing:

If we know the record length, only hold the mutex while we reserve
the space.  Until we change the first byte, nothing bad can happen.

XXX: a memory barrier is strictly speaking necessary before we assign
the first byte.

If there is no '%' in the format string, treat it as fixed length for
speed.



git-svn-id: http://www.varnish-cache.org/svn/trunk/varnish-cache@895 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent 7bb9d892
......@@ -61,6 +61,7 @@ VSLR(enum shmlogtag tag, unsigned id, const char *b, const char *e)
e = b + l;
}
/* Only hold the lock while we find our space */
AZ(pthread_mutex_lock(&vsl_mtx));
assert(loghead->ptr < loghead->size);
......@@ -68,17 +69,18 @@ VSLR(enum shmlogtag tag, unsigned id, const char *b, const char *e)
if (loghead->ptr + 5 + l + 1 > loghead->size)
vsl_wrap();
p = logstart + loghead->ptr;
loghead->ptr += 5 + l;
p[5 + l] = SLT_ENDMARKER;
assert(loghead->ptr < loghead->size);
AZ(pthread_mutex_unlock(&vsl_mtx));
p[1] = l & 0xff;
p[2] = (id >> 8) & 0xff;
p[3] = id & 0xff;
memcpy(p + 4, b, l);
p[4 + l] = '\0';
p[5 + l] = SLT_ENDMARKER;
/* XXX: memory barrier */
p[0] = tag;
loghead->ptr += 5 + l;
assert(loghead->ptr < loghead->size);
AZ(pthread_mutex_unlock(&vsl_mtx));
}
......@@ -91,6 +93,12 @@ VSL(enum shmlogtag tag, unsigned id, const char *fmt, ...)
va_start(ap, fmt);
p = strchr(fmt, '%');
if (p == NULL) {
VSLR(tag, id, fmt, NULL);
return;
}
AZ(pthread_mutex_lock(&vsl_mtx));
assert(loghead->ptr < loghead->size);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment