Commit ef23d995 authored by Martin Blix Grydeland's avatar Martin Blix Grydeland

New VSL log format, with fraction indexes available to safely jump into the log.
parent f370e8d1
......@@ -44,9 +44,12 @@
static pthread_mutex_t vsl_mtx;		/* Serializes log-space allocation in vsl_get() */
static pthread_mutex_t vsm_mtx;
static uint32_t *vsl_start;		/* NOTE(review): pre-VSL_head log pointer; appears superseded by vsl_head — confirm remaining uses */
static struct VSL_head *vsl_head;	/* Head of the shared memory log */
static const uint32_t *vsl_end;		/* One past the last word of vsl_head->log */
static uint32_t *vsl_ptr;		/* Next write position in vsl_head->log */
static unsigned vsl_segment;		/* Segment currently being appended to */
static ssize_t vsl_segsize;		/* Log words per segment */
static unsigned vsl_seq;		/* Non-zero sequence number, bumped on each wrap */
struct VSC_C_main *VSC_C_main;
......@@ -93,17 +96,21 @@ static void
vsl_wrap(void)
{

	/*
	 * Wrap the shared memory log: terminate the current tail and
	 * restart writing from the front of vsl_head->log.
	 *
	 * (This span previously interleaved the pre-VSL_head version of the
	 * function; the stale vsl_start-based lines are removed here.)
	 */
	assert(vsl_ptr >= vsl_head->log);
	assert(vsl_ptr < vsl_end);
	vsl_head->log[0] = VSL_ENDMARKER;
	/* Bump the sequence number, skipping 0 which means "unset" */
	do
		vsl_seq++;
	while (vsl_seq == 0);
	vsl_head->seq = vsl_seq;
	vsl_head->segments[0] = 0;
	/* Make seq and segment table visible before the wrap marker */
	VWMB();
	if (vsl_ptr != vsl_head->log) {
		*vsl_ptr = VSL_WRAPMARKER;
		vsl_ptr = vsl_head->log;
	}
	vsl_segment = 0;
	vsl_head->segment = vsl_segment;
	VSC_C_main->shm_cycles++;
}
......@@ -116,6 +123,7 @@ vsl_get(unsigned len, unsigned records, unsigned flushes)
{
uint32_t *p;
int err;
unsigned old_segment;
err = pthread_mutex_trylock(&vsl_mtx);
if (err == EBUSY) {
......@@ -137,11 +145,25 @@ vsl_get(unsigned len, unsigned records, unsigned flushes)
p = vsl_ptr;
vsl_ptr = VSL_END(vsl_ptr, len);
assert(vsl_ptr < vsl_end);
assert(((uintptr_t)vsl_ptr & 0x3) == 0);
*vsl_ptr = VSL_ENDMARKER;
assert(vsl_ptr < vsl_end);
assert(((uintptr_t)vsl_ptr & 0x3) == 0);
old_segment = vsl_segment;
while ((vsl_ptr - vsl_head->log) / vsl_segsize > vsl_segment) {
if (vsl_segment == VSL_SEGMENTS - 1)
break;
vsl_segment++;
vsl_head->segments[vsl_segment] = vsl_ptr - vsl_head->log;
}
if (old_segment != vsl_segment) {
/* Write memory barrier to ensure ENDMARKER and new table
values are seen before new segment number */
VWMB();
vsl_head->segment = vsl_segment;
}
AZ(pthread_mutex_unlock(&vsl_mtx));
return (p);
......@@ -252,7 +274,7 @@ VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
assert(vsl->wlp < vsl->wle);
/* Flush if necessary */
if (VSL_END(vsl->wlp, l) >= vsl->wle)
VSL_Flush(vsl, 1);
assert(VSL_END(vsl->wlp, l) < vsl->wle);
......@@ -298,7 +320,7 @@ VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
mlen = cache_param->shm_reclen;
/* Flush if we cannot fit a full size record */
if (VSL_END(vsl->wlp, mlen) >= vsl->wle)
VSL_Flush(vsl, 1);
......@@ -357,34 +379,37 @@ vsm_cleaner(void *priv)
void
VSM_Init(void)
{
	int i;
	pthread_t tp;

	/*
	 * Set up the shared memory log and main statistics counters, then
	 * start the VSM cleaner thread.
	 *
	 * (This span previously interleaved the pre-VSL_head version of the
	 * function; the stale vsl_log_start/vsl_start lines are removed.)
	 */
	AZ(pthread_mutex_init(&vsl_mtx, NULL));
	AZ(pthread_mutex_init(&vsm_mtx, NULL));

	vsl_head = VSM_Alloc(cache_param->vsl_space, VSL_CLASS, "", "");
	AN(vsl_head);
	vsl_end = vsl_head->log +
	    (cache_param->vsl_space - sizeof *vsl_head) / sizeof *vsl_end;
	vsl_segsize = (vsl_end - vsl_head->log) / VSL_SEGMENTS;

	memset(vsl_head, 0, sizeof *vsl_head);
	memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
	vsl_head->segments[0] = 0;
	for (i = 1; i < VSL_SEGMENTS; i++)
		vsl_head->segments[i] = -1;	/* -1: no records in segment */
	vsl_ptr = vsl_head->log;
	*vsl_ptr = VSL_ENDMARKER;

	VWMB();
	/* Random non-zero starting sequence number */
	do
		vsl_seq = random();
	while (vsl_seq == 0);
	vsl_head->seq = vsl_seq;
	VWMB();

	VSC_C_main = VSM_Alloc(sizeof *VSC_C_main,
	    VSC_CLASS, VSC_type_main, "");
	AN(VSC_C_main);

	vsl_wrap();
	// VSM_head->starttime = (intmax_t)VTIM_real();
	memset(VSC_C_main, 0, sizeof *VSC_C_main);
	// VSM_head->child_pid = getpid();

	AZ(pthread_create(&tp, NULL, vsm_cleaner, NULL));
}
......
......@@ -34,15 +34,27 @@
#ifndef VAPI_VSL_FMT_H_INCLUDED
#define VAPI_VSL_FMT_H_INCLUDED
#include "vapi/vsm_int.h"
#define VSL_CLASS "Log"
#define VSL_SEGMENTS 8
/*
* Shared memory log format
*
* The log is structured as an array of 32bit unsigned integers.
* The segments array has index values providing safe entry points into
* the log, where each element N gives the index of the first log record
in the Nth fraction of the log. An index value of -1 indicates that no
log records exist in this fraction.
*
The segment member holds the index of the segment to which Varnish is
currently appending log data.
*
* The seq member contains a non-zero seq number randomly initialized,
* which increases whenever writing the log starts from the front.
*
The log member points to an array of 32bit unsigned integers containing
log records.
*
* Each logrecord consist of:
* [n] = ((type & 0xff) << 24) | (length & 0xffff)
......@@ -53,6 +65,15 @@
* changing corresponding magic numbers in varnishd/cache/cache_shmlog.c
*/
/*
 * Layout of the shared memory log: a fixed head followed by the record
 * array, per the format description above.
 */
struct VSL_head {
#define VSL_HEAD_MARKER "VSLHEAD0" /* Incr. as version# */
char marker[VSM_MARKER_LEN];	/* Holds VSL_HEAD_MARKER; identifies and versions the layout */
ssize_t segments[VSL_SEGMENTS];	/* Offset into log[] of first record per fraction; -1 = none */
unsigned segment; /* Current varnishd segment */
unsigned seq; /* Non-zero seq number */
uint32_t log[];	/* The log records (flexible array member) */
};
#define VSL_CLIENTMARKER (1U<<30)
#define VSL_BACKENDMARKER (1U<<31)
#define VSL_IDENTMASK (~(3U<<30))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment