Commit 356d46a8 authored by Poul-Henning Kamp's avatar Poul-Henning Kamp

First part of VSM overhaul: this compiles and varnishd runs,

but varnishapi and users do not work yet.
parent b40bdeb0
......@@ -853,7 +853,7 @@ int SES_Schedule(struct sess *sp);
void VSL_Init(void);
void *VSM_Alloc(unsigned size, const char *class, const char *type,
const char *ident);
void VSM_Free(const void *ptr);
void VSM_Free(void *ptr);
#ifdef VSL_ENDMARKER
void VSL(enum VSL_tag_e tag, int id, const char *fmt, ...);
void WSLR(struct worker *w, enum VSL_tag_e tag, int id, txt t);
......
......@@ -30,8 +30,10 @@
#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include "cache.h"
#include "common/heritage.h"
#include "cache_backend.h" // For w->vbc
......@@ -300,25 +302,32 @@ WSLB(struct worker *w, enum VSL_tag_e tag, const char *fmt, ...)
/*
 * Initialize the child-process end of the shared-memory log: set up
 * the vsl/vsm mutexes, allocate the VSL log space and the main
 * counter block, and prime the log with an end marker and a random
 * non-zero sequence word.
 *
 * NOTE(review): this span is rendered from a commit diff without
 * +/- markers, so lines of the old and the new implementation are
 * interleaved below.  The //-commented lines are the new (disabled)
 * versions of their uncommented neighbours.
 */
void
VSL_Init(void)
{
struct VSM_chunk *vsc;
uint32_t *vsl_log_start;
AZ(pthread_mutex_init(&vsl_mtx, NULL));
AZ(pthread_mutex_init(&vsm_mtx, NULL));
VSM__Clean();	/* old API -- appears to be removed by this commit */
vsl_log_start = VSM_Alloc(cache_param->vsl_space, VSL_CLASS, "", "");
AN(vsl_log_start);
vsl_log_start[1] = VSL_ENDMARKER;
VWMB();	/* presumably a write memory barrier (vmb.h) -- TODO confirm */
/* Pick a random non-zero 16-bit value for the first log word */
do
*vsl_log_start = random() & 0xffff;
while (*vsl_log_start == 0);
VWMB();
/* old code: locate the VSL chunk by iterating the VSM */
VSM_ITER(vsc)
if (!strcmp(vsc->class, VSL_CLASS))
break;
AN(vsc);
vsl_start = VSM_PTR(vsc);
vsl_end = VSM_NEXT(vsc);
/* new code: use the allocation made above directly */
vsl_start = vsl_log_start;
vsl_end = vsl_start + cache_param->vsl_space;
vsl_ptr = vsl_start + 1;
VSC_C_main = VSM_Alloc(sizeof *VSC_C_main,
VSC_CLASS, VSC_TYPE_MAIN, "");
AN(VSC_C_main);
vsl_wrap();
VSM_head->starttime = (intmax_t)VTIM_real();
// VSM_head->starttime = (intmax_t)VTIM_real();
memset(VSC_C_main, 0, sizeof *VSC_C_main);
VSM_head->child_pid = getpid();
// VSM_head->child_pid = getpid();
}
/*--------------------------------------------------------------------*/
......@@ -327,19 +336,19 @@ void *
/*
 * Thread-safe child-side wrapper: allocate a named VSM chunk while
 * holding vsm_mtx.  (Return type `void *` is on the preceding diff
 * hunk-header line.)
 *
 * NOTE(review): commit-diff residue without +/- markers -- each old
 * line is immediately followed by its replacement:
 *   old: void *p;          new: volatile void *p;
 *   old: VSM__Alloc(...)   new: VSM_common_alloc(heritage.vsm, ...)
 *   old: return (p);       new: return (TRUST_ME(p));
 */
VSM_Alloc(unsigned size, const char *class, const char *type,
const char *ident)
{
void *p;
volatile void *p;
AZ(pthread_mutex_lock(&vsm_mtx));
p = VSM__Alloc(size, class, type, ident);
p = VSM_common_alloc(heritage.vsm, size, class, type, ident);
AZ(pthread_mutex_unlock(&vsm_mtx));
return (p);
return (TRUST_ME(p));
}
/*
 * Thread-safe child-side wrapper: release a VSM chunk while holding
 * vsm_mtx.
 *
 * NOTE(review): commit-diff residue -- the const-qualified prototype
 * and the VSM__Free() call are the old version; the line after each
 * is its replacement (the parameter loses `const` because the new
 * common code writes to the chunk header on free).
 */
void
VSM_Free(const void *ptr)
VSM_Free(void *ptr)
{
AZ(pthread_mutex_lock(&vsm_mtx));
VSM__Free(ptr);
VSM_common_free(heritage.vsm, ptr);
AZ(pthread_mutex_unlock(&vsm_mtx));
}
......@@ -74,6 +74,13 @@ void mgt_child_inherit(int fd, const char *what);
/* vsm.c */
struct vsm_sc;
struct vsm_sc *VSM_common_new(void *ptr, unsigned len);
void *VSM_common_alloc(struct vsm_sc *sc, unsigned size,
const char *class, const char *type, const char *ident);
void VSM_common_free(struct vsm_sc *sc, void *ptr);
void VSM_common_delete(struct vsm_sc *sc);
// extern struct VSM_head *VSM_head;
// extern const struct VSM_chunk *vsm_end;
......
......@@ -27,28 +27,35 @@
*
* VSM stuff common to manager and child.
*
* We have three potential conflicts we need to lock against here:
* We have three potential conflicts we need to deal with:
*
* VSM-studying programs (varnishstat...) vs. everybody else
* The VSM studying programs only have read-only access to the VSM
* so everybody else must use memory barriers, stable storage and
* similar tricks to keep the VSM image in sync (long enough) for
* the studying programs.
* It can not be prevented, and may indeed in some cases be
* desirable for such programs to write to VSM, for instance to
* zero counters.
* Varnishd should never trust the integrity of VSM content.
*
* Manager process vs child process.
* Will only muck about in VSM when child process is not running
* Responsible for cleaning up any mess left behind by dying child.
* The manager will create a fresh VSM for each child process launch
* and not muck about with VSM while the child runs. If the child
* crashes, the panicstring will be evacuated and the VSM possibly
* saved for debugging, and a new VSM created before the child is
* started again.
*
* Child process threads
* Pthread locking necessary.
*
* XXX: not all of this is in place yet.
*/
#include "config.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
......@@ -58,181 +65,267 @@
#include "vmb.h"
#include "vtim.h"
/* These two come from beyond (mgt_shmem.c actually) */
struct VSM_head *VSM_head;
const struct VSM_chunk *vsm_end;
/*
 * Begin a VSM mutation: save the current allocation sequence number
 * and zero it, so VSM-reading programs (varnishstat etc.) can detect
 * that the image is in flux.  The saved value is handed back to
 * vsm_release() when the mutation is complete.
 * (This is the old implementation being removed by this commit.)
 */
static unsigned
vsm_mark(void)
{
unsigned seq;
seq = VSM_head->alloc_seq;
VSM_head->alloc_seq = 0;	/* 0 signals "inconsistent" to readers */
VWMB();	/* presumably a write memory barrier (vmb.h) -- TODO confirm */
return (seq);
}
/*
 * End a VSM mutation started with vsm_mark(): bump the saved sequence
 * number and store it back, skipping the reserved value 0.  A seq of
 * 0 means vsm_mark() was not effective -- nothing to publish.
 * NOTE(review): the separator comment inside the body is diff residue
 * from an adjacent hunk, not part of the original function.
 */
static void
vsm_release(unsigned seq)
{
/*--------------------------------------------------------------------*/
if (seq == 0)
return;
VWMB();
do
VSM_head->alloc_seq = ++seq;
while (VSM_head->alloc_seq == 0);
}
/*
 * One contiguous byte range inside the shared memory segment (or,
 * for the "bogus" malloc fallback, outside it).  Each range lives on
 * exactly one of the vsm_sc lists: r_used, r_cooling, r_free or
 * r_bogus.
 */
struct vsm_range {
unsigned magic;
#define VSM_RANGE_MAGIC 0x8d30f14
VTAILQ_ENTRY(vsm_range) list;	/* membership in one vsm_sc list */
unsigned off;	/* offset of the range from vsm_sc->b */
unsigned len;	/* length in bytes, incl. the VSM_chunk header */
double cool;	/* on r_cooling: time after which it may be reused */
struct VSM_chunk *chunk;	/* chunk header in shmem (unset for bogus) */
void *ptr;	/* payload pointer handed to the caller */
};
/*
 * Management state for one VSM segment.  This struct itself is
 * private heap state; only the segment it describes (b .. b+len,
 * headed by *head) is in shared memory.
 */
struct vsm_sc {
unsigned magic;
#define VSM_SC_MAGIC 0x8b83270d
char *b;	/* base address of the shared memory segment */
unsigned len;	/* total segment length in bytes */
struct VSM_head *head;	/* segment header, located at b */
VTAILQ_HEAD(,vsm_range) r_used;	/* live allocations, newest at head */
VTAILQ_HEAD(,vsm_range) r_cooling;	/* freed, waiting out readers */
VTAILQ_HEAD(,vsm_range) r_free;	/* free space, kept sorted by size */
VTAILQ_HEAD(,vsm_range) r_bogus;	/* malloc'd overflow allocations */
};
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* The free list is sorted by size, which means that collapsing ranges
* on free becomes a multi-pass operation.
*/
/*
 * Insert a free range into the size-sorted free list, first trying to
 * coalesce it with an adjacent free range.  After a merge the merged
 * range is removed and re-inserted recursively so it lands at its new
 * size-sorted position (hence the "multi-pass" note above).
 *
 * NOTE(review): this span is commit-diff residue: the body of the old
 * vsm_cleanup() (the sha/VSM_ITER-based code) is interleaved with the
 * new vsm_common_insert_free() (the vr/VTAILQ-based code) without
 * +/- markers.  Do not read it as one coherent function.
 */
static void
vsm_cleanup(void)
vsm_common_insert_free(struct vsm_sc *sc, struct vsm_range *vr)
{
unsigned now = (unsigned)VTIM_mono();
struct VSM_chunk *sha, *sha2;
unsigned seq;
CHECK_OBJ_NOTNULL(VSM_head, VSM_HEAD_MAGIC);
VSM_ITER(sha) {
if (strcmp(sha->class, VSM_CLASS_COOL))
continue;
if (sha->state + VSM_COOL_TIME < now)
break;
}
if (sha == NULL)
return;
seq = vsm_mark();
/* First pass, free, and collapse with next if applicable */
VSM_ITER(sha) {
if (strcmp(sha->class, VSM_CLASS_COOL))
continue;
if (sha->state + VSM_COOL_TIME >= now)
continue;
bprintf(sha->class, "%s", VSM_CLASS_FREE);
bprintf(sha->type, "%s", "");
bprintf(sha->ident, "%s", "");
sha2 = VSM_NEXT(sha);
assert(sha2 <= vsm_end);
if (sha2 == vsm_end)
break;
CHECK_OBJ_NOTNULL(sha2, VSM_CHUNK_MAGIC);
if (!strcmp(sha2->class, VSM_CLASS_FREE)) {
sha->len += sha2->len;
memset(sha2, 0, sizeof *sha2);
struct vsm_range *vr2;
CHECK_OBJ_NOTNULL(sc, VSM_SC_MAGIC);
CHECK_OBJ_NOTNULL(vr, VSM_RANGE_MAGIC);
/* First try to see if we can collapse anything */
VTAILQ_FOREACH(vr2, &sc->r_free, list) {
if (vr2->off == vr->off + vr->len) {
vr2->off = vr->off;
vr2->len += vr->len;
FREE_OBJ(vr);
VTAILQ_REMOVE(&sc->r_free, vr2, list);
vsm_common_insert_free(sc, vr2);
return;
}
if (vr->off == vr2->off + vr2->len) {
vr2->len += vr->len;
FREE_OBJ(vr);
VTAILQ_REMOVE(&sc->r_free, vr2, list);
vsm_common_insert_free(sc, vr2);
return;
}
sha->state = 0;
}
/* Second pass, collaps with prev if applicable */
VSM_ITER(sha) {
if (strcmp(sha->class, VSM_CLASS_FREE))
continue;
sha2 = VSM_NEXT(sha);
assert(sha2 <= vsm_end);
if (sha2 == vsm_end)
break;
CHECK_OBJ_NOTNULL(sha2, VSM_CHUNK_MAGIC);
if (!strcmp(sha2->class, VSM_CLASS_FREE)) {
sha->len += sha2->len;
memset(sha2, 0, sizeof *sha2);
/* Insert in size order */
VTAILQ_FOREACH(vr2, &sc->r_free, list) {
if (vr2->len > vr->len) {
VTAILQ_INSERT_BEFORE(vr2, vr, list);
return;
}
}
vsm_release(seq);
/* At tail, if everything in the list is smaller */
VTAILQ_INSERT_TAIL(&sc->r_free, vr, list);
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* Initialize a new VSM segment
*/
/*
 * Create the management state for a new VSM segment of l bytes at p:
 * initialize the four range lists, write a fresh VSM_head at the base
 * of the segment and put all space after the head on the free list
 * as one big range.  Returns the new vsm_sc; caller owns it and must
 * eventually pass it to VSM_common_delete().
 *
 * NOTE(review): diff residue -- the first signature and the first few
 * body declarations belong to the old VSM__Alloc() this replaces.
 */
void *
VSM__Alloc(unsigned size, const char *class, const char *type, const char *ident)
struct vsm_sc *
VSM_common_new(void *p, unsigned l)
{
struct VSM_chunk *sha, *sha2;
unsigned seq;
CHECK_OBJ_NOTNULL(VSM_head, VSM_HEAD_MAGIC);
vsm_cleanup();
struct vsm_sc *sc;
struct vsm_range *vr;
/* Pointer-alignment sanity on both header size and base address */
assert(PAOK(sizeof(struct VSM_chunk)));
assert(PAOK(p));
ALLOC_OBJ(sc, VSM_SC_MAGIC);
AN(sc);
VTAILQ_INIT(&sc->r_used);
VTAILQ_INIT(&sc->r_cooling);
VTAILQ_INIT(&sc->r_free);
VTAILQ_INIT(&sc->r_bogus);
sc->b = p;
sc->len = l;
sc->head = (void *)sc->b;
memset(TRUST_ME(sc->head), 0, sizeof *sc->head);
sc->head->magic = VSM_HEAD_MAGIC;
sc->head->hdrsize = sizeof *sc->head;
sc->head->shm_size = l;
/* Everything after the head is initially one free range */
ALLOC_OBJ(vr, VSM_RANGE_MAGIC);
AN(vr);
vr->off = PRNDUP(sizeof(*sc->head));
vr->len = l - vr->off;
VTAILQ_INSERT_TAIL(&sc->r_free, vr, list);
return (sc);
}
/* Round up to pointersize */
size = RUP2(size, sizeof(void*));
/*--------------------------------------------------------------------
* Allocate a chunk from VSM
*/
size += sizeof *sha; /* Make space for the header */
/*
 * Allocate a named chunk from the segment: first move cooled-off
 * ranges back to the free list, then carve a range out of the
 * size-sorted free list (splitting it when the leftover is big
 * enough), zero the allocation, fill in the chunk header strings and
 * link the range at the head of r_used and at the end of the shmem
 * chunk chain.  If no free range fits, fall back to plain malloc()
 * and track the result on r_bogus.  Returns the payload pointer.
 *
 * NOTE(review): diff residue -- lines from the old VSM__Alloc()
 * (sha/seq/VSM_ITER/bprintf based) are interleaved with the new body
 * below without +/- markers; e.g. the `return (VSM_PTR(sha));` and
 * `return (NULL);` lines are old code, the final `return (vr->ptr);`
 * is the new exit.
 */
void *
VSM_common_alloc(struct vsm_sc *sc, unsigned size,
const char *class, const char *type, const char *ident)
{
struct vsm_range *vr, *vr2, *vr3;
double now = VTIM_real();
unsigned l1, l2;
CHECK_OBJ_NOTNULL(sc, VSM_SC_MAGIC);
AN(size);
/* XXX: silent truncation instead of assert ? */
AN(class);
assert(strlen(class) < sizeof(vr->chunk->class));
AN(type);
assert(strlen(type) < sizeof(vr->chunk->type));
AN(ident);
assert(strlen(ident) < sizeof(vr->chunk->ident));
/* Move cooled off stuff to free list */
VTAILQ_FOREACH_SAFE(vr, &sc->r_cooling, list, vr2) {
if (vr->cool > now)
break;
VTAILQ_REMOVE(&sc->r_cooling, vr, list);
vsm_common_insert_free(sc, vr);
}
VSM_ITER(sha) {
CHECK_OBJ_NOTNULL(sha, VSM_CHUNK_MAGIC);
size = PRNDUP(size);
l1 = size + sizeof(struct VSM_chunk);
l2 = size + 2 * sizeof(struct VSM_chunk);
if (strcmp(sha->class, VSM_CLASS_FREE))
/* Find space in free-list */
VTAILQ_FOREACH_SAFE(vr, &sc->r_free, list, vr2) {
if (vr->len < l1)
continue;
if (vr->len <= l2) {
VTAILQ_REMOVE(&sc->r_free, vr, list);
} else {
ALLOC_OBJ(vr3, VSM_RANGE_MAGIC);
AN(vr3);
vr3->off = vr->off;
vr3->len = l1;
vr->off += l1;
vr->len -= l1;
VTAILQ_REMOVE(&sc->r_free, vr, list);
vsm_common_insert_free(sc, vr);
vr = vr3;
}
break;
}
if (size > sha->len)
continue;
if (vr == NULL) {
/*
 * No space in VSM, return malloc'd space
 */
ALLOC_OBJ(vr, VSM_RANGE_MAGIC);
AN(vr);
vr->ptr = malloc(size);
AN(vr->ptr);
VTAILQ_INSERT_TAIL(&sc->r_bogus, vr, list);
/* XXX: log + stats */
return (vr->ptr);
}
/* Mark as inconsistent while we write string fields */
seq = vsm_mark();
/* XXX: stats ? */
if (size + sizeof (*sha) < sha->len) {
sha2 = (void*)((uintptr_t)sha + size);
/* Zero the entire allocation, to avoid garbage confusing readers */
memset(TRUST_ME(sc->b + vr->off), 0, vr->len);
memset(sha2, 0, sizeof *sha2);
sha2->magic = VSM_CHUNK_MAGIC;
sha2->len = sha->len - size;
bprintf(sha2->class, "%s", VSM_CLASS_FREE);
sha->len = size;
}
vr->chunk = (void *)(sc->b + vr->off);
vr->ptr = (vr->chunk + 1);
vr->chunk->magic = VSM_CHUNK_MAGIC;
strcpy(TRUST_ME(vr->chunk->class), class);
strcpy(TRUST_ME(vr->chunk->type), type);
strcpy(TRUST_ME(vr->chunk->ident), ident);
VWMB();
bprintf(sha->class, "%s", class);
bprintf(sha->type, "%s", type);
bprintf(sha->ident, "%s", ident);
vr3 = VTAILQ_FIRST(&sc->r_used);
VTAILQ_INSERT_HEAD(&sc->r_used, vr, list);
vsm_release(seq);
return (VSM_PTR(sha));
if (vr3 != NULL) {
AZ(vr3->chunk->next);
vr3->chunk->next = vr->off;
} else {
sc->head->first = vr->off;
}
return (NULL);
VWMB();
return (vr->ptr);
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
* Free a chunk
*/
/*
 * Free a chunk previously returned by VSM_common_alloc().  Segment
 * chunks are unlinked from the shmem chunk chain (patching the
 * predecessor's `next` offset or head->first), their header length
 * zeroed, and the range parked on the cooling list for 60 seconds so
 * in-flight readers can drain.  Bogus (malloc'd) chunks are freed
 * immediately.  Freeing a pointer found on neither list asserts.
 *
 * NOTE(review): diff residue -- the sha/seq/VSM_ITER lines are the
 * old VSM__Free() implementation this commit replaces.
 */
void
VSM__Free(const void *ptr)
VSM_common_free(struct vsm_sc *sc, void *ptr)
{
struct VSM_chunk *sha;
unsigned seq;
struct vsm_range *vr, *vr2;
CHECK_OBJ_NOTNULL(VSM_head, VSM_HEAD_MAGIC);
VSM_ITER(sha)
if (VSM_PTR(sha) == ptr)
break;
AN(sha);
seq = vsm_mark();
bprintf(sha->class, "%s", VSM_CLASS_COOL);
sha->state = (unsigned)VTIM_mono();
vsm_release(seq);
CHECK_OBJ_NOTNULL(sc, VSM_SC_MAGIC);
AN(ptr);
/* Look in used list, move to cooling list */
VTAILQ_FOREACH(vr, &sc->r_used, list) {
if (vr->ptr != ptr)
continue;
/* XXX: stats ? */
vr2 = VTAILQ_NEXT(vr, list);
VTAILQ_REMOVE(&sc->r_used, vr, list);
VTAILQ_INSERT_TAIL(&sc->r_cooling, vr, list);
vr->cool = VTIM_real() + 60; /* XXX: param ? */
if (vr2 != NULL)
vr2->chunk->next = vr->chunk->next;
else
sc->head->first = vr->chunk->next;
VWMB();
vr->chunk->len = 0;
VWMB();
return;
}
/* Look in bogus list, free */
VTAILQ_FOREACH(vr, &sc->r_bogus, list) {
if (vr->ptr == ptr) {
VTAILQ_REMOVE(&sc->r_bogus, vr, list);
FREE_OBJ(vr);
/* XXX: stats ? */
free(TRUST_ME(ptr));
return;
}
}
/* Panic */
assert(ptr == "Bogus pointer freed");
}
/*--------------------------------------------------------------------
* Free all allocations after the mark (ie: allocated by child).
* Delete a VSM segment
*/
/*
 * Tear down a VSM segment's management state: free every range record
 * on all four lists (and the malloc'd payloads of bogus ranges),
 * invalidate the shared head's magic, and free the vsm_sc itself.
 * The shmem mapping is NOT unmapped here -- no munmap in this code.
 *
 * NOTE(review): diff residue -- the sha/f/seq/VSM_ITER lines belong
 * to the old VSM__Clean() this commit removes.
 */
void
VSM__Clean(void)
VSM_common_delete(struct vsm_sc *sc)
{
struct VSM_chunk *sha;
unsigned f, seq;
CHECK_OBJ_NOTNULL(VSM_head, VSM_HEAD_MAGIC);
f = 0;
seq = vsm_mark();
VSM_ITER(sha) {
if (f == 0 && !strcmp(sha->class, VSM_CLASS_MARK)) {
f = 1;
continue;
}
if (f == 0)
continue;
if (strcmp(sha->class, VSM_CLASS_FREE) &&
strcmp(sha->class, VSM_CLASS_COOL))
VSM__Free(VSM_PTR(sha));
struct vsm_range *vr, *vr2;
CHECK_OBJ_NOTNULL(sc, VSM_SC_MAGIC);
VTAILQ_FOREACH_SAFE(vr, &sc->r_free, list, vr2)
FREE_OBJ(vr);
VTAILQ_FOREACH_SAFE(vr, &sc->r_used, list, vr2)
FREE_OBJ(vr);
VTAILQ_FOREACH_SAFE(vr, &sc->r_cooling, list, vr2)
FREE_OBJ(vr);
VTAILQ_FOREACH_SAFE(vr, &sc->r_bogus, list, vr2) {
free(TRUST_ME(vr->ptr));
FREE_OBJ(vr);
}
vsm_release(seq);
sc->head->magic = 0;
FREE_OBJ(sc);
}
......@@ -29,6 +29,8 @@
* This file contains the heritage passed when mgt forks cache
*/
struct vsm_sc;
struct listen_sock {
unsigned magic;
#define LISTEN_SOCK_MAGIC 0x999e4b57
......@@ -56,6 +58,8 @@ struct heritage {
/* Hash method */
const struct hash_slinger *hash;
struct vsm_sc *vsm;
char *name;
char identity[1024];
};
......
......@@ -110,9 +110,8 @@ extern unsigned mgt_vcc_err_unref;
syslog(pri, fmt, __VA_ARGS__); \
} while (0)
#define VSM_Alloc(a, b, c, d) VSM__Alloc(a,b,c,d)
#define VSM_Free(a) VSM__Free(a)
#define VSM_Clean() VSM__Clean()
#define VSM_Alloc(a, b, c, d) VSM_common_alloc(heritage.vsm, a,b,c,d)
#define VSM_Free(a) VSM_common_free(heritage.vsm, a)
#if defined(PTHREAD_CANCELED) || defined(PTHREAD_MUTEX_DEFAULT)
#error "Keep pthreads out of in manager process"
......
......@@ -493,15 +493,15 @@ mgt_cli_secret(const char *S_arg)
{
int i, fd;
char buf[BUFSIZ];
char *p;
volatile char *p;
/* Save in shmem */
i = strlen(S_arg);
p = VSM_Alloc(i + 1, "Arg", "-S", "");
AN(p);
strcpy(p, S_arg);
memcpy(TRUST_ME(p), S_arg, i + 1);
srandomdev();
srandomdev(); /* XXX: why here ??? */
fd = open(S_arg, O_RDONLY);
if (fd < 0) {
fprintf(stderr, "Can not open secret-file \"%s\"\n", S_arg);
......@@ -527,7 +527,7 @@ mgt_cli_telnet(const char *T_arg)
struct vss_addr **ta;
int i, n, sock, good;
struct telnet *tn;
char *p;
volatile char *p;
struct vsb *vsb;
char abuf[VTCP_ADDRBUFSIZE];
char pbuf[VTCP_PORTBUFSIZE];
......@@ -566,7 +566,7 @@ mgt_cli_telnet(const char *T_arg)
/* Save in shmem */
p = VSM_Alloc(VSB_len(vsb) + 1, "Arg", "-T", "");
AN(p);
strcpy(p, VSB_data(vsb));
memcpy(TRUST_ME(p), VSB_data(vsb), VSB_len(vsb) + 1);
VSB_delete(vsb);
}
......
......@@ -654,8 +654,6 @@ main(int argc, char * const *argv)
if (T_arg != NULL)
mgt_cli_telnet(T_arg);
AN(VSM_Alloc(0, VSM_CLASS_MARK, "", ""));
MGT_Run();
if (pfh != NULL)
......
......@@ -169,9 +169,8 @@ vsl_n_check(int fd)
*/
static void
vsl_buildnew(const char *fn, unsigned size, int fill)
vsl_buildnew(const char *fn, ssize_t size)
{
struct VSM_head slh;
int i;
unsigned u;
char buf[64*1024];
......@@ -189,26 +188,16 @@ vsl_buildnew(const char *fn, unsigned size, int fill)
flags &= ~O_NONBLOCK;
AZ(fcntl(vsl_fd, F_SETFL, flags));
memset(&slh, 0, sizeof slh);
slh.magic = VSM_HEAD_MAGIC;
slh.hdrsize = sizeof slh;
slh.shm_size = size;
i = write(vsl_fd, &slh, sizeof slh);
xxxassert(i == sizeof slh);
if (fill) {
memset(buf, 0, sizeof buf);
for (u = sizeof slh; u < size; ) {
i = write(vsl_fd, buf, sizeof buf);
if (i <= 0) {
fprintf(stderr, "Write error %s: %s\n",
fn, strerror(errno));
exit (1);
}
u += i;
memset(buf, 0, sizeof buf);
for (u = 0; u < size; ) {
i = write(vsl_fd, buf, sizeof buf);
if (i <= 0) {
fprintf(stderr, "Write error %s: %s\n",
fn, strerror(errno));
exit (1);
}
u += i;
}
AZ(ftruncate(vsl_fd, (off_t)size));
}
......@@ -220,8 +209,10 @@ static
/*
 * atexit handler: clear master_pid in the shared head, but only when
 * run by the process that set it -- presumably so fork()ed children
 * running the handler don't clear it (TODO confirm).  The body is
 * disabled (#if 0) by this commit while VSM_head is phased out.
 * (The `static` qualifier is on the preceding diff hunk-header line.)
 */
void
mgt_shm_atexit(void)
{
#if 0
if (getpid() == VSM_head->master_pid)
VSM_head->master_pid = 0;
#endif
}
void
......@@ -229,7 +220,10 @@ mgt_SHM_Init(void)
{
int i, fill;
uintmax_t size, ps;
void *p;
#if 0
uint32_t *vsl_log_start;
#endif
fill = 1;
......@@ -243,55 +237,49 @@ mgt_SHM_Init(void)
vsl_n_check(i);
(void)close(i);
}
vsl_buildnew(VSM_FILENAME, size, fill);
vsl_buildnew(VSM_FILENAME, size);
VSM_head = (void *)mmap(NULL, size,
p = (void *)mmap(NULL, size,
PROT_READ|PROT_WRITE,
MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED,
vsl_fd, 0);
VSM_head->master_pid = getpid();
AZ(atexit(mgt_shm_atexit));
xxxassert(VSM_head != MAP_FAILED);
(void)mlock((void*)VSM_head, size);
memset(&VSM_head->head, 0, sizeof VSM_head->head);
VSM_head->head.magic = VSM_CHUNK_MAGIC;
VSM_head->head.len =
(uint8_t*)(VSM_head) + size - (uint8_t*)&VSM_head->head;
bprintf(VSM_head->head.class, "%s", VSM_CLASS_FREE);
VWMB();
xxxassert(p != MAP_FAILED);
vsm_end = (void*)((uint8_t*)VSM_head + size);
heritage.vsm = VSM_common_new(p, size);
VSC_C_main = VSM_Alloc(sizeof *VSC_C_main,
VSC_CLASS, VSC_TYPE_MAIN, "");
AN(VSC_C_main);
(void)mlock(p, size);
AZ(atexit(mgt_shm_atexit));
/* XXX: We need to zero params if we dealloc/clean/wash */
cache_param = VSM_Alloc(sizeof *cache_param, VSM_CLASS_PARAM, "", "");
AN(cache_param);
*cache_param = mgt_param;
vsl_log_start = VSM_Alloc(mgt_param.vsl_space, VSL_CLASS, "", "");
AN(vsl_log_start);
vsl_log_start[1] = VSL_ENDMARKER;
VWMB();
PAN_panicstr_len = 64 * 1024;
PAN_panicstr = VSM_Alloc(PAN_panicstr_len, PAN_CLASS, "", "");
AN(PAN_panicstr);
/* XXX: shouldn't VSM_Alloc zero ? */
memset(PAN_panicstr, '\0', PAN_panicstr_len);
do
*vsl_log_start = random() & 0xffff;
while (*vsl_log_start == 0);
#if 0
VSM_head->master_pid = getpid();
memset(&VSM_head->head, 0, sizeof VSM_head->head);
VSM_head->head.magic = VSM_CHUNK_MAGIC;
VSM_head->head.len =
(uint8_t*)(VSM_head) + size - (uint8_t*)&VSM_head->head;
bprintf(VSM_head->head.class, "%s", VSM_CLASS_FREE);
VWMB();
vsm_end = (void*)((uint8_t*)VSM_head + size);
VSC_C_main = VSM_Alloc(sizeof *VSC_C_main,
VSC_CLASS, VSC_TYPE_MAIN, "");
AN(VSC_C_main);
do
VSM_head->alloc_seq = random();
while (VSM_head->alloc_seq == 0);
#endif
}
......@@ -299,5 +287,7 @@ void
/*
 * Record the manager's pid in the shared head.  Disabled (#if 0) by
 * this commit pending the VSM overhaul.  (Return type `void` is on
 * the preceding diff hunk-header line.)
 */
mgt_SHM_Pid(void)
{
#if 0
VSM_head->master_pid = getpid();
#endif
}
......@@ -41,10 +41,11 @@
*/
struct VSM_chunk {
#define VSM_CHUNK_MAGIC 0x43907b6e /* From /dev/random */
#define VSM_CHUNK_MAGIC 0xa15712e5 /* From /dev/random */
unsigned magic;
unsigned len;
unsigned state;
unsigned len; /* Incl VSM_chunk */
unsigned next; /* Offset in shmem */
unsigned state; /* XXX remove */
char class[8];
char type[8];
char ident[64];
......@@ -54,10 +55,11 @@ struct VSM_chunk {
#define VSM_PTR(sha) ((void*)((uintptr_t)((sha) + 1)))
struct VSM_head {
#define VSM_HEAD_MAGIC 4185512502U /* From /dev/random */
#define VSM_HEAD_MAGIC 0xe75f7e91 /* From /dev/random */
unsigned magic;
unsigned hdrsize;
unsigned first; /* Offset, first chunk */
uint64_t starttime;
int64_t master_pid;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment