Commit 6c54c13d authored by Poul-Henning Kamp's avatar Poul-Henning Kamp

VSM allocations/frees need to happen from both manager and child process

Start abstracting this stuff into a common file (vsm.c).



git-svn-id: http://www.varnish-cache.org/svn/trunk/varnish-cache@4958 d4fa192b-c00b-0410-8231-f00ffab90ce4
parent 1f981c5d
......@@ -57,7 +57,8 @@ varnishd_SOURCES = \
storage_synth.c \
storage_umem.c \
stevedore_utils.c \
varnishd.c
varnishd.c \
vsm.c
noinst_HEADERS = \
acct_fields.h \
......
......@@ -42,6 +42,10 @@ SVNID("$Id$")
static pthread_mutex_t vsl_mtx;
static uint32_t *vsl_start;
static uint32_t *vsl_end;
static uint32_t *vsl_ptr;
static inline uint32_t
vsl_w0(uint32_t type, uint32_t length)
{
......@@ -69,13 +73,17 @@ static void
vsl_wrap(void)
{
vsl_log_start[1] = VSL_ENDMARKER;
assert(vsl_ptr >= vsl_start + 1);
assert(vsl_ptr < vsl_end);
vsl_start[1] = VSL_ENDMARKER;
do
vsl_log_start[0]++;
while (vsl_log_start[0] == 0);
vsl_start[0]++;
while (vsl_start[0] == 0);
VWMB();
*vsl_log_nxt = VSL_WRAPMARKER;
vsl_log_nxt = vsl_log_start + 1;
if (vsl_ptr != vsl_start + 1) {
*vsl_ptr = VSL_WRAPMARKER;
vsl_ptr = vsl_start + 1;
}
VSL_stats->shm_cycles++;
}
......@@ -92,24 +100,24 @@ vsl_get(unsigned len, unsigned records, unsigned flushes)
AZ(pthread_mutex_lock(&vsl_mtx));
VSL_stats->shm_cont++;
}
assert(vsl_log_nxt < vsl_log_end);
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
assert(vsl_ptr < vsl_end);
assert(((uintptr_t)vsl_ptr & 0x3) == 0);
VSL_stats->shm_writes++;
VSL_stats->shm_flushes += flushes;
VSL_stats->shm_records += records;
/* Wrap if necessary */
if (VSL_END(vsl_log_nxt, len) >= vsl_log_end)
if (VSL_END(vsl_ptr, len) >= vsl_end)
vsl_wrap();
p = vsl_log_nxt;
vsl_log_nxt = VSL_END(vsl_log_nxt, len);
p = vsl_ptr;
vsl_ptr = VSL_END(vsl_ptr, len);
*vsl_log_nxt = VSL_ENDMARKER;
*vsl_ptr = VSL_ENDMARKER;
assert(vsl_log_nxt < vsl_log_end);
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
assert(vsl_ptr < vsl_end);
assert(((uintptr_t)vsl_ptr & 0x3) == 0);
AZ(pthread_mutex_unlock(&vsl_mtx));
return (p);
......@@ -264,8 +272,18 @@ WSL(struct worker *w, enum vsl_tag tag, int id, const char *fmt, ...)
void
VSL_Init(void)
{
struct vsm_chunk *vsc;
AZ(pthread_mutex_init(&vsl_mtx, NULL));
VSM_ITER(vsc)
if (!strcmp(vsc->class, VSL_CLASS))
break;
AN(vsc);
vsl_start = VSM_PTR(vsc);
vsl_end = VSM_NEXT(vsc);
vsl_ptr = vsl_start + 1;
vsl_wrap();
loghead->starttime = (intmax_t)TIM_real();
loghead->panicstr[0] = '\0';
......
......@@ -42,9 +42,6 @@ void VCA_tweak_waiter(struct cli *cli, const char *arg);
void *mgt_SHM_Alloc(unsigned size, const char *class, const char *type, const char *ident);
extern struct vsc_main *VSL_stats;
extern struct vsm_head *loghead;
extern uint32_t *vsl_log_start;
extern uint32_t *vsl_log_end;
extern uint32_t *vsl_log_nxt;
/* varnishd.c */
struct vsb;
......@@ -72,16 +69,11 @@ const void *pick(const struct choice *cp, const char *which, const char *kind);
#define NEEDLESS_RETURN(foo) return (foo)
/**********************************************************************
* Guess what: There is no POSIX standard for memory barriers.
* XXX: Please try to find the minimal #ifdef to use here, rely on OS
* supplied facilities if at all possible, to avoid descending into the
* full cpu/compiler explosion.
*/
/* vsm.c */
extern struct vsm_head *vsm_head;
extern void *vsm_end;
struct vsm_chunk *vsm_iter_0(void);
void vsm_iter_n(struct vsm_chunk **pp);
#ifdef __FreeBSD__
#include <machine/atomic.h>
#define MEMORY_BARRIER() mb()
#else
#define MEMORY_BARRIER() close(-1)
#endif
#define VSM_ITER(vd) for ((vd) = vsm_iter_0(); (vd) != NULL; vsm_iter_n(&vd))
......@@ -115,9 +115,6 @@ SVNID("$Id$")
struct vsc_main *VSL_stats;
struct vsm_head *loghead;
uint32_t *vsl_log_start;
uint32_t *vsl_log_end;
uint32_t *vsl_log_nxt;
static int vsl_fd = -1;
......@@ -260,6 +257,7 @@ mgt_SHM_Init(const char *l_arg)
const char *q;
uintmax_t size, s1, s2, ps;
char **av, **ap;
uint32_t *vsl_log_start;
if (l_arg == NULL)
l_arg = "";
......@@ -341,6 +339,9 @@ mgt_SHM_Init(const char *l_arg)
bprintf(loghead->head.class, "%s", "Free");
VWMB();
vsm_head = loghead;
vsm_end = (uint8_t*)loghead + size;
VSL_stats = mgt_SHM_Alloc(sizeof *VSL_stats,
VSC_CLASS, VSC_TYPE_MAIN, "");
AN(VSL_stats);
......@@ -352,9 +353,7 @@ mgt_SHM_Init(const char *l_arg)
vsl_log_start = mgt_SHM_Alloc(s1, VSL_CLASS, "", "");
AN(vsl_log_start);
vsl_log_end = (void*)((uint8_t *)vsl_log_start + s1);
vsl_log_nxt = vsl_log_start + 1;
*vsl_log_nxt = VSL_ENDMARKER;
vsl_log_start[1] = VSL_ENDMARKER;
VWMB();
do
......
/*-
* Copyright (c) 2010 Redpill Linpro AS
* All rights reserved.
*
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* VSM stuff common to manager and child.
*
*/
#include "config.h"
#include "svnid.h"
SVNID("$Id$")
#include <unistd.h>
#include "miniobj.h"
#include "libvarnish.h"
#include "common.h"
#include "vsm.h"
/*
 * Bounds of the shared-memory segment, shared by manager and child.
 * NOTE(review): assigned in mgt_SHM_Init() (vsm_head = loghead,
 * vsm_end = loghead + size); must be set before any VSM_ITER use.
 */
struct vsm_head *vsm_head;
void *vsm_end;
/*--------------------------------------------------------------------*/
/*
 * Begin an iteration over the VSM chunks: return the first chunk,
 * which is embedded in the segment head.  Asserts that the segment
 * and its head chunk carry valid magic numbers.
 */
struct vsm_chunk *
vsm_iter_0(void)
{
	struct vsm_chunk *vc;

	CHECK_OBJ_NOTNULL(vsm_head, VSM_HEAD_MAGIC);
	vc = &vsm_head->head;
	CHECK_OBJ_NOTNULL(vc, VSM_CHUNK_MAGIC);
	return (vc);
}
/*
 * Advance the iterator to the next VSM chunk.  Sets *pp to NULL when
 * the next chunk would lie at or past vsm_end; otherwise the new
 * chunk's magic is asserted before it is handed back.
 */
void
vsm_iter_n(struct vsm_chunk **pp)
{
	struct vsm_chunk *vc;

	CHECK_OBJ_NOTNULL(vsm_head, VSM_HEAD_MAGIC);
	CHECK_OBJ_NOTNULL(*pp, VSM_CHUNK_MAGIC);
	vc = VSM_NEXT(*pp);
	if ((void *)vc >= vsm_end) {
		*pp = NULL;
		return;
	}
	CHECK_OBJ_NOTNULL(vc, VSM_CHUNK_MAGIC);
	*pp = vc;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment