Commit 22f5e216 authored by Poul-Henning Kamp

Teach the persistent stevedore to fix up pointers if the silo
gets remapped to a different place.

(This would be horribly slow to run in production.)
parent 1a9beb31
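The core trick, for readers skimming the diff below: every pointer stored inside the silo is only valid relative to the base address the silo was mapped at when the pointer was written. A minimal sketch of the rebase arithmetic, with an illustrative name (rebase_ptr is not in the tree; the commit's equivalent is fix_ptr()):

#include <stddef.h>
#include <stdint.h>

static void *
rebase_ptr(void *stored, uintptr_t old_base, uintptr_t new_base)
{
	uintptr_t u = (uintptr_t)stored;

	if (u == 0)
		return (NULL);	/* NULL must stay NULL, not be shifted */
	return ((void *)(u - old_base + new_base));
}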
@@ -141,7 +141,6 @@ void v_matchproto_(storage_init_f)
smp_mgt_init(struct stevedore *parent, int ac, char * const *av)
{
struct smp_sc *sc;
struct smp_sign sgn;
void *target;
int i, mmap_flags;
@@ -195,51 +194,20 @@ smp_mgt_init(struct stevedore *parent, int ac, char * const *av)
AZ(ftruncate(sc->fd, sc->mediasize));
/* Try to determine correct mmap address */
i = read(sc->fd, &sgn, sizeof sgn);
assert(i == sizeof sgn);
if (!memcmp(sgn.ident, "SILO", 5))
target = (void*)(uintptr_t)sgn.mapped;
else
target = NULL;
target = NULL;
mmap_flags = MAP_NOCORE | MAP_NOSYNC | MAP_SHARED;
if (target) {
mmap_flags |= MAP_FIXED;
#ifdef MAP_EXCL
mmap_flags |= MAP_EXCL;
#endif
} else {
#ifdef __FreeBSD__
/*
* I guess the people who came up with ASLR never learned
* that virtual memory can have beneficial uses, because they
* added no facility for reliably and portably allocating
* stable address-space.
* This stevedore is only for testing these days, so we
* can get away with just hacking something up: 16M below
* the break seems to work on FreeBSD.
*/
uintptr_t up;
up = (uintptr_t)sbrk(0);
up -= 1ULL<<24;
up -= sc->mediasize;
up &= ~(getpagesize() - 1ULL);
target = (void *)up;
#endif
#ifdef MAP_ALIGNED_SUPER
mmap_flags |= MAP_ALIGNED_SUPER;
mmap_flags |= MAP_ALIGNED_SUPER;
#endif
}
sc->base = (void*)mmap(target, sc->mediasize, PROT_READ|PROT_WRITE,
mmap_flags, sc->fd, 0);
if (sc->base == MAP_FAILED)
ARGV_ERR("(-spersistent) failed to mmap (%s) @%p\n",
VAS_errtxt(errno), target);
if (target != NULL && sc->base != target)
fprintf(stderr, "WARNING: Persistent silo lost to ASLR %s\n",
sc->filename);
smp_def_sign(sc, &sc->idn, 0, "SILO");
sc->ident = SIGN_DATA(&sc->idn);
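A simplified view of what the new mapping logic above amounts to (map_silo is an assumed helper name, and the MAP_NOCORE/MAP_NOSYNC/MAP_ALIGNED_SUPER details are omitted): map the silo wherever the kernel chooses and merely warn when the address moved, instead of forcing MAP_FIXED:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

static void *
map_silo(int fd, size_t size, uintptr_t recorded_base)
{
	void *base;

	/* No MAP_FIXED: accept whatever address the kernel picks. */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED)
		return (NULL);
	if (recorded_base != 0 && (uintptr_t)base != recorded_base)
		fprintf(stderr, "silo moved; pointers get rebased on access\n");
	return (base);
}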
@@ -494,7 +494,7 @@ smp_allocx(const struct stevedore *st, size_t min_size, size_t max_size,
INIT_OBJ(ss, STORAGE_MAGIC);
ss->ptr = PRNUP(sc, ss + 1);
ss->space = max_size;
ss->priv = sc;
ss->priv = sc->base;
if (ssg != NULL)
*ssg = sg;
return (ss);
@@ -566,7 +566,7 @@ smp_allocobj(struct worker *wrk, const struct stevedore *stv,
assert(sizeof so->hash == DIGEST_LEN);
memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
EXP_COPY(so, oc);
so->ptr = (uint8_t*)o - sc->base;
so->ptr = (uint8_t*)(o->objstore) - sc->base;
so->ban = BAN_Time(oc->ban);
smp_init_oc(oc, sg, objidx);
@@ -396,13 +396,27 @@ smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
* objcore methods for persistent objects
*/
static void
fix_ptr(const struct smp_seg *sg, const struct storage *st, void **ptr)
{
// See comment where used below
uintptr_t u;
u = (uintptr_t)(*ptr);
if (u != 0) {
u -= (uintptr_t)st->priv;
u += (uintptr_t)sg->sc->base;
}
*ptr = (void *)u;
}
struct object * v_matchproto_(sml_getobj_f)
smp_sml_getobj(struct worker *wrk, struct objcore *oc)
{
struct object *o;
struct smp_seg *sg;
struct smp_object *so;
struct storage *st;
struct storage *st, *st2;
uint64_t l;
int bad;
@@ -413,7 +427,43 @@ smp_sml_getobj(struct worker *wrk, struct objcore *oc)
CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
so = smp_find_so(sg, oc->stobj->priv2);
o = (void*)(sg->sc->base + so->ptr);
/**************************************************************
* The silo may have been remapped at a different address,
* because the people who came up with ASLR were unable to
* imagine that there might be beneficial use-cases for
* always mapping a file at the same specific address.
*
* We store the silo's base address in struct storage->priv
* and manually fix all the pointers in struct object and
* the list of struct storage objects which hold the body.
* When done, we update storage->priv, so we can do the
* same trick next time.
*
* This is a prohibitively expensive workaround, but we can
* live with it, because the role of this stevedore is only
* to keep the internal stevedore API honest.
*/
st = (void*)(sg->sc->base + so->ptr);
fix_ptr(sg, st, (void**)&st->ptr);
o = (void*)st->ptr;
fix_ptr(sg, st, (void**)&o->objstore);
fix_ptr(sg, st, (void**)&o->va_vary);
fix_ptr(sg, st, (void**)&o->va_headers);
fix_ptr(sg, st, (void**)&o->list.vtqh_first);
fix_ptr(sg, st, (void**)&o->list.vtqh_last);
st->priv = (void*)(sg->sc->base);
st2 = o->list.vtqh_first;
while (st2 != NULL) {
fix_ptr(sg, st2, (void**)&st2->list.vtqe_next);
fix_ptr(sg, st2, (void**)&st2->list.vtqe_prev);
fix_ptr(sg, st2, (void**)&st2->ptr);
st2->priv = (void*)(sg->sc->base);
st2 = st2->list.vtqe_next;
}
/*
* The object may not be in this segment since we allocate it
* in a separate operation from the smp_object. We could check
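Putting the pieces together, this is roughly what the fix-up pass in smp_sml_getobj() above does, reduced to a hypothetical singly-linked list: struct node stands in for struct storage, written_base for the base address kept in storage->priv, and rebase_ptr() is the helper sketched near the top of the page:

struct node {
	struct node	*next;		/* pointer that lives inside the silo */
	void		*written_base;	/* silo base when next was written */
};

static void
fix_chain(struct node *head, void *cur_base)
{
	struct node *n;

	for (n = head; n != NULL; n = n->next) {
		/* Rebase the stored pointer, then remember which base
		 * makes it valid, so an unmoved silo costs a no-op. */
		n->next = (struct node *)rebase_ptr(n->next,
		    (uintptr_t)n->written_base, (uintptr_t)cur_base);
		n->written_base = cur_base;
	}
}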
@@ -95,7 +95,7 @@ smp_chk_sign(struct smp_signctx *ctx)
r = 1;
else if (ctx->unique != ctx->ss->unique)
r = 2;
else if ((uintptr_t)ctx->ss != ctx->ss->mapped)
else if (!ctx->ss->mapped)
r = 3;
else {
VSHA256_Init(&ctx->ctx);
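The smp_chk_sign() change above is the flip side of the same idea. A hedged sketch with assumed type and field names: a signature used to be valid only if the silo sat at exactly the address recorded when it was written; now any recorded address is acceptable, because stale pointers are repaired lazily on access:

#include <stdint.h>

struct sign {
	uint64_t	mapped;	/* base address recorded when silo was written */
};

static int
sign_mapped_ok(const struct sign *ss)
{
	/* Before: required (uintptr_t)ss == ss->mapped.  After: any
	 * non-zero recorded address will do; fix-up handles the rest. */
	return (ss->mapped != 0);
}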
@@ -37,6 +37,8 @@ client c1 {
expect resp.http.X-Varnish == "1001"
} -run
varnish v1 -vsl_catchup
varnish v1 -cliok "storage.list"
varnish v1 -cliok "debug.persistent s0 dump"
varnish v1 -cliok "debug.persistent s0 sync"
@@ -2,9 +2,6 @@ varnishtest "Ban list sync across silos"
feature persistent_storage
# VM-remapping is too random on OSX
feature cmd {test $(uname) != "Darwin"}
shell "rm -f ${tmpdir}/_.per[12]"
# Silo 1 & 2
@@ -2,8 +2,6 @@ varnishtest "Test address remapping"
feature persistent_storage
feature disable_aslr
# VM-remapping is too random on OSX
feature cmd {test $(uname) != "Darwin"}
# Same on some hardened Linux