Correct stack regionlist size used during fellow_log_entries_prep()

First and foremost, fellow_log_prep_max_regions was defined wrong:

Except in fellow_cache_test, we call log submission with a maximum of
FELLOW_DISK_LOG_BLOCK_ENTRIES = 56 DLEs. The intention of
fellow_log_prep_max_regions was to allocate enough space to track the
maximum number of regions these DLEs could possibly contain. The exact
maximum is (FELLOW_DISK_LOG_BLOCK_ENTRIES - 1) * DLE_REG_NREGION + 1 =
(55 * 4) + 1 = 221, which is higher than the value used so far,
FELLOW_DISK_LOG_BLOCK_ENTRIES * DLE_BAN_REG_NREGION = 56 * 3 = 168.
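
As a worked check of the arithmetic above (a sketch only; the constant
values are taken from the numbers quoted in this message, not copied
from the headers):

    /* assumed values: 56 entries, 4 regions per DLE, 3 per ban DLE */
    #define FELLOW_DISK_LOG_BLOCK_ENTRIES 56
    #define DLE_REG_NREGION               4
    #define DLE_BAN_REG_NREGION           3

    /* exact worst case for one block of DLEs */
    _Static_assert((FELLOW_DISK_LOG_BLOCK_ENTRIES - 1) * DLE_REG_NREGION + 1 == 221,
        "up to 221 regions may need tracking");
    /* the old bound fell short */
    _Static_assert(FELLOW_DISK_LOG_BLOCK_ENTRIES * DLE_BAN_REG_NREGION == 168,
        "but only 168 slots were provisioned");
    /* the new bound: 3 * 56 DLEs, 4 regions each */
    _Static_assert(3 * FELLOW_DISK_LOG_BLOCK_ENTRIES * DLE_REG_NREGION == 672,
        "672 slots cover the worst case with room to spare");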

Yet it seems prudent not to rely on any fixed maximum, and our test
cases also call for a higher value, so we now define the maximum as
three times the value actually used, and also ensure that the code
batches submissions to this size.
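
Distilled from the submit-path hunks below (this is just the shape of
the loop those hunks introduce, not additional code):

    while (n > 0) {
            /* preps at most fellow_log_prep_max_dles entries and
             * returns how many it consumed */
            nn = fellow_log_entries_prep(prep, entry, n);
            assert(nn <= n);
            n -= nn;
            entry += nn;
            /* ... add the prep'd batch to the log ... */
    }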

In addition, one assertion in fellow_log_entries_prep() was wrong (it
compared a number of DLEs with a number of regions).
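
Concretely, in the fellow_log_entries_prep() hunk below, the bound is
now expressed and enforced in DLEs:

    /* before: a DLE count compared against a region count */
    assert(n <= fellow_log_prep_max_regions);

    /* after: clamp to the DLE maximum, then assert in the same unit */
    if (n > fellow_log_prep_max_dles)
            n = fellow_log_canfit(entry, fellow_log_prep_max_dles);
    assert(n <= fellow_log_prep_max_dles);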

We also tighten some assertions to help future analysis of possible
issues in this area:

- Ensure that the data path via fellow_log_entries_prep() only ever
  uses a region list on the stack.

- By using the regionlist_onlystk_add() macro, ensure that we hit an
  assertion on the on-stack array, rather than one on the regionlist
  pointer (see the sketch below).
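
For reference, regionlist_onlystk_add() asserts that the on-stack
array still has room, while regionlist_stk_add() first flushes a full
array to the heap regionlist and only then adds (their definitions are
in the last hunk). A minimal usage sketch; "tofree" in the second call
is just a placeholder for a structure that is allowed to spill:

    /* data path (fellow_log_entries_mutate()): must stay on the stack,
     * so overflowing the array trips assert(name.n < name.space) */
    regionlist_onlystk_add(prep->tofree, off, sz);

    /* a caller that may spill: flush when full, then add */
    regionlist_stk_add(tofree, off, sz);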

Diff best viewed with -b

Fixes #18
@@ -740,7 +740,16 @@ fellow_signal_open(struct fellow_fd *ffd)
 #include "fellow_regionlist.h"
 
-#define fellow_log_prep_max_regions (FELLOW_DISK_LOG_BLOCK_ENTRIES * DLE_BAN_REG_NREGION)
+#define fellow_log_prep_max_dles \
+	(3 * FELLOW_DISK_LOG_BLOCK_ENTRIES)
+
+#if (((FELLOW_DISK_LOG_BLOCK_ENTRIES - 1) * DLE_REG_NREGION + 1) > \
+    FELLOW_DISK_LOG_BLOCK_ENTRIES * DLE_BAN_REG_NREGION)
+#define fellow_log_prep_max_regions \
+	(fellow_log_prep_max_dles * DLE_REG_NREGION)
+#else
+#error Need to re-check fellow_log_prep_max_regions
+#endif
 
 struct fellow_log_prep_tofree regionlist_stk(fellow_log_prep_max_regions);
 
 struct fellow_log_prep {
@@ -3711,6 +3720,8 @@ fellow_log_ban(struct fellow_fd *ffd, uint8_t op,
 
 /*
  * we turn DEL_ALLOC DLEs and free them via regions_to_free when the log is
  * referenced
+ *
+ * must be called with sufficient space in prep->tofree->arr
  */
 static void
@@ -3721,7 +3732,7 @@ fellow_log_entries_mutate(struct fellow_log_prep *prep,
 	size_t sz;
 
 	CHECK_OBJ_NOTNULL(prep, FELLOW_LOG_PREP_MAGIC);
-	assert(prep->tofree.space >= n);
+	AZ(prep->tofree.regionlist);
 
 #ifdef DEBUG
 	/* test with and without mutate */
@@ -3738,7 +3749,7 @@ fellow_log_entries_mutate(struct fellow_log_prep *prep,
 			    &BUDDY_OFF_EXTENT(off, sz));
 #endif
 			AZ((size_t)off & FELLOW_BLOCK_ALIGN);
-			regionlist_stk_add(prep->tofree, off, sz);
+			regionlist_onlystk_add(prep->tofree, off, sz);
 			e->type = DLEDSK(DLE_OBJ_DEL_FREE);
 			break;
 		case DLE_REG_DEL_ALLOCED:
@@ -3748,7 +3759,7 @@ fellow_log_entries_mutate(struct fellow_log_prep *prep,
 				buddy_assert_alloced_off_extent(hack_dskbuddy,
 				    &BUDDY_OFF_EXTENT(off, sz));
 #endif
-				regionlist_stk_add(prep->tofree, off, sz);
+				regionlist_onlystk_add(prep->tofree, off, sz);
 			}
 			e->type = DLEDSK(DLE_REG_DEL_FREE);
 			break;
@@ -3763,8 +3774,10 @@
  * prep log entries for addition, can be called unlocked
  *
  * space needs to be provided by caller
+ *
+ * returns number of entries prep'd
  */
-static void
+static unsigned
 fellow_log_entries_prep(struct fellow_log_prep *prep,
     struct fellow_dle *entry, unsigned n)
 {
@@ -3776,7 +3789,9 @@ fellow_log_entries_prep(struct fellow_log_prep *prep,
 	AN(prep);
 	AN(entry);
 	AN(n);
-	assert(n <= fellow_log_prep_max_regions);
+	if (n > fellow_log_prep_max_dles)
+		n = fellow_log_canfit(entry, fellow_log_prep_max_dles);
+	assert(n <= fellow_log_prep_max_dles);
 
 	memset(hashpfx, 0, sizeof hashpfx);
 	INIT_OBJ(prep, FELLOW_LOG_PREP_MAGIC);
@@ -3863,6 +3878,7 @@ fellow_log_entries_prep(struct fellow_log_prep *prep,
 	assert(fellow_log_block_valid_last(e));
 	prep->entry = entry;
 	prep->n = n;
+	return (n);
 }
 
 /* for concurrent logbuffer, needs to be called under lock */
@@ -3947,25 +3963,31 @@ fellow_privatelog_submit(struct fellow_fd *ffd,
     struct fellow_logbuffer *lbuf, struct fellow_dle *entry, unsigned n)
 {
 	struct fellow_log_prep prep[1];
+	unsigned nn;
 
 	/*
 	 * private logs do only really exist during FP_OPEN. During FP_INIT,
 	 * the caller usually needs to hold the logmtx
 	 */
-	fellow_log_entries_prep(prep, entry, n);
+	while (n > 0) {
+		nn = fellow_log_entries_prep(prep, entry, n);
+		assert(nn <= n);
+		n -= nn;
+		entry += nn;
 
-	switch (ffd->phase) {
-	case FP_INIT:
-		assert(lbuf == ffd->logbuf);
-		/* FALLTHROUGH */
-	case FP_OPEN:
-		fellow_log_entries_add(ffd, lbuf, prep);
-		AZ(prep->tofree.n);
-		break;
-	default:
-		WRONG("privatelog_submit can only be called during "
-		    "FP_INIT and FP_OPEN");
+		switch (ffd->phase) {
+		case FP_INIT:
+			assert(lbuf == ffd->logbuf);
+			/* FALLTHROUGH */
+		case FP_OPEN:
+			fellow_log_entries_add(ffd, lbuf, prep);
+			AZ(prep->tofree.n);
+			break;
+		default:
+			WRONG("privatelog_submit can only be called during "
+			    "FP_INIT and FP_OPEN");
+		}
 	}
 }
@@ -6008,26 +6030,35 @@ fellow_log_dle_submit(struct fellow_fd *ffd,
 {
 	struct fellow_log_prep prep[1];
 	struct regionlist *prealloc = NULL;
+	unsigned nn;
 
-	fellow_log_entries_prep(prep, entry, n);
+	while (n > 0) {
+		nn = fellow_log_entries_prep(prep, entry, n);
+		assert(nn <= n);
+		n -= nn;
+		entry += nn;
 
-	/* pre-allocate regionlist outside the lock if it
-	 * looks like we are going to need it.
-	 * fellow_log_entries_add() will alloc anyway
-	 */
-	if (ffd->logbuf->regions_to_free == NULL &&
-	    prep->tofree.n) {
-		prealloc = regionlist_alloc(ffd->membuddy);
-	}
+		/* pre-allocate regionlist outside the lock if it
+		 * looks like we are going to need it.
+		 * fellow_log_entries_add() will alloc anyway
+		 */
+		if (prealloc == NULL &&
+		    ffd->logbuf->regions_to_free == NULL &&
+		    prep->tofree.n) {
+			prealloc = regionlist_alloc(ffd->membuddy);
+		}
 
-	AZ(pthread_mutex_lock(&ffd->logmtx));
-	if (ffd->logbuf->regions_to_free == NULL && prealloc != NULL)
-		TAKEZN(ffd->logbuf->regions_to_free, prealloc);
-	fellow_log_entries_add(ffd, ffd->logbuf, prep);
-	AZ(pthread_mutex_unlock(&ffd->logmtx));
+		AZ(pthread_mutex_lock(&ffd->logmtx));
+		if (ffd->logbuf->regions_to_free == NULL && prealloc != NULL)
+			TAKEZN(ffd->logbuf->regions_to_free, prealloc);
+		fellow_log_entries_add(ffd, ffd->logbuf, prep);
+		AZ(pthread_mutex_unlock(&ffd->logmtx));
 
-	AZ(prep->tofree.n);
+		AZ(prep->tofree.n);
+	}
 
 	if (prealloc != NULL)
 		regionlist_free(&prealloc, ffd->dskbuddy);
+	AZ(prealloc);
@@ -86,17 +86,22 @@ do { \
 	name.n = 0;						\
 } while(0)
 
-#define regionlist_stk_add(name, o, s)				\
+#define regionlist_onlystk_add(name, o, s)			\
 do {								\
-	if (name.n == name.space) {				\
-		regionlist_stk_flush(name);			\
-	}							\
 	assert(name.n < name.space);				\
 	name.arr[name.n].off = (o);				\
 	name.arr[name.n].size = (s);				\
 	name.n++;						\
 } while(0)
 
+#define regionlist_stk_add(name, o, s)				\
+do {								\
+	if (name.n == name.space) {				\
+		regionlist_stk_flush(name);			\
+	}							\
+	regionlist_onlystk_add(name, o, s);			\
+} while(0)
+
 static struct regl *
 regl_alloc_bits(buddy_t *membuddy, const uint8_t bits, int8_t cram,
     struct regionlist **rlp)