Use BUDDY_POOL() for regionlists (addresses a lockup)

The oh-so-smart idea from 39c2568e was
pretty dumb after all:

While testing with the low RAM config (16MB on 10GB), a lockup was found
with only ~45% of RAM occupied. The reason for the lockup was the
uncrammed 64KB (16 bits) request for a regionlist, issued at priority 4,
which was blocking all other requests.

So: No, trying to allocate something "just in case" is never a good
idea.
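
To illustrate the failure mode, here is a toy model in C. It is not the
fellow buddy API; the free-block counts and the helpers
largest_free_order(), alloc_uncrammed() and alloc_crammed() are invented
for illustration. The point: an uncrammed request needs one contiguous
block of the full order and can wait forever once free memory is
fragmented into smaller buddies, while a crammed request keeps making
progress with whatever is available.

/*
 * Toy model of the lockup, NOT the fellow buddy API.  Free memory is a
 * set of power-of-two blocks; nothing of order 15 or 16 is left, yet
 * only ~45% of the 16MB arena is occupied.
 */
#include <stdio.h>

#define MAXBITS 16

static const unsigned free_blocks[MAXBITS + 1] = {	/* per order */
	[10] = 5000, [11] = 1200, [12] = 300, [13] = 40, [14] = 6
};

static int
largest_free_order(void)
{
	for (int b = MAXBITS; b >= 0; b--)
		if (free_blocks[b] > 0)
			return (b);
	return (-1);
}

static size_t
free_bytes(void)
{
	size_t sum = 0;

	for (int b = 0; b <= MAXBITS; b++)
		sum += (size_t)free_blocks[b] << b;
	return (sum);
}

/* uncrammed: needs a block of at least the full order, else it waits */
static int
alloc_uncrammed(int bits)
{
	return (largest_free_order() >= bits ? bits : -1);
}

/* crammed: settles for the largest block available, down to min_bits */
static int
alloc_crammed(int bits, int min_bits)
{
	int have = largest_free_order();

	return (have < min_bits ? -1 : (have < bits ? have : bits));
}

int
main(void)
{
	printf("free: %zu of 16384 KB\n", free_bytes() >> 10);
	printf("uncrammed 2^16 (64KB) request gets order %d\n",
	    alloc_uncrammed(16));	/* -1: waits forever -> lockup */
	printf("crammed 2^16 request (min 2^12) gets order %d\n",
	    alloc_crammed(16, 12));	/* 14: less than asked, but progress */
	return (0);
}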
parent 652fb3d0
@@ -51,16 +51,15 @@ struct regl {
 	struct buddy_off_extent arr[];
 };
 
-// [0] crammed
-// [1] not crammed
-BUDDY_REQS(rl_rsv, 2);
+BUDDY_POOL(rl_pool, 4);	// XXX 4 good?
+BUDDY_POOL_GET_FUNC(rl_pool, static)
 
 struct regionlist {
 	unsigned		magic;
 #define REGIONLIST_MAGIC	0xeb869815
 	size_t			size;
 	VSTAILQ_HEAD(,regl)	head;
-	struct rl_rsv		rsv;
+	struct rl_pool		pool[1];
 };
 
 // usage: struct regl_stk(42)
@@ -145,12 +144,17 @@ init_regl(void)
 }
 
 static void
-regl_rsv(struct buddy_reqs *reqs)
+regl_fill(struct buddy_reqs *reqs, const void *priv)
 {
+	unsigned u;
 
 	CHECK_OBJ_NOTNULL(reqs, BUDDY_REQS_MAGIC);
+	(void) priv;
 	BUDDY_REQS_PRI(reqs, FEP_MEM_FREE);
-	AN(buddy_req_page(reqs, regl_bits, regl_bits_cram));
-	AN(buddy_req_page(reqs, regl_bits, 0));
-	(void) buddy_alloc_async(reqs);
+	for (u = 0; u < reqs->space; u++)
+		AN(buddy_req_page(reqs, regl_bits, regl_bits_cram));
 }
 
 static struct regl *
@@ -198,8 +202,7 @@ regionlist_alloc(buddy_t *membuddy)
 	INIT_OBJ(rl, REGIONLIST_MAGIC);
 	VSTAILQ_INIT(&rl->head);
-	BUDDY_REQS_INIT(&rl->rsv, membuddy);
-	regl_rsv(&rl->rsv.reqs);
+	BUDDY_POOL_INIT(rl->pool, membuddy, regl_fill, NULL);
 
 	r = regl_init(alloc, sizeof(*rl));
 	AN(r);
@@ -227,7 +230,7 @@ regionlist_append(struct regionlist *to, struct regionlist **fromp)
 	CHECK_OBJ_NOTNULL(to, REGIONLIST_MAGIC);
 	TAKE_OBJ_NOTNULL(from, fromp, REGIONLIST_MAGIC);
 
-	buddy_alloc_wait_done(&from->rsv.reqs);
+	BUDDY_POOL_FINI(from->pool);
 
 	to->size += from->size;
 	VSTAILQ_CONCAT(&to->head, &from->head);
@@ -236,36 +239,16 @@ static struct regl *
 regionlist_extend(struct regionlist *rl)
 {
 	struct buddy_ptr_page alloc;
-	struct buddy_reqs *reqs;
 	struct regl *regl;
-	unsigned n;
 
 	CHECK_OBJ_NOTNULL(rl, REGIONLIST_MAGIC);
 
-	reqs = &rl->rsv.reqs;
-	n = buddy_alloc_async_ready(reqs);
-	if (n == 0)
-		n = buddy_alloc_async_wait(reqs);
-	AN(n);
-
-	alloc = buddy_get_next_ptr_page(reqs);
+	alloc = buddy_get_next_ptr_page(rl_pool_get(rl->pool, NULL));
 	AN(alloc.ptr);
-	if (buddy_reqs_next_ready(reqs) &&
-	    alloc.bits < regl_minbits) {
-		// the first request was crammed and we have an uncrammed
-		buddy_return1_ptr_page(reqs->buddy, &alloc);
-		alloc = buddy_get_next_ptr_page(reqs);
-		AN(alloc.ptr);
-	}
 
 	regl = regl_init(alloc, (size_t)0);
 	VSTAILQ_INSERT_TAIL(&rl->head, regl, list);
 
-	// issue the next async req
-	if (! buddy_reqs_next_ready(reqs)) {
-		buddy_alloc_wait_done(reqs);
-		regl_rsv(reqs);
-	}
-
 	return (regl);
 }
@@ -319,9 +302,8 @@ regionlist_free(struct regionlist **rlp, buddy_t *dskbuddy)
 	TAKE_OBJ_NOTNULL(rl, rlp, REGIONLIST_MAGIC);
-	membuddy = rl->rsv.reqs.buddy;
-	AN(membuddy);
-	buddy_alloc_wait_done(&rl->rsv.reqs);
+	membuddy = rl->pool->reqs[0].reqs.buddy;
+	BUDDY_POOL_FINI(rl->pool);
 
 	dskret = BUDDY_RETURNS_STK(dskbuddy, BUDDY_RETURNS_MAX);
 	memret = BUDDY_RETURNS_STK(membuddy, BUDDY_RETURNS_MAX);