Reimplement logbuffer dsk block pool using BUDDY_POOL()

parent cb10bc84
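The change replaces the hand-rolled two-slot dsk request pool (an array of two lbuf_dskpool_s request sets, the active_reqs toggle and the retry loop in logbuffer_get_dskpool()) with the generic BUDDY_POOL() facility: the pool type is declared with BUDDY_POOL(), set up and torn down with BUDDY_POOL_INIT() / BUDDY_POOL_FINI(), the getter lbuf_dskpool_get() is generated by BUDDY_POOL_GET_FUNC(), and logbuffer_fill_dskpool() becomes the fill callback invoked with a (reqs, priv) pair.

The sketch below is a minimal, self-contained model of that callback-driven, double-buffered pool pattern, assuming only what is visible in this diff. Every name in it (struct pool, pool_fill_f, pool_get() and so on) is an illustrative stand-in rather than the actual buddy API, and it restocks slots synchronously, whereas the real pool fills them via buddy_alloc_async().

/*
 * Minimal model of a callback-driven, double-buffered request pool.
 * All names and internals are illustrative stand-ins, not the
 * actual fellow/buddy API.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define POOL_REQS 4	/* stand-in for LBUF_DSK_RSV_REQS */

struct reqs {
	unsigned	ready;			/* number of prepared requests */
	int		pages[POOL_REQS];	/* stand-in for allocated pages */
};

/* fill callback: (reqs, priv), mirroring logbuffer_fill_dskpool() */
typedef void pool_fill_f(struct reqs *reqs, const void *priv);

struct pool {
	struct reqs	slot[2];	/* double buffering */
	unsigned	active;		/* which slot is currently handed out */
	pool_fill_f	*fill;
	const void	*priv;
};

static void
pool_init(struct pool *p, pool_fill_f *fill, const void *priv)
{
	p->active = 0;
	p->fill = fill;
	p->priv = priv;
	/* prime both slots so a ready slot is always available */
	fill(&p->slot[0], priv);
	fill(&p->slot[1], priv);
}

/* hand out the active slot; refill and swap when it runs dry */
static struct reqs *
pool_get(struct pool *p)
{
	struct reqs *reqs = &p->slot[p->active];

	if (reqs->ready == 0) {
		p->fill(reqs, p->priv);	/* restock the drained slot */
		p->active = !p->active;	/* switch to the other, full slot */
		reqs = &p->slot[p->active];
	}
	assert(reqs->ready > 0);
	return (reqs);
}

static void
pool_fini(struct pool *p)
{
	/* a real pool would wait for outstanding asynchronous fills here */
	p->slot[0].ready = 0;
	p->slot[1].ready = 0;
}

/* example fill callback: pretend to allocate POOL_REQS pages */
static void
fill_cb(struct reqs *reqs, const void *priv)
{
	unsigned u;

	(void)priv;
	for (u = 0; u < POOL_REQS; u++)
		reqs->pages[u] = (int)u;
	reqs->ready = POOL_REQS;
}

int
main(void)
{
	struct pool p;
	struct reqs *r;

	pool_init(&p, fill_cb, NULL);
	r = pool_get(&p);
	printf("got %u ready requests\n", r->ready);
	pool_fini(&p);
	return (0);
}

As in the old logbuffer_get_dskpool() loop, the point of keeping two slots is that while one slot is being drained, the other has already been (re)filled, so a caller such as logbuffer_alloc_some() normally gets ready requests without waiting for a fresh allocation.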
@@ -420,8 +420,8 @@ assert_lbuf_dsk_resv(void)
 	assert(LBUF_DSK_RSV_REQS <= BUDDY_REQS_MAX);
 }
 
-// dsk request
-BUDDY_REQS(lbuf_dskpool_s, LBUF_DSK_RSV_REQS);
+// dsk block pool
+BUDDY_POOL(lbuf_dskpool, LBUF_DSK_RSV_REQS);
 // this is the memory request for dskpool
 BUDDY_REQS(lbuf_dskpool_mem_s, 1);
 // memory for flush_finish
@@ -467,8 +467,7 @@ struct fellow_logbuffer {
 	struct fellow_logbuffer_ff	*ff;
 	struct lbuf_ff_mem_s		ff_mem;
 	struct lbuf_dskpool_mem_s	dskpool_mem;
-	struct lbuf_dskpool_s		(*dskpool)[2];
-	unsigned			active_reqs;
+	struct lbuf_dskpool		*dskpool;
 };
 
 #define LBUF_OK(lbuf) ( \
@@ -2015,8 +2014,8 @@ logbuffer_fini_dskpool(struct fellow_logbuffer *lbuf)
 	size_t sz;
 
 	if (lbuf->dskpool != NULL) {
-		buddy_alloc_wait_done(&(*lbuf->dskpool)[0].reqs);
-		buddy_alloc_wait_done(&(*lbuf->dskpool)[1].reqs);
+		BUDDY_POOL_FINI(lbuf->dskpool);
+
 		sz = buddy_rndup(lbuf->membuddy, sizeof *lbuf->dskpool);
 		alloc = BUDDY_PTR_EXTENT(lbuf->dskpool, sz);
 		buddy_return1_ptr_extent(lbuf->membuddy, &alloc);
@@ -2291,14 +2290,14 @@ log_blocks_alloc_from_reqs(struct buddy_reqs *reqs,
 }
 
 static void
-logbuffer_fill_dskreq(const struct fellow_logbuffer *lbuf,
-    struct buddy_reqs *reqs) {
-	unsigned i;
+logbuffer_fill_dskpool(struct buddy_reqs *reqs, const void *priv)
+{
+	const struct fellow_logbuffer *lbuf;
 	uint8_t pri;
+	unsigned u;
 
-	CHECK_OBJ_NOTNULL(lbuf, FELLOW_LOGBUFFER_MAGIC);
 	CHECK_OBJ_NOTNULL(reqs, BUDDY_REQS_MAGIC);
-	buddy_alloc_wait_done(reqs);
+	CAST_OBJ_NOTNULL(lbuf, priv, FELLOW_LOGBUFFER_MAGIC);
 
 	if (lbuf->regions_to_free &&
 	    regionlist_used(lbuf->regions_to_free))
@@ -2308,45 +2307,18 @@ logbuffer_fill_dskreq(const struct fellow_logbuffer *lbuf,
 	BUDDY_REQS_PRI(reqs, pri);
-	for (i = 0; i < reqs->space; i++)
+	for (u = 0; u < reqs->space; u++)
 		AN(buddy_req_page(reqs, MIN_FELLOW_BITS, 0));
 	(void) buddy_alloc_async(reqs);
 	CHECK_OBJ_NOTNULL(reqs, BUDDY_REQS_MAGIC);
 }
 
-static struct buddy_reqs *
-logbuffer_get_dskpool(struct fellow_logbuffer *lbuf)
-{
-	struct buddy_reqs *reqs;
-	int i;
-
-	AN(lbuf->dskpool);
-
-	/* i < 3: If both reqs are empty, at iteration 3 we
-	 * must hit the first filled alloc
-	 */
-	for (i = 0; i < 3; i++) {
-		AZ(lbuf->active_reqs & ~1);
-		reqs = &(*lbuf->dskpool)[lbuf->active_reqs].reqs;
-		CHECK_OBJ_NOTNULL(reqs, BUDDY_REQS_MAGIC);
-		if (buddy_reqs_next_ready(reqs))
-			return (reqs);
-		(void) buddy_alloc_async_wait(reqs);
-		if (buddy_reqs_next_ready(reqs))
-			return (reqs);	// ret2
-		logbuffer_fill_dskreq(lbuf, reqs);
-		lbuf->active_reqs = ! lbuf->active_reqs;
-	}
-	WRONG("Expected ret2 to return");
-	//NEEDLESS(return(NULL));
-}
+// defines lbuf_dskpool_get()
+BUDDY_POOL_GET_FUNC(lbuf_dskpool, static)
 
 static void
 logbuffer_prep_dskpool(struct fellow_logbuffer *lbuf, unsigned urgent)
 {
-	struct lbuf_dskpool_s (*dskpool)[2];
+	struct lbuf_dskpool *dskpool;
 	struct buddy_ptr_extent alloc;
-	unsigned i;
 
 	if (lbuf->dskpool != NULL)
 		return;
@@ -2386,10 +2358,8 @@ logbuffer_prep_dskpool(struct fellow_logbuffer *lbuf, unsigned urgent)
 		assert(alloc.size >= sizeof *lbuf->dskpool);
 		dskpool = alloc.ptr;
 
-		for (i = 0; i < 2; i++) {
-			BUDDY_REQS_INIT(&(*dskpool)[i], lbuf->dskbuddy);
-			logbuffer_fill_dskreq(lbuf, &(*dskpool)[i].reqs);
-		}
+		BUDDY_POOL_INIT(dskpool, lbuf->dskbuddy,
+		    logbuffer_fill_dskpool, lbuf);
 		lbuf->dskpool = dskpool;
 	}
 }
@@ -2432,7 +2402,7 @@ logbuffer_alloc_some(struct fellow_logbuffer *lbuf,
 
 	while (n > 0) {
 		log_blocks_alloc_from_reqs(
-		    logbuffer_get_dskpool(lbuf),
+		    lbuf_dskpool_get(lbuf->dskpool, lbuf),
 		    &arr, &n);
 	}
@@ -3187,12 +3157,9 @@ logbuffer_availblks(const struct fellow_logbuffer *lbuf)
 	if (lbuf->logreg != NULL)
 		avail += lbuf->logreg->free_n;
-	if (lbuf->dskpool != NULL) {
-		avail += buddy_reqs_next_ready(
-		    &(*lbuf->dskpool)[0].reqs);
+	if (lbuf->dskpool != NULL)
 		avail += buddy_reqs_next_ready(
-		    &(*lbuf->dskpool)[1].reqs);
-	}
+		    lbuf_dskpool_get(lbuf->dskpool, lbuf));
 	return (avail);
 }
 
 static void