Bring back cramming to fellow_busy_seg_memalloc()

Ref #60
parent bef98385
@@ -827,7 +827,7 @@ struct fellow_busy_io_seglist {
 // pool for segment memory
 BUDDY_POOL(fbo_segmem, 1);
 BUDDY_POOL_GET_FUNC(fbo_segmem, static)
-//BUDDY_POOL_AVAIL_FUNC(fbo_segmem, static)
+BUDDY_POOL_AVAIL_FUNC(fbo_segmem, static)
 
 struct fellow_busy_io {
 	uint16_t		magic;
@@ -4941,16 +4941,21 @@ fellow_busy_body_seg_next(struct fellow_busy *fbo)
  *
  * used for body and auxattr
  */
-static size_t fellow_busy_seg_memalloc(struct fellow_busy *fbo,
+static size_t
+fellow_busy_seg_memalloc(struct fellow_busy *fbo,
     struct fellow_cache_seg *fcs, int8_t cram)
 {
 	struct fellow_disk_seg *fds;
 	struct buddy_ptr_page mem;
+	struct fellow_cache *fc;
+	unsigned bits;
 	size_t sz;
 
 	CHECK_OBJ_NOTNULL(fcs, FELLOW_CACHE_SEG_MAGIC);
 	fds = fcs->disk_seg;
 	CHECK_OBJ_NOTNULL(fds, FELLOW_DISK_SEG_MAGIC);
+	fc = fbo->fc;
+	CHECK_OBJ_NOTNULL(fc, FELLOW_CACHE_MAGIC);
 	assert (fcs->state == FCS_USABLE);
 	AN(fds->seg.off);
@@ -4962,13 +4967,33 @@ static size_t fellow_busy_seg_memalloc(struct fellow_busy *fbo,
 	if (FC_INJ)
 		return (0);
 
+	bits = log2up(fds->seg.size);
+	assert(bits >= MIN_FELLOW_BITS);
+	assert(bits <= INT8_MAX);
+
 	if (cram != 0) {
-		cram = buddy_cramlimit_extent_minbits(fds->seg.size,
-		    fbo->fc->tune->cram, MIN_FELLOW_BITS);
+		cram = buddy_cramlimit_page_minbits((int8_t)bits,
+		    fc->tune->cram, MIN_FELLOW_BITS);
 	}
+
+	/*
+	 * the fbo_segmem pool does not cram - because it pre-allocates,
+	 * this is the best option. But if it is waiting, use a crammed
+	 * immediate allocation. If that fails, wait on the pool
+	 * to avoid double waiting allocations.
+	 */
+	mem = buddy_ptr_page_nil;
+	if (! fbo_segmem_avail(fbo->segmem)) {
+		mem = buddy_alloc1_ptr_page(fc->membuddy,
+		    (uint8_t)bits, cram);
+	}
+	if (mem.ptr == NULL) {
+		mem = buddy_get_next_ptr_page(
+		    fbo_segmem_get(fbo->segmem, fc->tune));
+	}
-	mem = buddy_get_next_ptr_page(
-	    fbo_segmem_get(fbo->segmem, fbo->fc->tune));
 	AN(mem.ptr);
 	sz = (size_t)1 << mem.bits;
@@ -4976,14 +5001,14 @@ static size_t fellow_busy_seg_memalloc(struct fellow_busy *fbo,
 	 * if the fbo_segmem pool gave us too large/small a page,
 	 * trade it in for a new allocation
 	 */
-	if (log2up(fds->seg.size) < mem.bits ||
+	if (bits < mem.bits ||
 	    (cram == 0 && sz < fds->seg.size)) {
 		struct buddy_reqs *reqs =
-		    BUDDY_REQS_STK(fbo->fc->membuddy, 1);
+		    BUDDY_REQS_STK(fc->membuddy, 1);
 
 		BUDDY_REQS_PRI(reqs, FEP_SPCPRI);
 		AN(buddy_req_extent(reqs, fds->seg.size, 0));
-		buddy_return1_ptr_page(fbo->fc->membuddy, &mem);
+		buddy_return1_ptr_page(fc->membuddy, &mem);
 		AN(buddy_alloc_wait(reqs));
 		fcs->alloc = buddy_get_ptr_extent(reqs, 0);
 		buddy_alloc_wait_done(reqs);
@@ -4997,7 +5022,7 @@ static size_t fellow_busy_seg_memalloc(struct fellow_busy *fbo,
 		return (0);
 
 	if (fds->seg.size < fcs->alloc.size) {
-		buddy_trim1_ptr_extent(fbo->fc->membuddy, &fcs->alloc,
+		buddy_trim1_ptr_extent(fc->membuddy, &fcs->alloc,
 		    fds->seg.size);
 	}
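
The following is a small, self-contained sketch, not part of this commit and not the fellow/buddy implementation, of the arithmetic the new bits/cram handling relies on: a request for 2^bits bytes may be satisfied by a smaller power-of-two block, at most `cram` orders smaller and never below a minimum order. All names (toy_log2up, toy_cramlimit, TOY_MIN_BITS) are illustrative stand-ins, and only positive cram values are modeled.

/*
 * Toy illustration of cram limiting against a minimum order.
 * Not the fellow implementation; names and semantics are assumptions.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_MIN_BITS 12		/* stand-in for MIN_FELLOW_BITS (4KB) */

/* smallest b such that (1 << b) >= sz */
static unsigned
toy_log2up(size_t sz)
{
	unsigned b = 0;

	assert(sz > 0);
	while (((size_t)1 << b) < sz)
		b++;
	return (b);
}

/* clamp a requested cram so crammed allocations never drop below minbits */
static int8_t
toy_cramlimit(unsigned bits, int8_t cram, unsigned minbits)
{
	assert(bits >= minbits);
	if (cram > (int8_t)(bits - minbits))
		cram = (int8_t)(bits - minbits);
	return (cram);
}

int
main(void)
{
	size_t want = 100 * 1024;		/* a 100KB segment */
	unsigned bits = toy_log2up(want);	/* order 17 (128KB) */
	int8_t cram = toy_cramlimit(bits, 3, TOY_MIN_BITS);

	printf("request %zu bytes -> order %u, may cram down to order %u\n",
	    want, bits, bits - (unsigned)cram);
	return (0);
}

Under these assumptions, a 100KB request maps to order 17 and, with a cram of 3, may fall back to order 14 while staying above the 4KB floor.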
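And a minimal sketch of the allocation order the new code follows, again with hypothetical stand-ins rather than the fellow API: the pre-allocating pool is preferred; an immediate, possibly crammed allocation is attempted only while the pool has nothing ready; and the caller blocks on the pool only if that immediate attempt fails, so no allocation waits twice.

/*
 * Sketch only: pool_has_preallocation(), immediate_crammed_alloc() and
 * wait_for_pool_page() are hypothetical stand-ins for the buddy pool
 * functions used in the commit, not real APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static bool
pool_has_preallocation(void)
{
	return (false);		/* pretend the pool is still waiting */
}

static void *
immediate_crammed_alloc(unsigned bits, int8_t cram)
{
	(void)cram;	/* a real allocator could shrink the order by up to cram */
	return (malloc((size_t)1 << bits));
}

static void *
wait_for_pool_page(void)
{
	return (malloc((size_t)1 << 12));	/* blocking path, simplified */
}

/* only one of the two paths may wait: immediate alloc first, pool second */
static void *
seg_memalloc_sketch(unsigned bits, int8_t cram)
{
	void *mem = NULL;

	if (!pool_has_preallocation())
		mem = immediate_crammed_alloc(bits, cram);
	if (mem == NULL)
		mem = wait_for_pool_page();
	return (mem);
}

int
main(void)
{
	void *p = seg_memalloc_sketch(17, 3);

	printf("allocated %p\n", p);
	free(p);
	return (0);
}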