Refactor out the segment sizing strategy

parent 15b40599
......@@ -2365,6 +2365,58 @@ fellow_disk_seglist_alloc(struct fellow_busy *fbo,
return (FCR_OK(fcsl));
}
/* Result of the body sizing strategy: a request size paired with the
 * buddy-allocator cram limit to use for that request.
 * NOTE(review): presumably cram semantics match buddy_cramlimit_bits()
 * (0 = no cram, INT8_MIN = allow page splits) — confirm against the
 * buddy allocator's documentation.
 */
struct szc {
	size_t sz;	// number of bytes to allocate next
	int8_t cram;	// cram limit to pass to the region allocation
};
/*
 * Return a size and cram limit to allocate next for the object body,
 * given that `already` bytes are already allocated.
 *
 * Strategy:
 * - Running low on regions (only the reserve left): disable cram so we
 *   get one maximally sized region; if the object has also grown past
 *   its estimate, assume the worst case (objsize_max) for the remainder.
 * - Known size (not grown) below one chunk: keep the small request as-is
 *   (content-length or initial chunked request).
 * - Known size otherwise: round down to a power of two.
 * - Unknown size (grown): round up to a power of two.
 */
static struct szc
fellow_busy_body_size_strategy(const struct fellow_busy *fbo, size_t already)
{
	unsigned chunkbits;
	struct szc szc;

	chunkbits = fbo->fc->tune->chunk_exponent;
	szc.cram = fbo->fc->tune->cram;

	assert(fbo->sz_estimate > already);
	/* BUGFIX: subtract the `already` argument, not fbo->sz_dskalloc,
	 * so the function actually honors its parameter (the sole caller
	 * passes fbo->sz_dskalloc, so behavior is unchanged for it). */
	szc.sz = fbo->sz_estimate - already;

	// out of regions: no cram
	// if also grown: assume worst case size
	if (fbo->nregion >= FCO_MAX_REGIONS - FCO_REGIONS_RESERVE) {
		szc.cram = 0;
		if (fbo->grown) {
			szc.sz = fbo->fc->tune->objsize_max - already;
		}
	}
	else if (fbo->grown == 0 && (szc.sz >> chunkbits) == 0)
		// c-l or initial chunked - keep small req
		(void)szc.sz;
	else if (fbo->grown == 0)
		// known size -> round down
		szc.sz = (size_t)1 << log2down(szc.sz);
	else
		// size unknown -> round up
		szc.sz = (size_t)1 << log2up(szc.sz);

	/* for allocation larger than chunk size, always allow cram down to
	 * chunk size, but do split pages (INT8_MIN)
	 */
	if (szc.cram > 0 && szc.sz >= (size_t)1 << chunkbits) {
		assert(chunkbits <= INT8_MAX);
		szc.cram = buddy_cramlimit_bits(szc.sz, INT8_MIN,
		    (int8_t)chunkbits);
	}

	return (szc);
}
/* allocate a new region for body data */
static size_t
fellow_busy_body_seg_alloc(struct fellow_busy *fbo,
......@@ -2373,9 +2425,8 @@ fellow_busy_body_seg_alloc(struct fellow_busy *fbo,
struct fellow_body_region *fbr;
struct buddy_off_extent *fdr;
struct fellow_cache *fc;
unsigned chunkbits;
struct szc szc;
size_t spc;
int8_t cram;
CHECK_OBJ_NOTNULL(fbo, FELLOW_BUSY_MAGIC);
CHECK_OBJ_NOTNULL(fds, FELLOW_DISK_SEG_MAGIC);
......@@ -2389,43 +2440,13 @@ fellow_busy_body_seg_alloc(struct fellow_busy *fbo,
fdr = fbr->reg;
if (fdr == NULL || fbr->len == fdr->size) {
cram = fbo->fc->tune->cram;
chunkbits = fbo->fc->tune->chunk_exponent;
memset(fbr, 0, sizeof *fbr);
assert(fbo->sz_estimate > fbo->sz_dskalloc);
spc = fbo->sz_estimate - fbo->sz_dskalloc;
if (fbo->nregion >= FCO_MAX_REGIONS - FCO_REGIONS_RESERVE) {
// out of regions: no cram,
// if grown, assume worst, else keep spc
cram = 0;
if (fbo->grown) {
spc = fbo->fc->tune->objsize_max -
fbo->sz_dskalloc;
}
}
else if (fbo->grown == 0 && (spc >> chunkbits) == 0)
// c-l or initial chunked - keep small req
(void)spc;
else if (fbo->grown == 0)
// known size -> round down
spc = (size_t)1 << log2down(spc);
else
// size unknown -> round up
spc = (size_t)1 << log2up(spc);
/* for allocation larger than chunk size, always allow
* cram down to chunk size, but do split pages (INT8_MIN)
*/
if (cram > 0 && spc >= (size_t)1 << chunkbits) {
assert(fbo->fc->tune->chunk_exponent <= INT8_MAX);
cram = buddy_cramlimit_bits(spc, INT8_MIN,
(int8_t)fbo->fc->tune->chunk_exponent);
}
szc = fellow_busy_body_size_strategy(fbo, fbo->sz_dskalloc);
fdr = fellow_busy_region_alloc(fbo, spc, cram);
fdr = fellow_busy_region_alloc(fbo, szc.sz, szc.cram);
if (fdr == NULL)
return (0);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment