fellow_busy_obj: Allocate fdo with at most FDO_MAX_EMBED_SEGS

This is to make reading disk objects more efficient later on.

This patch triggered c93.vtc failing, so we add a bit of a hack to avoid
a problem which might need more attention later:

FCO_MAX_REGIONS is an issue for chunked encoding objects (with growing
size). We have not yet implemented the best strategy and just tried
to always allocate the largest possible seglist to not use up more
regions than necessary, but small memory configurations do not support
the maximum seglist (4 MB).
parent ea614d48
......@@ -682,14 +682,10 @@ struct fellow_cache_obj {
/*
* we write fellow_disk_object(s) with the embedded fellow_disk_seglist such
* that, when reading the object, the fdo can either be fully embedded in an
* allocation of 4KB, or at least the fcsl_embed can hold all of the
* that, when reading the object, the fcsl_embed can hold all of the
* fellow_disk_seglist.
*
* we want the disk format to be stable, but we might need to grow the
* fellow_cache_obj, so add some extra headroom
*
* starting point on linux
* the numbers on linux:
*
* (gdb) p ((1<<12) - sizeof(struct fellow_cache_obj)) / sizeof(struct fellow_cache_seg)
* $4 = 58
......@@ -699,11 +695,14 @@ struct fellow_cache_obj {
* -> 24 bytes left
*
* mutex_t and cond_t can be bigger on other platforms and might change in size,
* so this optimization may always fail. no problem if it does, we just waste
* some space when reading back objects
* so this optimization may always fail when transporting storage to other
* platforms. no problem if it does, we just waste some space when reading back
* objects
*/
//#define FCO_MAX_SIZE
#define FDO_MAX_EMBED_SEGS \
((MIN_FELLOW_BLOCK - sizeof(struct fellow_cache_obj)) \
/ sizeof(struct fellow_cache_seg))
#define fellow_cache_obj_size(fco) \
(sizeof *fco + (fco)->fcsl_embed.lsegs * sizeof *(fco)->fcsl_embed.segs)
......@@ -2199,22 +2198,19 @@ fellow_busy_obj_alloc(struct fellow_cache *fc,
// round up to disk block such that there
// is space for at least one disk_seg
wsl = (unsigned)PRNDUP(wsl);
asz = sizeof(struct fellow_disk_obj) +
wsl +
sizeof(struct fellow_disk_seglist) +
sizeof(struct fellow_disk_seg);
sz = asz = sizeof(struct fellow_disk_obj) + wsl;
assert(PAOK(asz));
sz += sizeof(struct fellow_disk_seglist);
sz += sizeof(struct fellow_disk_seg);
assert(PAOK(sz));
dsk_sz = fellow_rndup(fc->ffd, asz);
dsk_sz = fellow_rndup(fc->ffd, sz);
assert(PAOK(dsk_sz));
assert(dsk_sz >= sz);
// DBG("roundup fdo + fdsl dsk_sz %zu", dsk_sz);
assert(dsk_sz >= asz);
sz = ((dsk_sz - asz) + sizeof(struct fellow_disk_seg))
/ sizeof(struct fellow_disk_seg);
assert(sz <= FELLOW_DISK_SEGLIST_MAX_SEGS);
assert(sz <= UINT16_MAX);
ldsegs = (uint16_t)sz;
ldsegs = fellow_disk_seglist_fit(dsk_sz - asz);
if (ldsegs > FDO_MAX_EMBED_SEGS)
ldsegs = FDO_MAX_EMBED_SEGS;
// DBG("-> %u embedded segs", ldsegs);
......@@ -2485,8 +2481,17 @@ fellow_busy_body_seglist_alloc(struct fellow_busy *fbo,
assert(fbo->sz_estimate > fbo->sz_returned);
/* sz is number of segments */
if (fbo->grown) {
/*
* XXX for the grown case, we currently try to avoid FCO_MAX_REGIONS
 * by always allocating the largest possible seglist. but that
* leads to a hang for super small memory configurations.
*
 * the case is likely exotic, but we avoid it for the common
* cases by allocating one extra seglist by strategy
*
* sz is number of segments
*/
if (fbo->grown && fbo->body_seglist != fbo->fco->fcsl) {
sz = fbo->fc->tune->objsize_max - fbo->sz_dskalloc;
sz += (((size_t)1 << MIN_FELLOW_BITS) - 1);
sz >>= MIN_FELLOW_BITS;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment