fellow_cache_obj: batch memory returns

parent d0338ca8
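
The change replaces the immediate buddy_return1_*() calls in the object-free and iterator paths with a stack-allocated struct buddy_returns that collects the memory returns and hands them back to the buddy allocator in one buddy_return() call. Below is a minimal sketch of that pattern, not taken from the commit, using only the calls visible in the diff; the "buddy.h" include, the buddy_t typedef, the helper name return_two_extents and its prototype are illustrative assumptions, and AN() is the usual Varnish assert-nonzero macro.

/*
 * Sketch only, not part of the commit: the batched-return pattern this
 * change switches to. The header name, buddy_t typedef and the helper
 * prototype are assumptions; the batch calls themselves are the ones
 * visible in the diff below.
 */
#include "buddy.h"

static void
return_two_extents(buddy_t *membuddy,
    struct buddy_ptr_extent *e1, struct buddy_ptr_extent *e2)
{
	struct buddy_returns *memret;

	/* before: one allocator round trip per freed allocation
	 *	buddy_return1_ptr_extent(membuddy, e1);
	 *	buddy_return1_ptr_extent(membuddy, e2);
	 */

	/* after: collect the returns in a stack-allocated batch ... */
	memret = BUDDY_RETURNS_STK(membuddy, BUDDY_RETURNS_MAX);
	AN(buddy_return_ptr_extent(memret, e1));
	AN(buddy_return_ptr_extent(memret, e2));

	/* ... and hand them all back to the allocator at once */
	buddy_return(memret);
}

In the diff, fellow_cache_seg_free(), fellow_cache_seg_auxattr_free() and fellow_cache_seglist_free() now take the struct buddy_returns * instead of the struct fellow_cache *, so fellow_cache_obj_free() builds one batch for the whole object and returns it once at the end, while fellow_cache_obj_iter() stores its batch in struct fcoi_deref and flushes it in fcoi_deref() and fellow_cache_obj_iter_flush_deref().
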
@@ -1356,7 +1356,7 @@ fellow_cache_seg_wait_locked(const struct fellow_cache_seg *fcs)
  * called holding the fco lock
  */
 static void
-fellow_cache_seg_free(const struct fellow_cache *fc,
+fellow_cache_seg_free(struct buddy_returns *memret,
     struct fellow_cache_seg *fcs, unsigned deref)
 {
@@ -1390,7 +1390,7 @@ fellow_cache_seg_free(const struct fellow_cache *fc,
 		fellow_cache_lru_chgbatch_apply(lcb);
 	}
 	if (fcs->alloc.ptr)
-		buddy_return1_ptr_extent(fc->membuddy, &fcs->alloc);
+		AN(buddy_return_ptr_extent(memret, &fcs->alloc));
 	AZ(fcs->fcs_onlru);
 	assert(fcs->refcnt == deref);
 	fcs->refcnt = 0;
@@ -1408,29 +1408,26 @@ fellow_cache_seg_free(const struct fellow_cache *fc,
  * fco->mtx held
  */
 static void
-fellow_cache_seg_auxattr_free(const struct fellow_cache *fc,
+fellow_cache_seg_auxattr_free(struct buddy_returns *memret,
     struct fellow_cache_seg *fcs)
 {
 	CHECK_OBJ_NOTNULL(fcs, FELLOW_CACHE_SEG_MAGIC);
 	assert_cache_seg_consistency(fcs);
-	fellow_cache_seg_free(fc, fcs, fcs->refcnt);
+	fellow_cache_seg_free(memret, fcs, fcs->refcnt);
 }
 /* fco->mtx held unless surplus seglist */
 static void
-fellow_cache_seglist_free(const struct fellow_cache *fc,
+fellow_cache_seglist_free(struct buddy_returns *memret,
     struct fellow_cache_seglist *fcsl)
 {
 	struct fellow_cache_seglist *next;
-	struct buddy_returns *memret;
 	struct buddy_ptr_extent e;
 	unsigned u, segs;
 	AN(fcsl);
-	memret = BUDDY_RETURNS_STK(fc->membuddy, BUDDY_RETURNS_MAX);
 	while (fcsl != NULL) {
 		CHECK_OBJ(fcsl, FELLOW_CACHE_SEGLIST_MAGIC);
@@ -1439,7 +1436,7 @@ fellow_cache_seglist_free(const struct fellow_cache *fc,
 		segs = fcsl->fdsl->nsegs;
 		for (u = 0; u < segs; u++)
-			fellow_cache_seg_free(fc, &fcsl->segs[u], 0);
+			fellow_cache_seg_free(memret, &fcsl->segs[u], 0);
 		for (; u < fcsl->lsegs; u++) {
 			assert(FCOS_IS(fcsl->segs[u].state, INIT) ||
 			    FCOS_IS(fcsl->segs[u].state, USABLE));
@@ -1464,7 +1461,6 @@ fellow_cache_seglist_free(const struct fellow_cache *fc,
 		fcsl = next;
 	}
-	buddy_return(memret);
 }
 /*
@@ -1566,9 +1562,13 @@ fellow_cache_obj_free(const struct fellow_cache *fc,
 	struct fellow_cache_obj *fco;
 	struct fellow_cache_seg *fcs;
 	struct buddy_ptr_extent mem;
+	struct buddy_returns *memret;
+	CHECK_OBJ_NOTNULL(fc, FELLOW_CACHE_MAGIC);
 	TAKE_OBJ_NOTNULL(fco, fcop, FELLOW_CACHE_OBJ_MAGIC);
+	memret = BUDDY_RETURNS_STK(fc->membuddy, BUDDY_RETURNS_MAX);
 	struct fellow_lru_chgbatch lcb[1] =
 	    FELLOW_LRU_CHGBATCH_INIT(lcb, fco, 1);
@@ -1580,10 +1580,10 @@ fellow_cache_obj_free(const struct fellow_cache *fc,
 	fellow_cache_lru_chgbatch_apply(lcb);
 	DBG("fco %p", fco);
-	fellow_cache_seglist_free(fc, &fco->seglist);
+	fellow_cache_seglist_free(memret, &fco->seglist);
 #define FDO_AUXATTR(U, l) \
-	fellow_cache_seg_auxattr_free(fc, &fco->aa_##l##_seg);
+	fellow_cache_seg_auxattr_free(memret, &fco->aa_##l##_seg);
 #include "tbl/fellow_obj_attr.h"
 	AZ(fcs->fco_infdb);
@@ -1593,13 +1593,14 @@ fellow_cache_obj_free(const struct fellow_cache *fc,
 	AZ(pthread_mutex_destroy(&fco->mtx));
 	AZ(pthread_cond_destroy(&fco->cond));
-	fellow_cache_seg_free(fc, fcs, 0);
+	fellow_cache_seg_free(memret, fcs, 0);
 	if (fco->fco_dowry.bits)
-		buddy_return1_ptr_page(fc->membuddy, &fco->fco_dowry);
+		AN(buddy_return_ptr_page(memret, &fco->fco_dowry));
 	TAKE(mem, fco->fco_mem);
 	assert(mem.ptr == fco);
-	buddy_return1_ptr_extent(fc->membuddy, &mem);
+	AN(buddy_return_ptr_extent(memret, &mem));
+	buddy_return(memret);
 }
 /*
@@ -3806,6 +3807,7 @@ struct fcoi_deref {
 	struct fellow_cache_seg **segs;
 	objiterate_f *func;
 	void *priv;
+	struct buddy_returns *memret;
 };
 static inline void
@@ -3828,6 +3830,8 @@ fcoi_deref(struct fcoi_deref *fcoid)
 	CHECK_OBJ_NOTNULL(fcoid, FCOI_DEREF_MAGIC);
+	buddy_return(fcoid->memret);
 	if (fcoid->n == 0)
 		return;
@@ -3849,6 +3853,8 @@ fellow_cache_obj_iter_flush_deref(struct fcoi_deref *fcoid)
 	CHECK_OBJ_NOTNULL(fcoid, FCOI_DEREF_MAGIC);
+	buddy_return(fcoid->memret);
 	if (fcoid->n == 0)
 		return (0);
@@ -4172,6 +4178,7 @@ fellow_cache_obj_iter(struct fellow_cache *fc, struct fellow_cache_obj *fco,
 	fcoid->segs = deref;
 	fcoid->func = func;
 	fcoid->priv = priv;
+	fcoid->memret = BUDDY_RETURNS_STK(fc->membuddy, 1);
 	fcsc_init(&c, &fco->seglist);
 	rac = c;
@@ -4267,7 +4274,7 @@ fellow_cache_obj_iter(struct fellow_cache *fc, struct fellow_cache_obj *fco,
 			 * get deleted when the object is */
 			AZ(pthread_mutex_lock(&fcs->fco->mtx));
 			if (fcs->refcnt == 1)
-				fellow_cache_seg_free(fc, fcs, 1);
+				fellow_cache_seg_free(fcoid->memret, fcs, 1);
 			else {
 				// NULL: refcount must be > 1, so cant be on LRU
 				AN(fellow_cache_seg_deref_locked(NULL, fcs));
@@ -4666,6 +4673,7 @@ fellow_busy_obj_trim_seglists(struct fellow_busy *fbo)
 	struct fellow_cache_seglist *fcsl;
 	struct fellow_disk_seglist *fdsl;
 	struct fellow_cache_obj *fco;
+	struct buddy_returns *memret;
 	CHECK_OBJ_NOTNULL(fbo, FELLOW_BUSY_MAGIC);
 	fco = fbo->fco;
@@ -4698,7 +4706,9 @@ fellow_busy_obj_trim_seglists(struct fellow_busy *fbo)
 	/* not holding fco->mtx because we trim surplus seglists
 	 * which are not on the lru
 	 */
-	fellow_cache_seglist_free(fbo->fc, fbo->body_seglist);
+	memret = BUDDY_RETURNS_STK(fbo->fc->membuddy, BUDDY_RETURNS_MAX);
+	fellow_cache_seglist_free(memret, fbo->body_seglist);
+	buddy_return(memret);
 }
 static inline int
...