Garbage-collect obsolete code

fellow_cache_lru_chg() already calls fellow_cache_lru_chgbatch_apply()
when the remove array is full, so the explicit batch-full checks at the
call sites are redundant and can be removed.
parent b36a96be
...@@ -3403,7 +3403,6 @@ fellow_cache_async_fini(struct fellow_cache *fc) ...@@ -3403,7 +3403,6 @@ fellow_cache_async_fini(struct fellow_cache *fc)
static void static void
fellow_cache_seg_deref(struct fellow_cache_seg * const *segs, unsigned n) fellow_cache_seg_deref(struct fellow_cache_seg * const *segs, unsigned n)
{ {
#define DEREF_BATCH 64
struct fellow_cache_seg *fcs; struct fellow_cache_seg *fcs;
struct fellow_cache_obj *fco; struct fellow_cache_obj *fco;
...@@ -3413,19 +3412,16 @@ fellow_cache_seg_deref(struct fellow_cache_seg * const *segs, unsigned n) ...@@ -3413,19 +3412,16 @@ fellow_cache_seg_deref(struct fellow_cache_seg * const *segs, unsigned n)
CHECK_OBJ_NOTNULL(fco, FELLOW_CACHE_OBJ_MAGIC); CHECK_OBJ_NOTNULL(fco, FELLOW_CACHE_OBJ_MAGIC);
struct fellow_lru_chgbatch lcb[1] = struct fellow_lru_chgbatch lcb[1] =
FELLOW_LRU_CHGBATCH_INIT(lcb, fco, DEREF_BATCH); FELLOW_LRU_CHGBATCH_INIT(lcb, fco, 64);
AZ(pthread_mutex_lock(&fco->mtx)); AZ(pthread_mutex_lock(&fco->mtx));
while (n--) { while (n--) {
fcs = *segs++; fcs = *segs++;
CHECK_OBJ_NOTNULL(fcs, FELLOW_CACHE_SEG_MAGIC); CHECK_OBJ_NOTNULL(fcs, FELLOW_CACHE_SEG_MAGIC);
(void) fellow_cache_seg_deref_locked(lcb, fcs); (void) fellow_cache_seg_deref_locked(lcb, fcs);
if (lcb->n_rem == DEREF_BATCH)
fellow_cache_lru_chgbatch_apply(lcb);
} }
fellow_cache_lru_chgbatch_apply(lcb); fellow_cache_lru_chgbatch_apply(lcb);
AZ(pthread_mutex_unlock(&fco->mtx)); AZ(pthread_mutex_unlock(&fco->mtx));
#undef DEREF_BATCH
} }
static const char * static const char *
...@@ -3486,7 +3482,6 @@ static void ...@@ -3486,7 +3482,6 @@ static void
fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type, fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type,
struct fellow_cache_seg * const *segs, const unsigned n) struct fellow_cache_seg * const *segs, const unsigned n)
{ {
#define REF_BATCH 64
struct fellow_cache_seg *fcs, *iosegs[n], *racesegs[n]; struct fellow_cache_seg *fcs, *iosegs[n], *racesegs[n];
struct fellow_cache_obj *fco; struct fellow_cache_obj *fco;
unsigned u, ion = 0, racen = 0; unsigned u, ion = 0, racen = 0;
...@@ -3502,7 +3497,7 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type, ...@@ -3502,7 +3497,7 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type,
CHECK_OBJ_NOTNULL(fco, FELLOW_CACHE_OBJ_MAGIC); CHECK_OBJ_NOTNULL(fco, FELLOW_CACHE_OBJ_MAGIC);
struct fellow_lru_chgbatch lcb[1] = struct fellow_lru_chgbatch lcb[1] =
FELLOW_LRU_CHGBATCH_INIT(lcb, fco, REF_BATCH); FELLOW_LRU_CHGBATCH_INIT(lcb, fco, 64);
assert(n <= BUDDY_REQS_MAX); assert(n <= BUDDY_REQS_MAX);
reqs = BUDDY_REQS_STK(fc->membuddy, BUDDY_REQS_MAX); reqs = BUDDY_REQS_STK(fc->membuddy, BUDDY_REQS_MAX);
...@@ -3566,8 +3561,6 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type, ...@@ -3566,8 +3561,6 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type,
fcs = segs[u]; fcs = segs[u];
(void) fellow_cache_seg_ref_locked(lcb, fcs); (void) fellow_cache_seg_ref_locked(lcb, fcs);
if (lcb->n_rem == REF_BATCH)
fellow_cache_lru_chgbatch_apply(lcb);
while (type == FCIO_SYNC && while (type == FCIO_SYNC &&
(fcs->state == FCS_BUSY || fcs->state == FCS_READING)) { (fcs->state == FCS_BUSY || fcs->state == FCS_READING)) {
...@@ -3646,7 +3639,6 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type, ...@@ -3646,7 +3639,6 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type,
/* retry any raced */ /* retry any raced */
if (racen > 0) if (racen > 0)
fellow_cache_seg_ref_in(fc, type, racesegs, racen); fellow_cache_seg_ref_in(fc, type, racesegs, racen);
#undef REF_BATCH
} }
/* /*
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment