Keep a counter of the number of LRU entries

parent 03ff6049
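
The change below adds an informational entry counter `n` to `struct fellow_cache_lru` and splits the change-batch counter into separate add/remove counts (`n_add`, `l_rem`, `n_rem`). As a rough illustration of the underlying pattern, here is a minimal, self-contained sketch. It is not the fellow code: it uses the portable sys/queue.h TAILQ macros in place of VTAILQ, and all names are illustrative. The idea is that the counter only ever changes together with the list, under the same lock, so the list length can be read and asserted without walking the list.

/* Minimal sketch, not the fellow code: a counter kept in lock-step
 * with an intrusive LRU list. Uses sys/queue.h TAILQ instead of
 * VTAILQ; names are illustrative. */
#include <assert.h>
#include <pthread.h>
#include <sys/queue.h>

#define AZ(x) assert((x) == 0)	/* mirrors the AZ()/AN() used in the diff */
#define AN(x) assert((x) != 0)

struct entry {
	TAILQ_ENTRY(entry) lru_list;
};

struct lru {
	pthread_mutex_t mtx;
	unsigned n;			/* informational: entries on list */
	TAILQ_HEAD(, entry) head;
};

static void
lru_insert(struct lru *lru, struct entry *e)
{
	AZ(pthread_mutex_lock(&lru->mtx));
	TAILQ_INSERT_TAIL(&lru->head, e, lru_list);
	lru->n++;			/* counter moves with the list */
	AZ(pthread_mutex_unlock(&lru->mtx));
}

static void
lru_remove(struct lru *lru, struct entry *e)
{
	AZ(pthread_mutex_lock(&lru->mtx));
	AN(lru->n);			/* must not underflow */
	lru->n--;
	TAILQ_REMOVE(&lru->head, e, lru_list);
	AZ(pthread_mutex_unlock(&lru->mtx));
}

int
main(void)
{
	struct lru lru;
	struct entry e;

	AZ(pthread_mutex_init(&lru.mtx, NULL));
	TAILQ_INIT(&lru.head);
	lru.n = 0;
	lru_insert(&lru, &e);
	lru_remove(&lru, &e);
	/* teardown: counter must agree with the (empty) list */
	assert(TAILQ_EMPTY(&lru.head));
	AZ(lru.n);
	AZ(pthread_mutex_destroy(&lru.mtx));
	return (0);
}

At teardown, checking the counter against zero next to the list-emptiness assertion gives a cheap cross-check that insertions and removals stayed balanced, which is exactly what the new AZ(lru->n) in fellow_cache_lru_fini() does.
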
@@ -69,6 +69,8 @@ VTAILQ_HEAD(fellow_cache_lru_head, fellow_cache_seg);
 struct fellow_cache_lru {
 	unsigned magic;
 #define FELLOW_CACHE_LRU_MAGIC 0x5fd80809
+	// informational: how many entries on list
+	unsigned n;
 	struct fellow_cache *fc;
 	pthread_mutex_t lru_mtx;
 	struct fellow_cache_lru_head lru_head;
@@ -103,6 +105,7 @@ fellow_cache_lru_fini(struct fellow_cache_lru **lrup)
 		AZ(r);
 	}
 	assert(VTAILQ_EMPTY(&lru->lru_head));
+	AZ(lru->n);
 	AZ(pthread_mutex_destroy(&lru->lru_mtx));
 }
@@ -229,7 +232,8 @@ assert_fcos_transition(enum fcos_state f, enum fcos_state t)
 struct fellow_lru_chgbatch {
 	unsigned magic;
 #define FELLOW_LRU_CHGBATCH_MAGIC 0xaab452d9
-	uint16_t l, n;
+	unsigned n_add;
+	unsigned l_rem, n_rem;
 	struct fellow_cache_obj *fco;
 	struct fellow_cache_lru_head add;
 	struct fellow_cache_seg **fcs;
@@ -237,8 +241,9 @@ struct fellow_lru_chgbatch {
 #define FELLOW_LRU_CHGBATCH_INIT(name, fcoa, size) {{ \
 	.magic = FELLOW_LRU_CHGBATCH_MAGIC, \
-	.l = (size), \
-	.n = 0, \
+	.n_add = 0, \
+	.l_rem = (size), \
+	.n_rem = 0, \
 	.fco = (fcoa), \
 	.add = VTAILQ_HEAD_INITIALIZER((name)->add), \
 	.fcs = (struct fellow_cache_seg*[size + 1]){0} \
@@ -1080,7 +1085,7 @@ fellow_cache_lru_chgbatch_apply(struct fellow_lru_chgbatch *lcb)
 	//DBG("%u/%u", lcb->n, !VTAILQ_EMPTY(&lcb->add));
-	if (lcb->n == 0 && VTAILQ_EMPTY(&lcb->add))
+	if (lcb->n_rem == 0 && VTAILQ_EMPTY(&lcb->add))
 		return;
 	fco = lcb->fco;
@@ -1088,7 +1093,7 @@ fellow_cache_lru_chgbatch_apply(struct fellow_lru_chgbatch *lcb)
 	lru = fco->lru;
 	CHECK_OBJ_NOTNULL(lru, FELLOW_CACHE_LRU_MAGIC);
-	n = lcb->n;
+	n = lcb->n_rem;
 	while (n--) {
 		fcs = lcb->fcs[n];
 		AZ(fcs->lcb_add);
@@ -1102,16 +1107,19 @@ fellow_cache_lru_chgbatch_apply(struct fellow_lru_chgbatch *lcb)
 	}
 	AZ(pthread_mutex_lock(&lru->lru_mtx));
-	while (lcb->n) {
-		lcb->n--;
-		TAKE_OBJ_NOTNULL(fcs, &lcb->fcs[lcb->n],
+	lru->n += lcb->n_add;
+	lru->n -= lcb->n_rem;
+	while (lcb->n_rem) {
+		lcb->n_rem--;
+		TAKE_OBJ_NOTNULL(fcs, &lcb->fcs[lcb->n_rem],
 		    FELLOW_CACHE_SEG_MAGIC);
 		assert(fcs->fco == fco);
 		VTAILQ_REMOVE(&lru->lru_head, fcs, lru_list);
 	}
 	VTAILQ_CONCAT(&lru->lru_head, &lcb->add, lru_list);
 	AZ(pthread_mutex_unlock(&lru->lru_mtx));
-	AZ(lcb->n);
+	lcb->n_add = 0;
+	AZ(lcb->n_rem);
 }
 /* chg is fellow_cache_shouldlru(new) - fellow_cache_shouldlru(old)
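
fellow_cache_lru_chgbatch_apply() is where the batching pays off: queued removals and queued additions touch the shared LRU under a single lock acquisition, and with this change the entry counter is adjusted with two deltas (n_add, n_rem) rather than once per segment. The following sketch shows the same shape; it reuses the hypothetical struct lru, struct entry and AZ() from the sketch after the commit message, is not the fellow code, and replaces VTAILQ_CONCAT with an explicit move loop.

/* Sketch only: apply a batch of queued LRU changes in one critical
 * section; the length counter gets two delta updates, not one per
 * element. Builds on the hypothetical types defined above. */
struct chgbatch {
	unsigned n_add;			/* entries queued on 'add' */
	unsigned l_rem, n_rem;		/* capacity and fill of 'rem' */
	TAILQ_HEAD(, entry) add;	/* to be inserted into the LRU */
	struct entry **rem;		/* to be removed from the LRU */
};

static void
chgbatch_apply(struct lru *lru, struct chgbatch *cb)
{
	struct entry *e;

	if (cb->n_rem == 0 && TAILQ_EMPTY(&cb->add))
		return;
	AZ(pthread_mutex_lock(&lru->mtx));
	lru->n += cb->n_add;		/* one += / -= for the whole batch */
	lru->n -= cb->n_rem;
	while (cb->n_rem > 0) {
		e = cb->rem[--cb->n_rem];
		TAILQ_REMOVE(&lru->head, e, lru_list);
	}
	/* move all queued additions over (stand-in for VTAILQ_CONCAT) */
	while ((e = TAILQ_FIRST(&cb->add)) != NULL) {
		TAILQ_REMOVE(&cb->add, e, lru_list);
		TAILQ_INSERT_TAIL(&lru->head, e, lru_list);
	}
	AZ(pthread_mutex_unlock(&lru->mtx));
	cb->n_add = 0;
	AZ(cb->n_rem);
}

Folding the counter update into the same critical section as the list surgery keeps list and counter consistent for any observer that holds the LRU mutex, which is what makes the teardown and eviction assertions in the diff safe.
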
@@ -1150,19 +1158,19 @@ fellow_cache_lru_chg(struct fellow_lru_chgbatch *lcb,
 		//DBG("%p -rem", fcs);
 		// remove the remove
-		AN(lcb->n);
-		for (i = 0; i < lcb->n; i++) {
+		AN(lcb->n_rem);
+		for (i = 0; i < lcb->n_rem; i++) {
 			if (lcb->fcs[i] != fcs)
 				continue;
 			lcb->fcs[i] = NULL;
 			break;
 		}
-		assert(i < lcb->n);
-		if (i + 1 < lcb->n) {
+		assert(i < lcb->n_rem);
+		if (i + 1 < lcb->n_rem) {
 			memmove(&lcb->fcs[i], &lcb->fcs[i + 1],
-			    sizeof lcb->fcs[0] * (lcb->n - (i + 1)));
+			    sizeof lcb->fcs[0] * (lcb->n_rem - (i + 1)));
 		}
-		lcb->n--;
+		lcb->n_rem--;
 		fcs->lcb_remove = 0;
 	}
 	else if (add) {
@@ -1171,23 +1179,26 @@ fellow_cache_lru_chg(struct fellow_lru_chgbatch *lcb,
 		AZ(fcs->lcb_remove);
 		fcs->lcb_add = 1;
 		VTAILQ_INSERT_TAIL(&lcb->add, fcs, lru_list);
+		lcb->n_add++;
 	}
 	else if (fcs->lcb_add) {
 		//DBG("%p -add", fcs);
 		AZ(fcs->lcb_remove);
 		VTAILQ_REMOVE(&lcb->add, fcs, lru_list);
 		fcs->lcb_add = 0;
+		AN(lcb->n_add);
+		lcb->n_add--;
 	}
 	else {
 		//DBG("%p +rem", fcs);
 		AZ(fcs->lcb_remove);
 		AZ(fcs->lcb_add);
-		if (lcb->n == lcb->l) {
+		if (lcb->n_rem == lcb->l_rem) {
 			fellow_cache_lru_chgbatch_apply(lcb);
-			AZ(lcb->n);
+			AZ(lcb->n_rem);
 		}
 		fcs->lcb_remove = 1;
-		lcb->fcs[lcb->n++] = fcs;
+		lcb->fcs[lcb->n_rem++] = fcs;
 	}
 }
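
In fellow_cache_lru_chg() the remove queue is a small fixed-size array: cancelling a queued removal compacts the array with memmove(), and queueing a removal when the array is full first flushes the whole batch. A sketch of the flush-when-full part, on top of the hypothetical chgbatch_apply() above (illustrative only, not the fellow code):

/* Sketch only: queue an entry for removal; flush the batch first if
 * the fixed-size remove array is full. Builds on the hypothetical
 * chgbatch_apply() above. */
static void
chgbatch_queue_remove(struct lru *lru, struct chgbatch *cb, struct entry *e)
{
	if (cb->n_rem == cb->l_rem) {	/* array full: apply and restart */
		chgbatch_apply(lru, cb);
		AZ(cb->n_rem);
	}
	cb->rem[cb->n_rem++] = e;
}
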
@@ -2986,6 +2997,8 @@ fellow_cache_lru_seg_evict_locked(
 {
 	AN(fcs->fcs_onlru);
 	fcs->fcs_onlru = 0;
+	AN(lru->n);
+	lru->n--;
 	VTAILQ_REMOVE(&lru->lru_head, fcs, lru_list);
 	fellow_cache_seg_evict_locked(fcs, alloc);
 }
@@ -3119,6 +3132,8 @@ fellow_cache_lru_work(struct worker *wrk, struct fellow_cache_lru *lru)
 		AZ(fcs->fco_lru_mutate);
 		fcs->fco_lru_mutate = 1;
+		AN(lru->n);
+		lru->n--;
 		VTAILQ_REMOVE(&lru->lru_head, fcs, lru_list);
 		AZ(pthread_mutex_unlock(&lru->lru_mtx));
@@ -3143,6 +3158,7 @@ fellow_cache_lru_work(struct worker *wrk, struct fellow_cache_lru *lru)
 		fcs->fco_lru_mutate = 0;
 		VTAILQ_INSERT_TAIL(&lru->lru_head, fcs, lru_list);
+		lru->n++;
 		AZ(pthread_mutex_unlock(&fco->mtx));
@@ -3426,7 +3442,7 @@ fellow_cache_seg_deref(struct fellow_cache_seg * const *segs, unsigned n)
 		fcs = *segs++;
 		CHECK_OBJ_NOTNULL(fcs, FELLOW_CACHE_SEG_MAGIC);
 		(void) fellow_cache_seg_deref_locked(lcb, fcs);
-		if (lcb->n == DEREF_BATCH)
+		if (lcb->n_rem == DEREF_BATCH)
 			fellow_cache_lru_chgbatch_apply(lcb);
 	}
 	fellow_cache_lru_chgbatch_apply(lcb);
@@ -3572,7 +3588,7 @@ fellow_cache_seg_ref_in(struct fellow_cache *fc, enum fellow_cache_io_e type,
 		fcs = segs[u];
 		(void) fellow_cache_seg_ref_locked(lcb, fcs);
-		if (lcb->n == REF_BATCH)
+		if (lcb->n_rem == REF_BATCH)
 			fellow_cache_lru_chgbatch_apply(lcb);
 		while (type == FCIO_SYNC &&
@@ -5444,8 +5460,8 @@ t_1lcb(struct fellow_cache_seg *fcs,
 	assert(n <= LCBMAX);
 	for (u = 1; u < n + 2; u++) { // length of remove
-		AZ(lcb->n);
-		lcb->l = u;
+		AZ(lcb->n_rem);
+		lcb->l_rem = u;
 #define chg(from, to) \
 	DBG("%x->%x", from, to); \
......