fellow_cache: Ensure we see seglist loading errors for _iter()

parent 0a007211
@@ -2323,6 +2323,18 @@ fellow_cache_seglists_wait_locked(struct fellow_cache_obj *fco)
	assert(fco->seglstate == SEGL_DONE || fco->seglstate == SEGL_NEED);
}

static void
fellow_cache_seglists_wait(struct fellow_cache_obj *fco)
{
	CHECK_OBJ_NOTNULL(fco, FELLOW_CACHE_OBJ_MAGIC);

	if (fco->seglstate != SEGL_LOADING)
		return;
	AZ(pthread_mutex_lock(&fco->mtx));
	fellow_cache_seglists_wait_locked(fco);
	AZ(pthread_mutex_unlock(&fco->mtx));
}

/*
 * MEM LAYOUT
 *
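
For orientation, the added fellow_cache_seglists_wait() is a thin wrapper around the existing _locked variant: it returns immediately once the seglist state has left SEGL_LOADING and only otherwise takes fco->mtx and waits. The standalone sketch below uses hypothetical names (struct loader, LS_*) and assumes the locked variant blocks on a condition variable until the state changes, as the trailing assert suggests; it only illustrates the unlocked-fast-path / locked-wait pattern, not fellow internals.

#include <assert.h>
#include <pthread.h>

#define AZ(x) assert((x) == 0)

enum load_state { LS_LOADING, LS_DONE, LS_FAILED };

struct loader {
	pthread_mutex_t	mtx;
	pthread_cond_t	cond;
	enum load_state	state;		/* changed only under mtx */
};

static void
loader_wait_locked(struct loader *l)
{
	/* caller holds l->mtx */
	while (l->state == LS_LOADING)
		AZ(pthread_cond_wait(&l->cond, &l->mtx));
	assert(l->state == LS_DONE || l->state == LS_FAILED);
}

static void
loader_wait(struct loader *l)
{
	/* unlocked fast path: a racy read is fine, the locked loop re-checks */
	if (l->state != LS_LOADING)
		return;
	AZ(pthread_mutex_lock(&l->mtx));
	loader_wait_locked(l);
	AZ(pthread_mutex_unlock(&l->mtx));
}
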
@@ -4642,8 +4654,10 @@ fellow_cache_obj_iter(struct fellow_cache *fc, struct fellow_cache_obj *fco,
	// stack usage
	assert(readahead <= 31);
	fcr.status = fcr_ok; // also if func() != 0
	fcr.r.integer = 0;
	// fcr_ok is also returned if func() != 0
	fcr = fco->fcr;
	if (fcr.status != fcr_ok)
		return (fcr);
	CHECK_OBJ_NOTNULL(fco, FELLOW_CACHE_OBJ_MAGIC);
@@ -4791,7 +4805,14 @@ fellow_cache_obj_iter(struct fellow_cache *fc, struct fellow_cache_obj *fco,
		fcr.r.integer = ret2;
	}

	return (fcr);
	// if ok, wait for seglist loading to complete to see errors
	if (fcr.status != fcr_ok)
		fco_latch_err(fco, fcr);
	else
		fellow_cache_seglists_wait(fco);

	// to properly return func() return value with fcr_ok
	return (fco->fcr.status == fcr_ok ? fcr : fco->fcr);
}
/* Auxiliary attributes
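
Taken together, the two _iter() hunks above change the iterator to (a) start from the result already latched on the object instead of a fresh fcr_ok, (b) latch any new error via fco_latch_err(), (c) otherwise wait for the background seglist load to finish, and (d) only then decide what to return, so a loading error is reported while a non-zero func() return still comes back with an ok status. The following standalone sketch, with hypothetical names and types rather than real fellow structures, shows that flow in miniature.

#include <stddef.h>

enum status { ST_OK = 0, ST_ERR_IO };

struct result {
	enum status	status;
	int		integer;	/* last func() return value */
};

struct obj {
	struct result	latched;	/* first error seen, else ST_OK */
	size_t		nseg;
};

/* hypothetical stand-ins for segment I/O and seglist-load waiting */
static enum status seg_read(struct obj *o, size_t i) { (void)o; (void)i; return (ST_OK); }
static void seglists_wait(struct obj *o) { (void)o; }

static struct result
obj_iter(struct obj *o, int (*func)(void *priv), void *priv)
{
	struct result r;
	size_t i;

	r = o->latched;			/* see errors latched earlier */
	if (r.status != ST_OK)
		return (r);

	for (i = 0; i < o->nseg; i++) {
		r.status = seg_read(o, i);
		if (r.status != ST_OK)
			break;
		r.integer = func(priv);	/* status stays ok if func() != 0 */
		if (r.integer != 0)
			break;		/* caller asked to stop early */
	}

	if (r.status != ST_OK)
		o->latched = r;		/* latch the new error */
	else
		seglists_wait(o);	/* may latch a loading error */

	/* a latched error wins, otherwise keep func()'s return value */
	return (o->latched.status == ST_OK ? r : o->latched);
}
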
@@ -6653,11 +6674,7 @@ static void test_fellow_cache_obj_iter_final(
	fc_inj_reset();
	injcount = -1;
	AN(injcount);
	/*
	 * XXX TEMP BROKEN until
	 * commit "fellow_cache: Ensure we see seglist loading errors for _iter()"
	 */
	while (0 && injcount) {
	while (injcount) {
		DBG("injcount %d", injcount);
		fc_inj_set(0);
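
The last hunk re-enables the fault-injection loop in test_fellow_cache_obj_iter_final() that had been disabled pending this commit. The sketch below shows the general countdown-style injection pattern such a loop follows; the inj_* helpers are hypothetical stand-ins, not the real fc_inj_* API, and the exact bookkeeping in the fellow test may differ.

#include <assert.h>

static int inj_points;		/* counts injectable points when inj_armed < 0 */
static int inj_armed = -1;	/* -1: count only; N >= 0: fail the N-th point */

static int
inj_hit(void)
{
	if (inj_armed < 0) {		/* counting pass */
		inj_points++;
		return (0);
	}
	return (inj_armed-- == 0);	/* fires once, when the countdown hits zero */
}

/* operation under test: fails iff an armed injection point fires */
static int
run_op(void)
{
	int err = 0;

	if (inj_hit()) err = 1;		/* e.g. allocation */
	if (!err && inj_hit()) err = 1;	/* e.g. segment read */
	return (err);
}

static void
test_injection(void)
{
	int injcount, i;

	inj_armed = -1;			/* counting pass */
	inj_points = 0;
	assert(run_op() == 0);
	injcount = inj_points;
	assert(injcount > 0);

	for (i = 0; i < injcount; i++) {
		inj_armed = i;		/* fail the i-th point on this pass */
		assert(run_op() != 0);	/* the error must be surfaced */
	}
}
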