Refactor logbuffer_addblks() to flush when running out of disk blocks

When adding log blocks, trigger the flush also based on available disk blocks,
that is, do not add blocks to the logbuffer which we cannot also flush.

Also flush with reference:

I think the capability was originally limited in order to do
full flushes with reference only from the logwatcher thread, in
order to not hold the logmtx for too long.

But now that we have the extra flush finish thread, I do not think
this is necessary any more, and we need to handle tight storage
better.
parent d9101d09
......@@ -3006,11 +3006,29 @@ logbuffer_flush_finish_work(struct worker *wrk, void *priv)
TAKE(to, from); \
} while(0)
/*
 * Return the number of blocks currently available to this logbuffer:
 * the free count of the log region (when one is attached) plus the
 * ready request count of each of the two dskreqs slots (when present).
 */
static unsigned
logbuffer_availblks(const struct fellow_logbuffer *lbuf)
{
	unsigned n = 0;
	unsigned i;

	if (lbuf->logreg != NULL)
		n += lbuf->logreg->free_n;
	if (lbuf->dskreqs != NULL) {
		for (i = 0; i < 2; i++)
			n += buddy_reqs_next_ready(&(*lbuf->dskreqs)[i].reqs);
	}
	return (n);
}
static void
logbuffer_addblks(struct fellow_fd *ffd,
struct fellow_logbuffer *lbuf,
struct fellow_alloc_log_block *blks, unsigned n)
{
unsigned canflush;
unsigned avail;
unsigned can;
CHECK_OBJ_NOTNULL(ffd, FELLOW_FD_MAGIC);
CHECK_LBUF_USABLE(lbuf);
......@@ -3034,12 +3052,35 @@ logbuffer_addblks(struct fellow_fd *ffd,
AN(lbuf->active.block);
AN(lbuf->head.block);
// if logbuffer is full, try to flush
if (lbuf->space > 0 &&
lbuf->n == lbuf->space &&
(logbuffer_can(lbuf, LBUF_CAN_FLUSH) ||
logbuffer_can(lbuf, LBUF_CAN_LOGREG))) {
logbuffer_flush(ffd, lbuf, 0, 0);
canflush =
logbuffer_can(lbuf, LBUF_CAN_FLUSH) ||
logbuffer_can(lbuf, LBUF_CAN_LOGREG);
avail = 0;
can = 0;
if (canflush)
avail = logbuffer_availblks(lbuf);
/* if we need memory, just flush blocks
*/
if (canflush &&
lbuf->space > 0 && lbuf->n == lbuf->space)
can |= LBUF_CAN_FLUSH;
/* if we run short of disk blocks, we want to
* ref in order to apply frees
*
* unless avail increases, this flushes for every half
*/
if (canflush &&
lbuf->n > 0 && lbuf->n * 2 >= avail)
can |= LBUF_CAN_REF;
/*
* flush if logbuffer is full or not enough blocks
*/
if (can) {
logbuffer_flush(ffd, lbuf, 0, can);
if (lbuf->n != lbuf->space)
continue;
}
......@@ -3057,7 +3098,10 @@ logbuffer_addblks(struct fellow_fd *ffd,
TAKE_BLK(lbuf->arr[lbuf->n], lbuf->active);
lbuf->n++;
while (n > 1 && lbuf->n < lbuf->space) {
if (canflush == 0)
avail = UINT_MAX;
while (n > 1 && lbuf->n < lbuf->space && lbuf->n < avail) {
TAKE_BLK(lbuf->arr[lbuf->n], *blks);
lbuf->n++;
blks++;
......@@ -3088,15 +3132,8 @@ logbuffer_getblk(struct fellow_fd *ffd,
lbuf->active.block->nentries < FELLOW_DISK_LOG_BLOCK_ENTRIES)
return (lbuf->active.block);
blk = fellow_logblk_new(lbuf->membuddy, 0,
blk = fellow_logblk_new(lbuf->membuddy, 1,
ffd->tune->hash_log, lbuf->id);
if (blk == NULL) {
if (logbuffer_can(lbuf, LBUF_CAN_FLUSH) ||
logbuffer_can(lbuf, LBUF_CAN_LOGREG))
logbuffer_flush(ffd, lbuf, 0, 0);
blk = fellow_logblk_new(lbuf->membuddy, 1,
ffd->tune->hash_log, lbuf->id);
}
XXXAN(blk);
ablk->block = blk;
......@@ -4106,10 +4143,6 @@ fellow_log_entries_add(struct fellow_fd *ffd,
AN(n);
AN(entry);
/* NOTE: logbuffer_getblk() below could call logbuffer_flush(), which
 * writes mutated log entries. BUT the flush is called with can == 0 such
 * that regions_to_free is not handled.
 */
if (prep->tofree.n) {
if (lbuf->regions_to_free == NULL)
lbuf->regions_to_free = regionlist_alloc(ffd->membuddy);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment