Commit 78d0f4ba authored by Nils Goroll's avatar Nils Goroll

add back front pushes

parent afb94ad5
...@@ -64,8 +64,15 @@ ...@@ -64,8 +64,15 @@
* whether to use blocking threads for pass (private/hfm/fpm) objects * whether to use blocking threads for pass (private/hfm/fpm) objects
* *
* if false, buffering will be used * if false, buffering will be used
* XXX make configurable per req/task
*/ */
static int use_thread_blocking = 0; static int use_thread_blocking = 0;
/*
* whether to push bytes early
*
* XXX make configurable per req/task
*/
static int front_push = 0;
#ifdef DEBUG #ifdef DEBUG
#define VSLdbgv(req, fmt, ...) \ #define VSLdbgv(req, fmt, ...) \
...@@ -1341,8 +1348,6 @@ bytes_unpend_worklist(struct req *req, struct bytes_tree *tree, ...@@ -1341,8 +1348,6 @@ bytes_unpend_worklist(struct req *req, struct bytes_tree *tree,
CHECK_OBJ_NOTNULL(tree->front, NODE_MAGIC); CHECK_OBJ_NOTNULL(tree->front, NODE_MAGIC);
CHECK_OBJ_NOTNULL(tree->root, NODE_MAGIC); CHECK_OBJ_NOTNULL(tree->root, NODE_MAGIC);
assert(req->esi_level == 0);
if (tree->root->state == ST_DELIVERED) { if (tree->root->state == ST_DELIVERED) {
VSLdbg(req, "bytes_unpend: whole tree is delivered"); VSLdbg(req, "bytes_unpend: whole tree is delivered");
assert_node(tree->root, CHK_DELI); assert_node(tree->root, CHK_DELI);
...@@ -1831,6 +1836,9 @@ bytes_push_worklist(struct req *req, struct bytes_tree *tree, ...@@ -1831,6 +1836,9 @@ bytes_push_worklist(struct req *req, struct bytes_tree *tree,
if (parent->parent == NULL) if (parent->parent == NULL)
continue; continue;
if (parent->state < ST_CLOSED)
continue;
/* /*
* fini the now unneeded parent ASAP * fini the now unneeded parent ASAP
* *
...@@ -2023,34 +2031,31 @@ pesi_buf_bytes(struct req *req, enum vdp_action act, void **priv, ...@@ -2023,34 +2031,31 @@ pesi_buf_bytes(struct req *req, enum vdp_action act, void **priv,
memcpy(node->data.st->ptr, ptr, len); memcpy(node->data.st->ptr, ptr, len);
} }
} }
#if 0
/* XXX RETHINK: solution might be to special case the layer above node_insert(tree, parent, node);
*
* now that we push the each esi level's nexus, we cannot start delivery /*
* before the pesi vdps have been taken out of the chain, otherwise we * check unlocked if an attempt to unpend makes any sense at all
* will push to pesi(buf) again during unpending
*
*
* OLD TEXT:
*
* by default, we got a private parent node to which we can write
* lockless, but which cannot be delivered (unpended) in parallel,
* delivery will wait until the end of the request (set_close() on the
* parent node)
*
* if we are buffering (! refok) or streaming, we stop using a private
* node under the assumption that delivering as quickly as possible is
* worth the additional overhead.
*/ */
AN(tree->front);
if (front_push &&
tree->unpend_owner != NULL &&
(tree->front->parent == NULL ||
tree->front == parent ||
tree->front->parent == parent ||
tree->front_owner == req->wrk)) {
VSLdbg(req, "front push");
// XXX for this node, we can spare buffering if got pushed
// XXX double locking with node_insert
if (parent->state == ST_PRIVATE && (refok == 0 || req->objcore->boc)) {
Lck_Lock(&tree->tree_lock); Lck_Lock(&tree->tree_lock);
set_open(tree, parent, req->wrk); if (parent->state == ST_PRIVATE)
set_open(tree, parent, req->wrk);
				assert(parent->state == ST_OPEN);
bytes_unpend(req, tree);
Lck_Unlock(&tree->tree_lock); Lck_Unlock(&tree->tree_lock);
} }
#endif
node_insert(tree, parent, node);
VSLdbgv(req, "bytes_add to %s parent: exit", VSLdbgv(req, "bytes_add to %s parent: exit",
parent->state == ST_PRIVATE ? "private" : "open"); parent->state == ST_PRIVATE ? "private" : "open");
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment