Fully gc T_FINAL

It was effectively removed in 913c4653 but
dead code was left until now.
parent 5b578827
......@@ -53,7 +53,6 @@
/* ============================================================
* node finalizers
*/
static void fini_final(struct vdp_ctx *, struct node *);
static void fini_subreq(const struct vdp_ctx *, struct node *);
static void fini_data(struct vdp_ctx *, struct node *);
......@@ -230,9 +229,6 @@ node_fini(struct vdp_ctx *vdx, struct node *node)
{
switch (node->type) {
case T_FINAL:
fini_final(vdx, node);
break;
case T_SUBREQ:
fini_subreq(vdx, node);
break;
......@@ -320,9 +316,8 @@ node_insert(struct bytes_tree *tree, struct node *parent,
assert(node->state == ST_DATA);
break;
case T_SUBREQ:
case T_FINAL:
default:
/* cannot insert SUBREQ and FINAL yet */
/* cannot insert T_SUBREQ yet */
INCOMPL();
}
......@@ -373,7 +368,6 @@ set_unpending(const struct bytes_tree *tree, struct node *node)
assert(node->state == ST_DATA);
assert(node->type == T_DATA ||
node->type == T_SUBREQ ||
node->type == T_FINAL ||
node->type == T_CRC);
node->state = ST_UNPENDING;
......@@ -581,42 +575,6 @@ subreq_wait_done(struct node *node)
Lck_Unlock(node->subreq.shared_lock);
}
/* ------------------------------------------------------------
* node cleanup (not holding tree lock)
*
* must be idempotent
*/
/*
 * Finalize a T_FINAL node: if the parked sub-thread has not yet been
 * told to deliver, signal it to go; then wait until it reports FI_DONE
 * and destroy the condvar.  Idempotent: a repeat call returns early
 * once fi_state has reached FI_DESTROYED.
 */
static void
fini_final(struct vdp_ctx *vdx, struct node *node)
{
	(void) vdx;
	assert(node->type == T_FINAL);
	/* already torn down by a previous call — nothing to do */
	if (node->final.fi_state == FI_DESTROYED)
		return;
	Lck_Lock(node->final.shared_lock);
	if (node->final.fi_state < FI_GO) {
		/* sub-thread still waiting for the go-ahead: release it */
		node->final.fi_state = FI_GO;
		AZ(pthread_cond_signal(&node->final.fi_cond));
	}
	/*
	 * NOTE(review): single-shot wait, not a while-loop — assumes
	 * Lck_CondWait only returns once FI_DONE has been set (no
	 * spurious wakeups); confirm against the Lck_CondWait contract.
	 */
	if (node->final.fi_state < FI_DONE)
		AZ(Lck_CondWait(&node->final.fi_cond,
		node->final.shared_lock));
	Lck_Unlock(node->final.shared_lock);
	assert(node->final.fi_state == FI_DONE);
	AZ(pthread_cond_destroy(&node->final.fi_cond));
	/* shared_lock aliases &tree->nodes_lock; drop the reference */
	node->final.shared_lock = NULL;
	node->final.fi_state = FI_DESTROYED;
}
static void
fini_subreq(const struct vdp_ctx *vdx, struct node *node)
{
......@@ -692,32 +650,6 @@ node_bytes(struct vdp_ctx *vdx, struct bytes_tree *tree,
return (VDP_bytes(vdx, act, ptr, sz));
}
/*
 * Push a T_FINAL node: wake the parked sub-thread so it delivers its
 * body, wait until it signals FI_DONE, then propagate the delivery
 * status recorded in the tree.
 *
 * Fix: the original cast tree to (void) as "unused" while reading
 * tree->retval at the end — the contradictory cast is removed.
 */
static int
push_final(struct vdp_ctx *vdx, struct bytes_tree *tree,
    struct node *node, const struct node *next)
{
	(void) vdx;
	(void) next;
	assert(node->type == T_FINAL);
	assert(node->final.fi_state == FI_READY);
	Lck_Lock(node->final.shared_lock);
	/* tell the sub-thread to deliver */
	node->final.fi_state = FI_GO;
	AZ(pthread_cond_signal(&node->final.fi_cond));
	/*
	 * NOTE(review): single-shot wait assumes FI_DONE is the only
	 * wakeup condition (no spurious returns from Lck_CondWait) —
	 * confirm; otherwise this should be a while-loop.
	 */
	if (node->final.fi_state < FI_DONE)
		AZ(Lck_CondWait(&node->final.fi_cond,
		node->final.shared_lock));
	Lck_Unlock(node->final.shared_lock);
	assert(node->final.fi_state == FI_DONE);
	/* retval was recorded by the delivering thread */
	return (tree->retval);
}
static int
push_subreq(struct req *req, const struct bytes_tree *tree,
struct node *node, const struct node *next)
......@@ -887,9 +819,6 @@ worklist_push(struct vdp_ctx *vdx, struct bytes_tree *tree,
assert(node->state == ST_UNPENDING);
switch (node->type) {
case T_FINAL:
retval = push_final(vdx, tree, node, next);
break;
case T_SUBREQ:
AN(tree->root);
retval = push_subreq(tree->root->nexus.req, tree, node, next);
......
......@@ -52,11 +52,10 @@ struct bytes_tree {
enum n_type {
T_INVALID = 0,
T_NEXUS, // can change into T_SUBREQ / T_FINAL / T_DATA
T_NEXUS, // can change into T_SUBREQ / T_DATA
T_DATA,
T_CRC,
T_SUBREQ,
T_FINAL // non-ESI pass / hfm / hfp
T_SUBREQ
} __attribute__ ((__packed__));
/*
......@@ -64,7 +63,7 @@ enum n_type {
*
* ST_DATA: may never have any children
*
* T_DATA / T_CRC / T_SUBREQ / T_FINAL
* T_DATA / T_CRC / T_SUBREQ
*
* ST_PRIVATE: may receive pushes creating children
* unpending must not yet touch it
......@@ -87,7 +86,7 @@ enum n_type {
*
* ST_UNPENDING: in the process of being pushed to the client
*
* T_DATA / T_CRC / T_SUBREQ / T_FINAL
* T_DATA / T_CRC / T_SUBREQ
*
* ST_DELIVERED: We have pushed data up
*
......@@ -162,22 +161,6 @@ struct node_subreq {
int done;
};
// sub-state for node_final while in ST_DATA
enum fi_state {
FI_READY = 0,
FI_GO, // topreq signalling req to deliver
FI_DONE, // req signalling topreq it is done
FI_DESTROYED // cond/mtx destroyed (fini_final())
} __attribute__ ((__packed__));
/* we block the sub-thread when it's ready for delivery and continue when the
 * topreq tells it to */
struct node_final {
struct lock *shared_lock; // == &tree->nodes_lock
pthread_cond_t fi_cond;
enum fi_state fi_state;
};
enum n_alloc {
NA_INVALID = 0,
NA_WS,
......@@ -199,7 +182,6 @@ struct node { // 120b
struct node_nexus nexus; // T_NEXUS 72b
struct node_data data; // T_DATA 32b
struct node_subreq subreq; // T_SUBREQ 88b
struct node_final final; // T_FINAL 64b
struct node_crc crc; // T_CRC 16b
};
};
......@@ -245,8 +227,7 @@ node_mutate_lock(struct bytes_tree *tree, struct node *node,
{
/* these checks can be relaxed when needed */
assert(type == T_DATA ||
type == T_SUBREQ ||
type == T_FINAL);
type == T_SUBREQ);
assert(state == ST_DATA);
Lck_Lock(&tree->tree_lock);
......
......@@ -17,7 +17,7 @@ digraph bytes_node_state {
}
subgraph cluster_leaf {
label="leaf nodes:\nT_DATA\nT_CRC\nT_SUBREQ\nT_FINAL"
label="leaf nodes:\nT_DATA\nT_CRC\nT_SUBREQ"
ST_DATA -> ST_UNPENDING [label="set_unpending()"]
}
......
......@@ -35,7 +35,7 @@ digraph tree {
/* legend */
T_NEXUS [shape=diamond]
T_DATA [shape=triangle, label="T_DATA\nT_CRC\n(bytes to oc)"]
T_SUBREQ [shape=box, label="T_SUBREQ\nT_FINAL\n(oc / thread)"]
T_SUBREQ [shape=box, label="T_SUBREQ\n(oc / thread)"]
T_NEXUS -> T_DATA
T_NEXUS -> T_SUBREQ
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment