lib: nb: add list_entry_done() callback to free resources
The existing iteration callbacks only allow a daemon to return a pointer to an object that already exists and must continue to exist indefinitely. To allow the daemon to return allocated iterator objects, or to lock its containing structures, we need a callback that tells the daemon when FRR is done using the returned value, so the daemon can free it (or unlock, etc.). That's what list_entry_done() is for.

Signed-off-by: Christian Hopps <chopps@labn.net>
Commit f9c759ee4e (parent f28394313f)
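To make the intent concrete, here is a minimal, hypothetical daemon-side sketch of how the new callback is meant to be used: lookup_entry() takes a lock before handing an entry to the northbound layer, and list_entry_done() releases it once FRR is finished with that entry. Only the list_entry_done() signature comes from this commit; the route structure, my_route_find() and the mutex are invented for illustration.

#include <pthread.h>
#include "northbound.h"

/* Hypothetical daemon state; everything here except the callback
 * signatures is illustrative. */
struct my_route;
extern const struct my_route *my_route_find(const char *prefix);

static pthread_mutex_t routes_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Hold the lock while the northbound layer is using the returned entry. */
static const void *my_routes_lookup_entry(struct nb_cb_lookup_entry_args *args)
{
	const struct my_route *route;

	pthread_mutex_lock(&routes_mtx);
	route = my_route_find(args->keys->key[0]);
	if (!route)
		pthread_mutex_unlock(&routes_mtx); /* nothing handed out */
	return route;
}

/* Called by FRR once it is done with the entry returned above. */
static void my_routes_list_entry_done(const void *parent_list_entry, const void *list_entry)
{
	if (list_entry)
		pthread_mutex_unlock(&routes_mtx);
}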
lib/northbound.c

@@ -235,8 +235,9 @@ static int nb_node_validate_cb(const struct nb_node *nb_node,
 	 * depends on context (e.g. some daemons might augment "frr-interface"
 	 * while others don't).
 	 */
-	if (!valid && callback_implemented && operation != NB_CB_GET_NEXT
-	    && operation != NB_CB_GET_KEYS && operation != NB_CB_LOOKUP_ENTRY)
+	if (!valid && callback_implemented && operation != NB_CB_GET_NEXT &&
+	    operation != NB_CB_GET_KEYS && operation != NB_CB_LIST_ENTRY_DONE &&
+	    operation != NB_CB_LOOKUP_ENTRY)
 		flog_warn(EC_LIB_NB_CB_UNNEEDED,
 			  "unneeded '%s' callback for '%s'",
 			  nb_cb_operation_name(operation), nb_node->xpath);

@@ -283,6 +284,8 @@ static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
 				     state_optional);
 	error += nb_node_validate_cb(nb_node, NB_CB_GET_KEYS, !!nb_node->cbs.get_keys,
 				     state_optional);
+	error += nb_node_validate_cb(nb_node, NB_CB_LIST_ENTRY_DONE, !!nb_node->cbs.list_entry_done,
+				     true);
 	error += nb_node_validate_cb(nb_node, NB_CB_LOOKUP_ENTRY, !!nb_node->cbs.lookup_entry,
 				     state_optional);
 	error += nb_node_validate_cb(nb_node, NB_CB_RPC, !!nb_node->cbs.rpc,

@@ -1806,6 +1809,19 @@ int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
 	return nb_node->cbs.get_keys(&args);
 }
 
+void nb_callback_list_entry_done(const struct nb_node *nb_node, const void *parent_list_entry,
+				 const void *list_entry)
+{
+	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CFG_CBS) || !nb_node->cbs.list_entry_done)
+		return;
+
+	DEBUGD(&nb_dbg_cbs_state,
+	       "northbound callback (list_entry_done): node [%s] parent_list_entry [%p] list_entry [%p]",
+	       nb_node->xpath, parent_list_entry, list_entry);
+
+	nb_node->cbs.list_entry_done(parent_list_entry, list_entry);
+}
+
 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
 				     const void *parent_list_entry,
 				     const struct yang_list_keys *keys)

@@ -1943,6 +1959,7 @@ static int nb_callback_configuration(struct nb_context *context,
 	case NB_CB_GET_ELEM:
 	case NB_CB_GET_NEXT:
 	case NB_CB_GET_KEYS:
+	case NB_CB_LIST_ENTRY_DONE:
 	case NB_CB_LOOKUP_ENTRY:
 	case NB_CB_RPC:
 	case NB_CB_NOTIFY:

@@ -2322,6 +2339,7 @@ bool nb_cb_operation_is_valid(enum nb_cb_operation operation,
 		}
 		return true;
 	case NB_CB_GET_KEYS:
+	case NB_CB_LIST_ENTRY_DONE:
 	case NB_CB_LOOKUP_ENTRY:
 		switch (snode->nodetype) {
 		case LYS_LIST:

@@ -2625,6 +2643,8 @@ const char *nb_cb_operation_name(enum nb_cb_operation operation)
 		return "get_next";
 	case NB_CB_GET_KEYS:
 		return "get_keys";
+	case NB_CB_LIST_ENTRY_DONE:
+		return "list_entry_done";
 	case NB_CB_LOOKUP_ENTRY:
 		return "lookup_entry";
 	case NB_CB_RPC:
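The new nb_callback_list_entry_done() wrapper behaves like the other callback wrappers: it is a no-op when the daemon did not provide the callback (or when config callbacks are ignored), so callers can invoke it unconditionally. A rough illustration of the intended pairing inside the northbound layer (not code from this commit; nb_node, parent_entry and keys are assumed to already be in scope):

	const void *entry = nb_callback_lookup_entry(nb_node, parent_entry, &keys);

	if (entry) {
		/* ... read operational data from 'entry' ... */

		/* Tell the daemon we are done so it can unlock/free the entry. */
		nb_callback_list_entry_done(nb_node, parent_entry, entry);
	}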
lib/northbound.h

@@ -98,6 +98,7 @@ enum nb_cb_operation {
 	NB_CB_GET_ELEM,
 	NB_CB_GET_NEXT,
 	NB_CB_GET_KEYS,
+	NB_CB_LIST_ENTRY_DONE,
 	NB_CB_LOOKUP_ENTRY,
 	NB_CB_RPC,
 	NB_CB_NOTIFY,

@@ -515,6 +516,24 @@ struct nb_callbacks {
 	 */
 	int (*get_keys)(struct nb_cb_get_keys_args *args);
 
+	/*
+	 * Operational data callback for YANG lists.
+	 *
+	 * This callback function is called to cleanup any resources that may be
+	 * held by a backend opaque `list_entry` value (e.g., a lock). It is
+	 * called when the northbound code is done using a `list_entry` value it
+	 * obtained using the lookup_entry() callback. It is also called on the
+	 * `list_entry` returned from the get_next() or lookup_next() callbacks
+	 * if the iteration aborts before walking to the end of the list. The
+	 * intention is to allow any resources (e.g., a lock) to now be
+	 * released.
+	 *
+	 * args
+	 *	parent_list_entry - pointer to the parent list entry
+	 *	list_entry - value returned previously from `lookup_entry()`
+	 */
+	void (*list_entry_done)(const void *parent_list_entry, const void *list_entry);
+
 	/*
 	 * Operational data callback for YANG lists.
 	 *

@@ -883,6 +902,8 @@ extern int nb_callback_get_keys(const struct nb_node *nb_node,
 extern const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
					     const void *parent_list_entry,
					     const struct yang_list_keys *keys);
+extern void nb_callback_list_entry_done(const struct nb_node *nb_node,
+					const void *parent_list_entry, const void *list_entry);
 extern const void *nb_callback_lookup_node_entry(struct lyd_node *node,
						  const void *parent_list_entry);
 extern const void *nb_callback_lookup_next(const struct nb_node *nb_node,
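For daemon authors, the new member slots into the existing nb_callbacks initializer for a list node. A minimal sketch, assuming a hypothetical module: the module name, xpath and handler functions are invented, and only the .list_entry_done field is new in this commit.

/* Hypothetical operational-data registration for a YANG list node. */
const struct frr_yang_module_info my_module_info = {
	.name = "my-module",
	.nodes = {
		{
			.xpath = "/my-module:state/route",
			.cbs = {
				.get_next = my_routes_get_next,
				.get_keys = my_routes_get_keys,
				.lookup_entry = my_routes_lookup_entry,
				.list_entry_done = my_routes_list_entry_done,
			},
		},
		{
			.xpath = NULL,
		},
	},
};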
lib/northbound_oper.c

@@ -139,6 +139,9 @@ static const void *nb_op_list_get_next(struct nb_op_yield_state *ys, struct nb_n
 static const void *nb_op_list_lookup_entry(struct nb_op_yield_state *ys, struct nb_node *nb_node,
					   const struct nb_op_node_info *pni, struct lyd_node *node,
					   const struct yang_list_keys *keys);
+static void nb_op_list_list_entry_done(struct nb_op_yield_state *ys, struct nb_node *nb_node,
+				       const struct nb_op_node_info *pni, const void *list_entry);
+static void ys_pop_inner(struct nb_op_yield_state *ys);
 
 /* -------------------- */
 /* Function Definitions */

@@ -189,6 +192,9 @@ static inline void nb_op_free_yield_state(struct nb_op_yield_state *ys,
 		darr_free(ys->non_specific_predicate);
 		darr_free(ys->query_tokstr);
 		darr_free(ys->schema_path);
+		/* need to cleanup resources, so pop these individually */
+		while (darr_len(ys->node_infos))
+			ys_pop_inner(ys);
 		darr_free(ys->node_infos);
 		darr_free(ys->xpath_orig);
 		darr_free(ys->xpath);

@@ -223,10 +229,20 @@ static void ys_trim_xpath(struct nb_op_yield_state *ys)
 
 static void ys_pop_inner(struct nb_op_yield_state *ys)
 {
-	uint len = darr_len(ys->node_infos);
+	struct nb_op_node_info *ni, *pni;
+	struct nb_node *nb_node;
+	int i = darr_lasti(ys->node_infos);
 
-	assert(len);
-	darr_setlen(ys->node_infos, len - 1);
+	pni = i > 0 ? &ys->node_infos[i - 1] : NULL;
+	ni = &ys->node_infos[i];
+
+	/* list_entry's propagate so only free the first occurrence */
+	if (ni->list_entry && (!pni || pni->list_entry != ni->list_entry)) {
+		nb_node = ni->schema ? ni->schema->priv : NULL;
+		if (nb_node)
+			nb_op_list_list_entry_done(ys, nb_node, pni, ni->list_entry);
+	}
+	darr_setlen(ys->node_infos, i);
 	ys_trim_xpath(ys);
 }
 

@@ -873,6 +889,14 @@ static enum nb_error nb_op_list_get_keys(struct nb_op_yield_state *ys, struct nb
 	return 0;
 }
 
+static void nb_op_list_list_entry_done(struct nb_op_yield_state *ys, struct nb_node *nb_node,
+				       const struct nb_op_node_info *pni, const void *list_entry)
+{
+	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_HAS_GET_TREE))
+		return;
+
+	nb_callback_list_entry_done(nb_node, pni ? pni->list_entry : NULL, list_entry);
+}
 
 /**
  * nb_op_add_leaf() - Add leaf data to the get tree results
@@ -1154,8 +1178,8 @@ static const struct lysc_node *nb_op_sib_first(struct nb_op_yield_state *ys,
 	 *
 	 * If the schema path (original query) is longer than our current node
 	 * info stack (current xpath location), we are building back up to the
-	 * base of the user query, return the next schema node from the query
-	 * string (schema_path).
+	 * base of the walk at the end of the user query path, return the next
+	 * schema node from the query string (schema_path).
 	 */
 	if (last != NULL)
 		assert(last->schema == parent);
@@ -1526,6 +1550,18 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
			 */
			assert(!list_start);
			is_specific_node = true;
+
+			/*
+			 * Release the entry back to the daemon
+			 */
+			assert(ni->list_entry == list_entry);
+			nb_op_list_list_entry_done(ys, nn, pni, list_entry);
+			ni->list_entry = NULL;
+
+			/*
+			 * Continue on as we may reap the resulting node
+			 * if empty.
+			 */
			list_entry = NULL;
		}
 

@@ -1605,6 +1641,18 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
				list_entry = nb_op_list_get_next(ys, nn, pni, list_entry);
			}
 
+			/*
+			 * The walk API is that get/lookup_next returns NULL
+			 * when done; those callbacks are also responsible
+			 * for releasing any state associated with previous
+			 * list_entry's (e.g., any locks) during the iteration.
+			 * Therefore we need to zero out the last top level
+			 * list_entry so we don't mistakenly call the
+			 * list_entry_done() callback on it.
+			 */
+			if (!is_specific_node && !list_start && !list_entry)
+				ni->list_entry = NULL;
+
			/*
			 * (FN:A) Reap empty list element? Check to see if we
			 * should reap an empty list element. We do this if the

@@ -1620,17 +1668,14 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
			 * have no non-key children, check for this condition
			 * and do not reap if true.
			 */
-			if (!list_start && ni->inner &&
-			    !lyd_child_no_keys(ni->inner) &&
+			if (!list_start && ni->inner && !lyd_child_no_keys(ni->inner) &&
			    /* not the top element with a key match */
-			    !((darr_ilen(ys->node_infos) ==
-			       darr_ilen(ys->schema_path) - 1) &&
+			    !((darr_ilen(ys->node_infos) == darr_ilen(ys->schema_path) - 1) &&
			      lysc_is_key((*darr_last(ys->schema_path)))) &&
-			    /* is this at or below the base? */
-			    darr_ilen(ys->node_infos) <= ys->query_base_level)
+			    /* is this list entry below the query base? */
+			    darr_ilen(ys->node_infos) - 1 < ys->query_base_level)
				ys_free_inner(ys, ni);
-
 
			if (!list_entry) {
				/*
				 * List Iteration Done
@@ -1725,12 +1770,15 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
				ni->xpath_len = len;
			}
 
+			/* Save the new list_entry early so it can be cleaned up on error */
+			ni->list_entry = list_entry;
+			ni->schema = sib;
 
			/* Need to get keys. */
			if (!CHECK_FLAG(nn->flags, F_NB_NODE_KEYLESS_LIST)) {
				ret = nb_op_list_get_keys(ys, nn, list_entry, &ni->keys);
				if (ret) {
					darr_pop(ys->node_infos);
					ret = NB_ERR_RESOURCE;
					goto done;
				}

@@ -1765,7 +1813,6 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
							.inner,
						sib, &ni->keys, &node);
				if (err) {
-					darr_pop(ys->node_infos);
					ret = NB_ERR_RESOURCE;
					goto done;
				}

@@ -1775,8 +1822,7 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume)
			 * Save the new list entry with the list node info
			 */
			ni->inner = node;
-			ni->schema = node->schema;
-			ni->list_entry = list_entry;
+			assert(ni->schema == node->schema);
			ni->niters += 1;
			ni->nents += 1;
 
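The comment added in the __walk() iteration hunk spells out the ownership contract: get_next()/lookup_next() release whatever they held for the previous entry themselves and return NULL at the end of the list, so list_entry_done() is only invoked for an entry the walk is still holding (an aborted iteration, or a value obtained via lookup_entry()). A rough daemon-side sketch of that contract, this time using a hypothetical allocated-iterator pattern instead of the lock used in the earlier sketch; every name below is made up.

#include <stdbool.h>
#include "northbound.h"

/* Hypothetical iterator helpers; nothing here comes from this commit. */
struct my_route_iter;
extern struct my_route_iter *my_route_iter_first(void);	/* NULL if the list is empty */
extern bool my_route_iter_next(struct my_route_iter *it);	/* false when past the end */
extern void my_route_iter_free(struct my_route_iter *it);

static const void *my_iter_get_next(struct nb_cb_get_next_args *args)
{
	struct my_route_iter *it = (struct my_route_iter *)args->list_entry;

	if (!it)
		return my_route_iter_first();	/* first call: allocate and return iterator */
	if (!my_route_iter_next(it)) {
		my_route_iter_free(it);		/* end of list: release our own state */
		return NULL;
	}
	return it;
}

static void my_iter_list_entry_done(const void *parent_list_entry, const void *list_entry)
{
	/* The walk stopped while still holding an entry: free the iterator. */
	my_route_iter_free((struct my_route_iter *)list_entry);
}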