// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MGMTD Transactions
 *
 * Copyright (C) 2021 Vmware, Inc.
 * Pushpasis Sarkar <spushpasis@vmware.com>
 */

#include <zebra.h>
#include "darr.h"
#include "hash.h"
#include "jhash.h"
#include "libfrr.h"
#include "mgmt_msg.h"
#include "mgmt_msg_native.h"
#include "mgmtd/mgmt.h"
#include "mgmtd/mgmt_memory.h"
#include "mgmtd/mgmt_txn.h"

#define __dbg(fmt, ...)                                                        \
	DEBUGD(&mgmt_debug_txn, "TXN: %s: " fmt, __func__, ##__VA_ARGS__)
#define __log_err(fmt, ...) zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)

#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
#define MGMTD_TXN_UNLOCK(txn, in_hash_free) mgmt_txn_unlock(txn, in_hash_free, __FILE__, __LINE__)

enum mgmt_txn_event {
	MGMTD_TXN_PROC_SETCFG = 1,
	MGMTD_TXN_PROC_COMMITCFG,
	MGMTD_TXN_PROC_GETCFG,
	MGMTD_TXN_PROC_GETTREE,
	MGMTD_TXN_PROC_RPC,
	MGMTD_TXN_COMMITCFG_TIMEOUT,
};

PREDECL_LIST(mgmt_txn_reqs);

struct mgmt_set_cfg_req {
	Mgmtd__DatastoreId ds_id;
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	uint16_t num_cfg_changes;
	bool implicit_commit;
	Mgmtd__DatastoreId dst_ds_id;
	struct mgmt_ds_ctx *dst_ds_ctx;
	struct mgmt_setcfg_stats *setcfg_stats;
};

enum mgmt_commit_phase {
	MGMTD_COMMIT_PHASE_PREPARE_CFG = 0,
	MGMTD_COMMIT_PHASE_TXN_CREATE,
	MGMTD_COMMIT_PHASE_APPLY_CFG,
	MGMTD_COMMIT_PHASE_TXN_DELETE,
	MGMTD_COMMIT_PHASE_MAX
};

static inline const char *mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
{
	switch (cmt_phase) {
	case MGMTD_COMMIT_PHASE_PREPARE_CFG:
		return "PREP-CFG";
	case MGMTD_COMMIT_PHASE_TXN_CREATE:
		return "CREATE-TXN";
	case MGMTD_COMMIT_PHASE_APPLY_CFG:
		return "APPLY-CFG";
	case MGMTD_COMMIT_PHASE_TXN_DELETE:
		return "DELETE-TXN";
	case MGMTD_COMMIT_PHASE_MAX:
		return "Invalid/Unknown";
	}

	return "Invalid/Unknown";
}
PREDECL_LIST(mgmt_txn_batches);

struct mgmt_txn_be_cfg_batch {
	struct mgmt_txn_ctx *txn;
	enum mgmt_be_client_id be_id;
	struct mgmt_be_client_adapter *be_adapter;
	Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangCfgDataReq *cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	size_t num_cfg_data;
	int buf_space_left;
	struct mgmt_txn_batches_item list_linkage;
};

DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);

#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch)                             \
	frr_each_safe (mgmt_txn_batches, list, batch)

struct mgmt_edit_req {
	char xpath_created[XPATH_MAXLEN];
	bool created;
	bool unlock;
};

struct mgmt_commit_cfg_req {
	Mgmtd__DatastoreId src_ds_id;
	struct mgmt_ds_ctx *src_ds_ctx;
	Mgmtd__DatastoreId dst_ds_id;
	struct mgmt_ds_ctx *dst_ds_ctx;
	uint32_t nb_txn_id;
	uint8_t validate_only : 1;
	uint8_t abort : 1;
	uint8_t implicit : 1;
	uint8_t rollback : 1;
	uint8_t init : 1;

	/* Track commit phases */
	enum mgmt_commit_phase phase;

	enum mgmt_commit_phase be_phase[MGMTD_BE_CLIENT_ID_MAX];

	/*
	 * Additional information when the commit is triggered by native edit
	 * request.
	 */
	struct mgmt_edit_req *edit;

	/*
	 * Set of config changes to commit. This is used only
	 * when changes are NOT to be determined by comparing
	 * candidate and running DSs. This is typically used
	 * for downloading all relevant configs for a new backend
	 * client that has recently come up and connected with
	 * MGMTD.
	 */
	struct nb_config_cbs *cfg_chgs;

	/*
	 * Details on all the Backend Clients associated with
	 * this commit.
	 */
	uint64_t clients;

	/*
	 * List of backend batches for this commit to be validated
	 * and applied at the backend.
	 */
	struct mgmt_txn_batches_head batches[MGMTD_BE_CLIENT_ID_MAX];

	/*
	 * The last batch added for any backend client.
	 */
	struct mgmt_txn_be_cfg_batch *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];

	struct mgmt_commit_stats *cmt_stats;
};
struct mgmt_get_data_reply {
	/* Buffer space for preparing data reply */
	int num_reply;
	int last_batch;
	Mgmtd__YangDataReply data_reply;
	Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangData *reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
};

struct mgmt_get_data_req {
	Mgmtd__DatastoreId ds_id;
	struct nb_config *cfg_root;
	char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
	int num_xpaths;

	/*
	 * Buffer space for preparing reply.
	 * NOTE: Should only be malloc-ed on demand to reduce
	 * memory footprint. Freed up via mgmt_txn_req_free()
	 */
	struct mgmt_get_data_reply *reply;

	int total_reply;
};
struct txn_req_get_tree {
	char *xpath;		/* xpath of tree to get */
	uint64_t sent_clients;	/* Bitmask of clients sent req to */
	uint64_t recv_clients;	/* Bitmask of clients recv reply from */
	int32_t partial_error;	/* an error while gathering results */
	uint8_t result_type;	/* LYD_FORMAT for results */
	uint8_t wd_options;	/* LYD_PRINT_WD_* flags for results */
	uint8_t exact;		/* if exact node is requested */
	uint8_t simple_xpath;	/* if xpath is simple */
	struct lyd_node *client_results; /* result tree from clients */
};

struct txn_req_rpc {
	char *xpath;		/* xpath of rpc/action to invoke */
	uint64_t sent_clients;	/* Bitmask of clients sent req to */
	uint64_t recv_clients;	/* Bitmask of clients recv reply from */
	uint8_t result_type;	/* LYD_FORMAT for results */
	char *errstr;		/* error string */
	struct lyd_node *client_results; /* result tree from clients */
};

struct mgmt_txn_req {
	struct mgmt_txn_ctx *txn;
	enum mgmt_txn_event req_event;
	uint64_t req_id;
	union {
		struct mgmt_set_cfg_req *set_cfg;
		struct mgmt_get_data_req *get_data;
		struct txn_req_get_tree *get_tree;
		struct txn_req_rpc *rpc;
		struct mgmt_commit_cfg_req commit_cfg;
	} req;

	struct mgmt_txn_reqs_item list_linkage;
};
DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);

#define FOREACH_TXN_REQ_IN_LIST(list, req)                                     \
	frr_each_safe (mgmt_txn_reqs, list, req)

struct mgmt_txn_ctx {
	uint64_t session_id; /* One transaction per client session */
	uint64_t txn_id;
	enum mgmt_txn_type type;

	/* struct mgmt_master *mm; */

	struct event *proc_set_cfg;
	struct event *proc_comm_cfg;
	struct event *proc_get_cfg;
	struct event *proc_get_data;
	struct event *proc_get_tree;
	struct event *comm_cfg_timeout;
	struct event *get_tree_timeout;
	struct event *rpc_timeout;
	struct event *clnup;

	/* List of backend adapters involved in this transaction */
	/* XXX reap this */
	struct mgmt_txn_badapters_head be_adapters;

	int refcount;

	struct mgmt_txns_item list_linkage;

	/* TODO: why do we need unique lists for each type of transaction since
	 * a transaction is of only 1 type?
	 */

	/*
	 * List of pending set-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head set_cfg_reqs;
	/*
	 * List of pending get-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head get_cfg_reqs;
	/*
	 * List of pending get-tree requests.
	 */
	struct mgmt_txn_reqs_head get_tree_reqs;
	/*
	 * List of pending rpc requests.
	 */
	struct mgmt_txn_reqs_head rpc_reqs;
	/*
	 * There will always be one commit-config allowed for a given
	 * transaction/session. No need to maintain lists for it.
	 */
	struct mgmt_txn_req *commit_cfg_req;
};

DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);

#define FOREACH_TXN_IN_LIST(mm, txn)                                           \
	frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
					  enum mgmt_result result,
					  const char *error_if_any);

static inline const char *mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn)
{
	if (!txn->commit_cfg_req)
		return "None";

	return mgmt_commit_phase2str(txn->commit_cfg_req->req.commit_cfg.phase);
}

static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line);
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, bool in_hash_free, const char *file,
			    int line);
static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
				       struct mgmt_be_client_adapter *adapter);

static struct event_loop *mgmt_txn_tm;
static struct mgmt_master *mgmt_txn_mm;

static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
				    enum mgmt_txn_event event);

static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn);
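/*
 * Allocate a new backend config batch for backend client 'id', link it into
 * the commit request's batch list for that client and record it as the
 * client's last batch. Takes a reference on the transaction.
 */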
static struct mgmt_txn_be_cfg_batch *
mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn, enum mgmt_be_client_id id,
			 struct mgmt_be_client_adapter *be_adapter)
{
	struct mgmt_txn_be_cfg_batch *batch;

	batch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
			sizeof(struct mgmt_txn_be_cfg_batch));
	assert(batch);
	batch->be_id = id;

	batch->txn = txn;
	MGMTD_TXN_LOCK(txn);
	assert(txn->commit_cfg_req);
	mgmt_txn_batches_add_tail(&txn->commit_cfg_req->req.commit_cfg.batches[id],
				  batch);
	batch->be_adapter = be_adapter;
	batch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
	if (be_adapter)
		mgmt_be_adapter_lock(be_adapter);

	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = batch;

	return batch;
}
static void mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **batch)
{
	size_t indx;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	__dbg(" freeing batch txn-id %" PRIu64, (*batch)->txn->txn_id);

	assert((*batch)->txn && (*batch)->txn->type == MGMTD_TXN_TYPE_CONFIG);

	cmtcfg_req = &(*batch)->txn->commit_cfg_req->req.commit_cfg;
	mgmt_txn_batches_del(&cmtcfg_req->batches[(*batch)->be_id], *batch);

	if ((*batch)->be_adapter)
		mgmt_be_adapter_unlock(&(*batch)->be_adapter);

	for (indx = 0; indx < (*batch)->num_cfg_data; indx++) {
		if ((*batch)->data[indx].xpath) {
			free((*batch)->data[indx].xpath);
			(*batch)->data[indx].xpath = NULL;
		}
	}

	MGMTD_TXN_UNLOCK(&(*batch)->txn, false);

	XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *batch);
	*batch = NULL;
}
static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
					    enum mgmt_be_client_id id)
{
	struct mgmt_txn_be_cfg_batch *batch;
	struct mgmt_txn_batches_head *list;

	list = &txn->commit_cfg_req->req.commit_cfg.batches[id];
	FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
		mgmt_txn_cfg_batch_free(&batch);

	mgmt_txn_batches_fini(list);

	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
}
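/*
 * Allocate a transaction request of the given type, allocate its type-specific
 * payload and queue it on the matching request list of the transaction (a
 * COMMITCFG request is stored directly in txn->commit_cfg_req). Takes a
 * reference on the transaction.
 */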
static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
					       uint64_t req_id,
					       enum mgmt_txn_event req_event)
{
	struct mgmt_txn_req *txn_req;
	enum mgmt_be_client_id id;

	txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
	assert(txn_req);
	txn_req->txn = txn;
	txn_req->req_id = req_id;
	txn_req->req_event = req_event;

	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
					       sizeof(struct mgmt_set_cfg_req));
		assert(txn_req->req.set_cfg);
		mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
		__dbg("Added a new SETCFG req-id: %" PRIu64 " txn-id: %" PRIu64
		      ", session-id: %" PRIu64,
		      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		txn->commit_cfg_req = txn_req;
		__dbg("Added a new COMMITCFG req-id: %" PRIu64
		      " txn-id: %" PRIu64 " session-id: %" PRIu64,
		      txn_req->req_id, txn->txn_id, txn->session_id);

		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			txn_req->req.commit_cfg.be_phase[id] =
				MGMTD_COMMIT_PHASE_PREPARE_CFG;
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.batches[id]);
		}

		txn_req->req.commit_cfg.phase = MGMTD_COMMIT_PHASE_PREPARE_CFG;
		break;
	case MGMTD_TXN_PROC_GETCFG:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
		__dbg("Added a new GETCFG req-id: %" PRIu64 " txn-id: %" PRIu64
		      " session-id: %" PRIu64,
		      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_PROC_GETTREE:
		txn_req->req.get_tree = XCALLOC(MTYPE_MGMTD_TXN_GETTREE_REQ,
						sizeof(struct txn_req_get_tree));
		mgmt_txn_reqs_add_tail(&txn->get_tree_reqs, txn_req);
		__dbg("Added a new GETTREE req-id: %" PRIu64 " txn-id: %" PRIu64
		      " session-id: %" PRIu64,
		      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_PROC_RPC:
		txn_req->req.rpc = XCALLOC(MTYPE_MGMTD_TXN_RPC_REQ,
					   sizeof(struct txn_req_rpc));
		assert(txn_req->req.rpc);
		mgmt_txn_reqs_add_tail(&txn->rpc_reqs, txn_req);
		__dbg("Added a new RPC req-id: %" PRIu64 " txn-id: %" PRIu64
		      " session-id: %" PRIu64,
		      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
		break;
	}

	MGMTD_TXN_LOCK(txn);

	return txn_req;
}
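/*
 * Free a transaction request and its type-specific payload. For a COMMITCFG
 * request this also frees all backend config batches and, if the commit was
 * already past TXN_CREATE, sends a TXN_DELETE to every involved backend.
 * Removes the request from its list and drops the transaction reference.
 */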
static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
{
	int indx;
	struct mgmt_txn_reqs_head *req_list = NULL;
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;
	struct mgmt_commit_cfg_req *ccreq;
	struct mgmt_set_cfg_req *set_cfg;
	bool cleanup;

	switch ((*txn_req)->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		set_cfg = (*txn_req)->req.set_cfg;
		for (indx = 0; indx < set_cfg->num_cfg_changes; indx++) {
			if (set_cfg->cfg_changes[indx].value)
				free((void *)set_cfg->cfg_changes[indx].value);
		}
		req_list = &(*txn_req)->txn->set_cfg_reqs;
		__dbg("Deleting SETCFG req-id: %" PRIu64 " txn-id: %" PRIu64,
		      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		__dbg("Deleting COMMITCFG req-id: %" PRIu64 " txn-id: %" PRIu64,
		      (*txn_req)->req_id, (*txn_req)->txn->txn_id);

		ccreq = &(*txn_req)->req.commit_cfg;
		cleanup = (ccreq->phase >= MGMTD_COMMIT_PHASE_TXN_CREATE &&
			   ccreq->phase < MGMTD_COMMIT_PHASE_TXN_DELETE);

		XFREE(MTYPE_MGMTD_TXN_REQ, ccreq->edit);

		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			/*
			 * Send TXN_DELETE to cleanup state for this
			 * transaction on backend
			 */

			/*
			 * Get rid of the batches first so we don't end up doing
			 * anything more with them
			 */
			mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn, id);

			/*
			 * If we were in the middle of the state machine then
			 * send a txn delete message
			 */
			adapter = mgmt_be_get_adapter_by_id(id);
			if (adapter && cleanup && IS_IDBIT_SET(ccreq->clients, id))
				mgmt_txn_send_be_txn_delete((*txn_req)->txn,
							    adapter);
		}
		break;
	case MGMTD_TXN_PROC_GETCFG:
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)->req.get_data->xpaths[indx]);
		}
		req_list = &(*txn_req)->txn->get_cfg_reqs;
		__dbg("Deleting GETCFG req-id: %" PRIu64 " txn-id: %" PRIu64,
		      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);

		if ((*txn_req)->req.get_data->cfg_root)
			nb_config_free((*txn_req)->req.get_data->cfg_root);

		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_PROC_GETTREE:
		__dbg("Deleting GETTREE req-id: %" PRIu64 " of txn-id: %" PRIu64,
		      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		req_list = &(*txn_req)->txn->get_tree_reqs;
		lyd_free_all((*txn_req)->req.get_tree->client_results);
		XFREE(MTYPE_MGMTD_XPATH, (*txn_req)->req.get_tree->xpath);
		XFREE(MTYPE_MGMTD_TXN_GETTREE_REQ, (*txn_req)->req.get_tree);
		break;
	case MGMTD_TXN_PROC_RPC:
		__dbg("Deleting RPC req-id: %" PRIu64 " txn-id: %" PRIu64,
		      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		req_list = &(*txn_req)->txn->rpc_reqs;
		lyd_free_all((*txn_req)->req.rpc->client_results);
		XFREE(MTYPE_MGMTD_ERR, (*txn_req)->req.rpc->errstr);
		XFREE(MTYPE_MGMTD_XPATH, (*txn_req)->req.rpc->xpath);
		XFREE(MTYPE_MGMTD_TXN_RPC_REQ, (*txn_req)->req.rpc);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
		break;
	}

	if (req_list) {
		mgmt_txn_reqs_del(req_list, *txn_req);
		__dbg("Removed req-id: %" PRIu64 " from request-list (left:%zu)",
		      (*txn_req)->req_id, mgmt_txn_reqs_count(req_list));
	}

	MGMTD_TXN_UNLOCK(&(*txn_req)->txn, false);
	XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
	*txn_req = NULL;
}
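/*
 * Event handler for MGMTD_TXN_PROC_SETCFG: apply up to
 * MGMTD_TXN_MAX_NUM_SETCFG_PROC queued SET_CONFIG requests to the target
 * datastore's config tree, trigger an implicit commit when requested, and
 * reschedule itself if requests are still pending.
 */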
static void mgmt_txn_process_set_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_config *nb_config;
	char err_buf[1024];
	bool error;
	int num_processed = 0;
	size_t left;
	struct mgmt_commit_stats *cmt_stats;
	int ret = 0;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);
	cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);

	__dbg("Processing %zu SET_CONFIG requests txn-id:%" PRIu64
	      " session-id: %" PRIu64,
	      mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn->txn_id,
	      txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
		assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
		ds_ctx = txn_req->req.set_cfg->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
						   txn_req->req.set_cfg->ds_id,
						   txn_req->req_id,
						   MGMTD_INTERNAL_ERROR,
						   "No such datastore!",
						   txn_req->req.set_cfg->implicit_commit);
			goto mgmt_txn_process_set_cfg_done;
		}

		nb_config = mgmt_ds_get_nb_config(ds_ctx);
		if (!nb_config) {
			mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
						   txn_req->req.set_cfg->ds_id,
						   txn_req->req_id,
						   MGMTD_INTERNAL_ERROR,
						   "Unable to retrieve DS Config Tree!",
						   txn_req->req.set_cfg->implicit_commit);
			goto mgmt_txn_process_set_cfg_done;
		}

		error = false;
		nb_candidate_edit_config_changes(nb_config,
						 txn_req->req.set_cfg->cfg_changes,
						 (size_t)txn_req->req.set_cfg->num_cfg_changes,
						 NULL, false, err_buf,
						 sizeof(err_buf), &error);
		if (error) {
			mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
						   txn_req->req.set_cfg->ds_id,
						   txn_req->req_id,
						   MGMTD_INTERNAL_ERROR, err_buf,
						   txn_req->req.set_cfg->implicit_commit);
			goto mgmt_txn_process_set_cfg_done;
		}

		if (txn_req->req.set_cfg->implicit_commit) {
			assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
			assert(txn_req->req.set_cfg->dst_ds_ctx);

			/* We expect the user to have locked the DST DS */
			if (!mgmt_ds_is_locked(txn_req->req.set_cfg->dst_ds_ctx,
					       txn->session_id)) {
				__log_err("DS %u not locked for implicit commit txn-id: %" PRIu64
					  " session-id: %" PRIu64 " err: %s",
					  txn_req->req.set_cfg->dst_ds_id,
					  txn->txn_id, txn->session_id,
					  strerror(ret));
				mgmt_fe_send_set_cfg_reply(
					txn->session_id, txn->txn_id,
					txn_req->req.set_cfg->ds_id,
					txn_req->req_id, MGMTD_DS_LOCK_FAILED,
					"running DS not locked for implicit commit",
					txn_req->req.set_cfg->implicit_commit);
				goto mgmt_txn_process_set_cfg_done;
			}

			mgmt_txn_send_commit_config_req(txn->txn_id,
							txn_req->req_id,
							txn_req->req.set_cfg->ds_id,
							txn_req->req.set_cfg->ds_ctx,
							txn_req->req.set_cfg->dst_ds_id,
							txn_req->req.set_cfg->dst_ds_ctx,
							false, false, true,
							NULL);

			if (mm->perf_stats_en)
				gettimeofday(&cmt_stats->last_start, NULL);
			cmt_stats->commit_cnt++;
		} else if (mgmt_fe_send_set_cfg_reply(txn->session_id,
						      txn->txn_id,
						      txn_req->req.set_cfg->ds_id,
						      txn_req->req_id,
						      MGMTD_SUCCESS, NULL,
						      false) != 0) {
			__log_err("Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
				  " session-id: %" PRIu64,
				  txn->txn_id, txn->session_id);
		}

mgmt_txn_process_set_cfg_done:

		/*
		 * Note: The following will remove it from the list as well.
		 */
		mgmt_txn_req_free(&txn_req);

		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
			break;
	}

	left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
	if (left) {
		__dbg("Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
		      num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC, (int)left);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
	}
}
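/*
 * Send the final reply for a finished commit to the frontend (commit-config,
 * set-config or edit reply depending on how the commit was triggered), copy
 * or restore the datastores based on success/abort/rollback, release any
 * rollback/init locks and free the commit request.
 */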
static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
					  enum mgmt_result result,
					  const char *error_if_any)
{
	bool success, create_cmt_info_rec;

	if (!txn->commit_cfg_req)
		return -1;

	success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);

	/* TODO: these replies should not be sent if it's a rollback
	 * b/c right now that is special cased.. that special casing should be
	 * removed; however...
	 */
	if (!txn->commit_cfg_req->req.commit_cfg.edit &&
	    !txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
	    !txn->commit_cfg_req->req.commit_cfg.rollback &&
	    mgmt_fe_send_commit_cfg_reply(txn->session_id, txn->txn_id,
					  txn->commit_cfg_req->req.commit_cfg.src_ds_id,
					  txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
					  txn->commit_cfg_req->req_id,
					  txn->commit_cfg_req->req.commit_cfg.validate_only,
					  result, error_if_any) != 0) {
		__log_err("Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
			  " session-id: %" PRIu64,
			  txn->txn_id, txn->session_id);
	}

	if (!txn->commit_cfg_req->req.commit_cfg.edit &&
	    txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
	    !txn->commit_cfg_req->req.commit_cfg.rollback &&
	    mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
				       txn->commit_cfg_req->req.commit_cfg.src_ds_id,
				       txn->commit_cfg_req->req_id,
				       success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
				       error_if_any, true) != 0) {
		__log_err("Failed to send SET-CONFIG-REPLY txn-id: %" PRIu64
			  " session-id: %" PRIu64,
			  txn->txn_id, txn->session_id);
	}

	if (txn->commit_cfg_req->req.commit_cfg.edit &&
	    mgmt_fe_adapter_send_edit_reply(txn->session_id, txn->txn_id,
					    txn->commit_cfg_req->req_id,
					    txn->commit_cfg_req->req.commit_cfg.edit->unlock,
					    true,
					    txn->commit_cfg_req->req.commit_cfg.edit->created,
					    txn->commit_cfg_req->req.commit_cfg.edit->xpath_created,
					    success ? 0 : -1,
					    error_if_any) != 0) {
		__log_err("Failed to send EDIT-REPLY txn-id: %" PRIu64
			  " session-id: %" PRIu64,
			  txn->txn_id, txn->session_id);
	}

	if (success) {
		/* Stop the commit-timeout timer */
		/* XXX why only on success? */
		EVENT_OFF(txn->comm_cfg_timeout);

		create_cmt_info_rec =
			(result != MGMTD_NO_CFG_CHANGES &&
			 !txn->commit_cfg_req->req.commit_cfg.rollback);

		/*
		 * Successful commit: Copy Src DS to Dst DS if and only if
		 * this was not a validate-only or abort request.
		 */
		if ((txn->session_id &&
		     !txn->commit_cfg_req->req.commit_cfg.validate_only &&
		     !txn->commit_cfg_req->req.commit_cfg.abort) ||
		    txn->commit_cfg_req->req.commit_cfg.rollback) {
			mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx,
					 txn->commit_cfg_req->req.commit_cfg.src_ds_ctx,
					 create_cmt_info_rec);
		}

		/*
		 * Restore Src DS back to Dest DS only through a commit abort
		 * request.
		 */
		if (txn->session_id && txn->commit_cfg_req->req.commit_cfg.abort)
			mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg.src_ds_ctx,
					 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx, false);
	} else {
		/*
		 * The commit has failed. For implicit commit requests restore
		 * back the contents of the candidate DS. For non-implicit
		 * commit we want to allow the user to re-commit on the changes
		 * (whether further modified or not).
		 */
		if (txn->commit_cfg_req->req.commit_cfg.implicit)
			mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg.src_ds_ctx,
					 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx, false);
	}

	if (txn->commit_cfg_req->req.commit_cfg.rollback) {
		mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
		mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
		/*
		 * Resume processing the rollback command.
		 *
		 * TODO: there's no good reason to special case rollback, the
		 * rollback boolean should be passed back to the FE client and it
		 * can do the right thing.
		 */
		mgmt_history_rollback_complete(success);
	}

	if (txn->commit_cfg_req->req.commit_cfg.init) {
		/*
		 * This is the backend init request.
		 * We need to unlock the running datastore.
		 */
		mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
	}

	txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
	mgmt_txn_req_free(&txn->commit_cfg_req);

	/*
	 * The CONFIG Transaction should be destroyed from Frontend-adapter.
	 * But in case the transaction is not triggered from a front-end
	 * session we need to clean it up ourselves.
	 */
	if (!txn->session_id)
		mgmt_txn_cleanup_txn(&txn);

	return 0;
}
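/*
 * Advance the overall commit phase once every backend client involved in the
 * commit has moved past the current phase, then schedule further COMMITCFG
 * processing.
 */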
static int
mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				   struct mgmt_commit_cfg_req *cmtcfg_req)
{
	enum mgmt_be_client_id id;

	__dbg("txn-id: %" PRIu64 ", Phase '%s'", txn->txn_id,
	      mgmt_txn_commit_phase_str(txn));

	/*
	 * Check if all clients have moved to the next phase or not.
	 */
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (IS_IDBIT_SET(cmtcfg_req->clients, id) &&
		    cmtcfg_req->be_phase[id] == cmtcfg_req->phase) {
			/*
			 * There's at least one client who hasn't moved to
			 * the next phase.
			 *
			 * TODO: Need to re-think this design for the case
			 * where the set of validators for a given YANG data
			 * item is different from the set of notifiers for
			 * the same.
			 */
			return -1;
		}
	}

	/*
	 * If we are here, it means all the clients have moved to the next
	 * phase. So we can move the whole commit to the next phase.
	 */
	cmtcfg_req->phase++;

	__dbg("Move entire txn-id: %" PRIu64 " to phase '%s'", txn->txn_id,
	      mgmt_txn_commit_phase_str(txn));

	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	return 0;
}
/*
 * This is the real workhorse: it walks the config diff and builds the
 * per-backend-client batches of config data for this commit.
 */
static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
					  struct nb_config_cbs *changes)
{
	struct nb_config_cb *cb, *nxt;
	struct nb_config_change *chg;
	struct mgmt_txn_be_cfg_batch *batch;
	char *xpath = NULL, *value = NULL;
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	int num_chgs = 0;
	int xpath_len, value_len;
	uint64_t clients, chg_clients;

	cmtcfg_req = &txn_req->req.commit_cfg;

	RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
		chg = (struct nb_config_change *)cb;

		/*
		 * Could have directly pointed to xpath in nb_node.
		 * But don't want to mess with it now.
		 * xpath = chg->cb.nb_node->xpath;
		 */
		xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
		if (!xpath) {
			(void)mgmt_txn_send_commit_cfg_reply(
				txn_req->txn, MGMTD_INTERNAL_ERROR,
				"Internal error! Could not get Xpath from Ds node!");
			return -1;
		}

		value = (char *)lyd_get_value(chg->cb.dnode);
		if (!value)
			value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;

		__dbg("XPATH: %s, Value: '%s'", xpath, value ? value : "NIL");

		clients = mgmt_be_interested_clients(xpath,
						     MGMT_BE_XPATH_SUBSCR_TYPE_CFG);

		chg_clients = 0;

		xpath_len = strlen(xpath) + 1;
		value_len = strlen(value) + 1;
		FOREACH_BE_CLIENT_BITS (id, clients) {
			adapter = mgmt_be_get_adapter_by_id(id);
			if (!adapter)
				continue;

			chg_clients |= (1ull << id);

			batch = cmtcfg_req->last_be_cfg_batch[id];
			if (!batch ||
			    (batch->num_cfg_data ==
			     MGMTD_MAX_CFG_CHANGES_IN_BATCH) ||
			    (batch->buf_space_left < (xpath_len + value_len))) {
				/* Allocate a new config batch */
				batch = mgmt_txn_cfg_batch_alloc(txn_req->txn,
								 id, adapter);
			}

			batch->buf_space_left -= (xpath_len + value_len);

			mgmt_yang_cfg_data_req_init(
				&batch->cfg_data[batch->num_cfg_data]);
			batch->cfg_datap[batch->num_cfg_data] =
				&batch->cfg_data[batch->num_cfg_data];

			/*
			 * On the backend, we don't really care if it's CREATE
			 * or MODIFY, because the existence was already checked
			 * on the frontend. Therefore we use SET for both.
			 */
			if (chg->cb.operation == NB_CB_DESTROY)
				batch->cfg_data[batch->num_cfg_data].req_type =
					MGMTD__CFG_DATA_REQ_TYPE__REMOVE_DATA;
			else
				batch->cfg_data[batch->num_cfg_data].req_type =
					MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;

			mgmt_yang_data_init(&batch->data[batch->num_cfg_data]);
			batch->cfg_data[batch->num_cfg_data].data =
				&batch->data[batch->num_cfg_data];
			batch->data[batch->num_cfg_data].xpath = strdup(xpath);

			mgmt_yang_data_value_init(
				&batch->value[batch->num_cfg_data]);
			batch->data[batch->num_cfg_data].value =
				&batch->value[batch->num_cfg_data];
			batch->value[batch->num_cfg_data].value_case =
				MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
			batch->value[batch->num_cfg_data].encoded_str_val = value;

			__dbg(" -- %s, batch item:%d", adapter->name,
			      (int)batch->num_cfg_data);

			batch->num_cfg_data++;
			num_chgs++;
		}

		if (!chg_clients)
			__dbg("Daemons interested in XPATH are not currently connected: %s",
			      xpath);

		cmtcfg_req->clients |= chg_clients;

		free(xpath);
	}

	cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
	if (!num_chgs) {
		(void)mgmt_txn_send_commit_cfg_reply(txn_req->txn,
						     MGMTD_NO_CFG_CHANGES,
						     "No connected daemons interested in changes");
		return -1;
	}

	/* Move all BE clients to create phase */
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (IS_IDBIT_SET(cmtcfg_req->clients, id))
			cmtcfg_req->be_phase[id] =
				MGMTD_COMMIT_PHASE_TXN_CREATE;
	}

	return 0;
}
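/*
 * Prepare a commit: sanity-check source/destination datastores, validate the
 * candidate config, compute the diff against running (unless an explicit
 * change-set was supplied), build the backend batches and move the commit to
 * the TXN_CREATE phase while arming the commit timeout.
 */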
static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
{
	struct nb_context nb_ctx;
	struct nb_config *nb_config;
	struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	int ret;
	bool del_cfg_chgs = false;

	ret = 0;
	memset(&nb_ctx, 0, sizeof(nb_ctx));
	memset(&changes, 0, sizeof(changes));
	if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
		cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
		del_cfg_chgs = true;
		goto mgmt_txn_prep_config_validation_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.src_ds_id != MGMTD_DS_CANDIDATE) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Source DS cannot be any other than CANDIDATE!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id != MGMTD_DS_RUNNING) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Destination DS cannot be any other than RUNNING!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     "No such source datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     "No such destination datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.abort) {
		/*
		 * This is a commit abort request. Return back success.
		 * That should trigger a restore of Candidate datastore to
		 * Running.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
		goto mgmt_txn_prepare_config_done;
	}

	nb_config = mgmt_ds_get_nb_config(
		txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
	if (!nb_config) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			"Unable to retrieve Commit DS Config Tree!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/*
	 * Validate YANG contents of the source DS and get the diff
	 * between source and destination DS contents.
	 */
	char err_buf[BUFSIZ] = { 0 };

	ret = nb_candidate_validate_yang(nb_config, true, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	nb_config_diff(mgmt_ds_get_nb_config(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx),
		       nb_config, &changes);
	cfg_chgs = &changes;
	del_cfg_chgs = true;

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there are no changes to commit whatsoever,
		 * whatever the source of the changes in config.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_NO_CFG_CHANGES,
						     "No changes found to be committed!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats->validate_start,
			     NULL);
	/*
	 * Perform application level validations locally on the MGMTD
	 * process by calling application specific validation routines
	 * loaded onto MGMTD process using libraries.
	 */
	nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
	nb_ctx.user = (void *)txn;
	ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
		/*
		 * This was a validate-only COMMIT request; return success.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
		goto mgmt_txn_prepare_config_done;
	}
#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */

mgmt_txn_prep_config_validation_done:

	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats->prep_cfg_start,
			     NULL);

	/*
	 * Iterate over the diffs and create ordered batches of config
	 * commands to be validated.
	 */
	ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
	if (ret != 0) {
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/* Move to the Transaction Create Phase */
	txn->commit_cfg_req->req.commit_cfg.phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	/*
	 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
	 * backend.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
mgmt_txn_prepare_config_done:

	if (cfg_chgs && del_cfg_chgs)
		nb_config_diff_del_changes(cfg_chgs);

	return ret;
}
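/*
 * Send a TXN_CREATE request to every backend client involved in this commit.
 * The commit phase is advanced later, when the TXN_REPLYs come back.
 */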
static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
{
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (IS_IDBIT_SET(cmtcfg_req->clients, id)) {
			adapter = mgmt_be_get_adapter_by_id(id);
			if (mgmt_be_send_txn_req(adapter, txn->txn_id, true)) {
				(void)mgmt_txn_send_commit_cfg_reply(
					txn, MGMTD_INTERNAL_ERROR,
					"Could not send TXN_CREATE to backend adapter");
				return -1;
			}
		}
	}

	/*
	 * Don't move the commit to the next phase yet. Wait for the TXN_REPLY
	 * to come back.
	 */

	__dbg("txn-id: %" PRIu64 " session-id: %" PRIu64 " Phase '%s'",
	      txn->txn_id, txn->session_id, mgmt_txn_commit_phase_str(txn));

	return 0;
}
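/*
 * Send all queued config-data batches for this commit to one backend adapter,
 * marking the last batch so the backend knows when the set is complete.
 */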
static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
				     struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_commit_cfg_req *cmtcfg_req;
	struct mgmt_txn_be_cfg_batch *batch;
	struct mgmt_be_cfgreq cfg_req = { 0 };
	size_t num_batches, indx;

	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	assert(IS_IDBIT_SET(cmtcfg_req->clients, adapter->id));

	indx = 0;
	num_batches = mgmt_txn_batches_count(&cmtcfg_req->batches[adapter->id]);
	FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->batches[adapter->id],
				       batch) {
		cfg_req.cfgdata_reqs = batch->cfg_datap;
		cfg_req.num_reqs = batch->num_cfg_data;
		indx++;
		if (mgmt_be_send_cfgdata_req(adapter, txn->txn_id,
					     cfg_req.cfgdata_reqs,
					     cfg_req.num_reqs,
					     indx == num_batches)) {
			(void)mgmt_txn_send_commit_cfg_reply(
				txn, MGMTD_INTERNAL_ERROR,
				"Internal Error! Could not send config data to backend!");
			__log_err("Could not send CFGDATA_CREATE txn-id: %" PRIu64
				  " to client '%s'",
				  txn->txn_id, adapter->name);
			return -1;
		}

		cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
	}

	/*
	 * We don't advance the phase here, instead that is driven by the
	 * cfg_reply.
	 */

	return 0;
}
static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
				       struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_commit_cfg_req *cmtcfg_req =
		&txn->commit_cfg_req->req.commit_cfg;

	assert(txn->type == MGMTD_TXN_TYPE_CONFIG);

	if (IS_IDBIT_UNSET(cmtcfg_req->clients, adapter->id))
		return 0;

	return mgmt_be_send_txn_req(adapter, txn->txn_id, false);
}
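/*
 * Commit-timeout event handler: the backends did not finish in time, so fail
 * the commit with an error reply; the frontend adapter then cleans up the
 * transaction.
 */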
static void mgmt_txn_cfg_commit_timedout(struct event *thread)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
2022-12-25 16:26:52 +01:00
|
|
|
txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
|
2021-10-28 09:07:11 +02:00
|
|
|
assert(txn);
|
|
|
|
|
|
|
|
assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
|
|
|
|
|
|
|
|
if (!txn->commit_cfg_req)
|
|
|
|
return;
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Backend timeout txn-id: %" PRIu64 " aborting commit",
|
|
|
|
txn->txn_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Send a COMMIT_CONFIG_REPLY with failure.
|
|
|
|
* NOTE: The transaction cleanup will be triggered from Front-end
|
|
|
|
* adapter.
|
|
|
|
*/
|
|
|
|
mgmt_txn_send_commit_cfg_reply(
|
|
|
|
txn, MGMTD_INTERNAL_ERROR,
|
|
|
|
"Operation on the backend timed-out. Aborting commit!");
|
|
|
|
}
|
|
|
|
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
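/*
 * Complete a GETTREE request: stop the timeout timer, trim the collected
 * results down to the requested xpath when needed, and send the (possibly
 * partial) tree back to the front-end session. The request is freed here.
 */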
static int txn_get_tree_data_done(struct mgmt_txn_ctx *txn,
|
|
|
|
struct mgmt_txn_req *txn_req)
|
|
|
|
{
|
|
|
|
struct txn_req_get_tree *get_tree = txn_req->req.get_tree;
|
2023-12-30 15:34:44 +01:00
|
|
|
uint64_t req_id = txn_req->req_id;
|
2024-01-13 23:53:21 +01:00
|
|
|
struct lyd_node *result;
|
2024-01-06 10:45:29 +01:00
|
|
|
int ret = NB_OK;
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/* cancel timer and send reply onward */
|
|
|
|
EVENT_OFF(txn->get_tree_timeout);
|
|
|
|
|
2024-01-06 10:45:29 +01:00
|
|
|
if (!get_tree->simple_xpath && get_tree->client_results) {
|
|
|
|
/*
|
|
|
|
* We have a complex query, so filter the results by the xpath query.
|
|
|
|
*/
|
2024-01-06 12:06:38 +01:00
|
|
|
if (yang_lyd_trim_xpath(&get_tree->client_results,
|
|
|
|
txn_req->req.get_tree->xpath))
|
|
|
|
ret = NB_ERR;
|
2024-01-06 10:45:29 +01:00
|
|
|
}
|
|
|
|
|
2024-01-13 23:53:21 +01:00
|
|
|
result = get_tree->client_results;
|
|
|
|
|
|
|
|
if (ret == NB_OK && result && get_tree->exact)
|
|
|
|
result = yang_dnode_get(result, get_tree->xpath);
|
|
|
|
|
2024-01-06 10:45:29 +01:00
|
|
|
if (ret == NB_OK)
|
|
|
|
ret = mgmt_fe_adapter_send_tree_data(txn->session_id,
|
|
|
|
txn->txn_id,
|
|
|
|
txn_req->req_id,
|
|
|
|
get_tree->result_type,
|
2024-01-29 21:50:26 +01:00
|
|
|
get_tree->wd_options,
|
2024-01-13 23:53:21 +01:00
|
|
|
result,
|
2024-01-06 10:45:29 +01:00
|
|
|
get_tree->partial_error,
|
|
|
|
false);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/* we're done with the request */
|
|
|
|
mgmt_txn_req_free(&txn_req);
|
|
|
|
|
|
|
|
if (ret) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Error sending the results of GETTREE for txn-id %" PRIu64
|
|
|
|
" req_id %" PRIu64 " to requested type %u",
|
|
|
|
txn->txn_id, req_id, get_tree->result_type);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
2024-09-17 08:27:03 +02:00
|
|
|
(void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false,
|
|
|
|
errno_from_nb_error(ret),
|
2023-07-07 05:23:24 +02:00
|
|
|
"Error converting results of GETTREE");
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-03-19 20:11:59 +01:00
|
|
|
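/*
 * Complete an RPC request: stop the timeout timer and forward either the
 * collected RPC results or the recorded error string to the front-end
 * session, then free the request.
 */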
static int txn_rpc_done(struct mgmt_txn_ctx *txn, struct mgmt_txn_req *txn_req)
|
|
|
|
{
|
|
|
|
struct txn_req_rpc *rpc = txn_req->req.rpc;
|
|
|
|
uint64_t req_id = txn_req->req_id;
|
|
|
|
|
|
|
|
/* cancel timer and send reply onward */
|
|
|
|
EVENT_OFF(txn->rpc_timeout);
|
|
|
|
|
|
|
|
if (rpc->errstr)
|
2024-09-17 08:27:03 +02:00
|
|
|
mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -EINVAL,
|
2024-03-19 20:11:59 +01:00
|
|
|
rpc->errstr);
|
|
|
|
else if (mgmt_fe_adapter_send_rpc_reply(txn->session_id, txn->txn_id,
|
|
|
|
req_id, rpc->result_type,
|
|
|
|
rpc->client_results)) {
|
|
|
|
__log_err("Error sending the results of RPC for txn-id %" PRIu64
|
|
|
|
" req_id %" PRIu64 " to requested type %u",
|
|
|
|
txn->txn_id, req_id, rpc->result_type);
|
|
|
|
|
2024-09-17 08:27:03 +02:00
|
|
|
(void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false,
|
|
|
|
-EINVAL,
|
2024-03-19 20:11:59 +01:00
|
|
|
"Error converting results of RPC");
|
|
|
|
}
|
|
|
|
|
|
|
|
/* we're done with the request */
|
|
|
|
mgmt_txn_req_free(&txn_req);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
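/*
 * Timer callback: one or more backends did not answer the get-tree
 * request in time. Record -ETIMEDOUT as a partial error and return
 * whatever results were collected so far.
 */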
static void txn_get_tree_timeout(struct event *thread)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
|
|
|
|
txn_req = (struct mgmt_txn_req *)EVENT_ARG(thread);
|
|
|
|
txn = txn_req->txn;
|
|
|
|
|
|
|
|
assert(txn);
|
|
|
|
assert(txn->type == MGMTD_TXN_TYPE_SHOW);
|
|
|
|
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Backend timeout txn-id: %" PRIu64 " ending get-tree",
|
|
|
|
txn->txn_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Send a get-tree data reply.
|
|
|
|
*
|
|
|
|
* NOTE: The transaction cleanup will be triggered from Front-end
|
|
|
|
* adapter.
|
|
|
|
*/
|
|
|
|
|
|
|
|
txn_req->req.get_tree->partial_error = -ETIMEDOUT;
|
|
|
|
txn_get_tree_data_done(txn, txn_req);
|
|
|
|
}
|
|
|
|
|
2024-03-19 20:11:59 +01:00
|
|
|
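/*
 * Timer callback: the backends did not answer the RPC in time. Record an
 * error string and complete the request via txn_rpc_done().
 */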
static void txn_rpc_timeout(struct event *thread)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
|
|
|
|
txn_req = (struct mgmt_txn_req *)EVENT_ARG(thread);
|
|
|
|
txn = txn_req->txn;
|
|
|
|
|
|
|
|
assert(txn);
|
|
|
|
assert(txn->type == MGMTD_TXN_TYPE_RPC);
|
|
|
|
|
|
|
|
__log_err("Backend timeout txn-id: %" PRIu64 " ending rpc", txn->txn_id);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send an RPC reply.
|
|
|
|
*
|
|
|
|
* NOTE: The transaction cleanup will be triggered from Front-end
|
|
|
|
* adapter.
|
|
|
|
*/
|
|
|
|
|
|
|
|
txn_req->req.rpc->errstr =
|
|
|
|
XSTRDUP(MTYPE_MGMTD_ERR, "Operation on the backend timed-out");
|
|
|
|
txn_rpc_done(txn, txn_req);
|
|
|
|
}
|
|
|
|
|
2021-10-28 09:07:11 +02:00
|
|
|
/*
|
|
|
|
* Send CFG_APPLY_REQs to all the backend clients.
|
|
|
|
*
|
|
|
|
* NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
|
|
|
|
* for all backend clients have been generated. Please see
|
|
|
|
* mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
|
|
|
|
* for details.
|
|
|
|
*/
|
|
|
|
static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
|
|
|
|
{
|
|
|
|
enum mgmt_be_client_id id;
|
|
|
|
struct mgmt_be_client_adapter *adapter;
|
|
|
|
struct mgmt_commit_cfg_req *cmtcfg_req;
|
|
|
|
|
|
|
|
assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
|
|
|
|
|
|
|
|
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
|
|
|
|
if (cmtcfg_req->validate_only) {
|
|
|
|
/*
|
|
|
|
* If this was a validate-only COMMIT request return success.
|
|
|
|
*/
|
2023-06-26 18:59:59 +02:00
|
|
|
(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
|
2021-10-28 09:07:11 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
FOREACH_MGMTD_BE_CLIENT_ID (id) {
|
2023-07-09 05:11:15 +02:00
|
|
|
if (IS_IDBIT_SET(cmtcfg_req->clients, id)) {
|
2021-10-28 09:07:11 +02:00
|
|
|
adapter = mgmt_be_get_adapter_by_id(id);
|
|
|
|
if (!adapter)
|
|
|
|
return -1;
|
|
|
|
|
2023-06-05 00:09:25 +02:00
|
|
|
if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) {
|
2021-10-28 09:07:11 +02:00
|
|
|
(void)mgmt_txn_send_commit_cfg_reply(
|
|
|
|
txn, MGMTD_INTERNAL_ERROR,
|
|
|
|
"Could not send CFG_APPLY_REQ to backend adapter");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
cmtcfg_req->cmt_stats->last_num_apply_reqs++;
|
|
|
|
|
|
|
|
UNSET_FLAG(adapter->flags,
|
|
|
|
MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't move the commit to the next phase yet. Wait for all CFG_APPLY_REPLYs
|
|
|
|
* to come back.
|
|
|
|
*/
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
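/*
 * Main COMMIT_CONFIG event handler. Each invocation runs exactly one
 * phase of the commit state machine:
 *
 *   PREPARE_CFG -> TXN_CREATE -> APPLY_CFG -> TXN_DELETE
 *
 * and then waits for the corresponding backend replies to schedule the
 * next phase.
 */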
static void mgmt_txn_process_commit_cfg(struct event *thread)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_commit_cfg_req *cmtcfg_req;
|
|
|
|
|
2022-12-25 16:26:52 +01:00
|
|
|
txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
|
2021-10-28 09:07:11 +02:00
|
|
|
assert(txn);
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Processing COMMIT_CONFIG for txn-id: %" PRIu64
|
|
|
|
" session-id: %" PRIu64 " Phase '%s'",
|
|
|
|
txn->txn_id, txn->session_id, mgmt_txn_commit_phase_str(txn));
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
assert(txn->commit_cfg_req);
|
|
|
|
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
|
2024-01-11 22:41:29 +01:00
|
|
|
switch (cmtcfg_req->phase) {
|
2021-10-28 09:07:11 +02:00
|
|
|
case MGMTD_COMMIT_PHASE_PREPARE_CFG:
|
|
|
|
mgmt_txn_prepare_config(txn);
|
|
|
|
break;
|
|
|
|
case MGMTD_COMMIT_PHASE_TXN_CREATE:
|
|
|
|
if (mm->perf_stats_en)
|
|
|
|
gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
|
|
|
|
NULL);
|
|
|
|
/*
|
|
|
|
* Send TXN_CREATE_REQ to all Backend now.
|
|
|
|
*/
|
|
|
|
mgmt_txn_send_be_txn_create(txn);
|
|
|
|
break;
|
|
|
|
case MGMTD_COMMIT_PHASE_APPLY_CFG:
|
|
|
|
if (mm->perf_stats_en)
|
|
|
|
gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
|
|
|
|
NULL);
|
|
|
|
/*
|
|
|
|
* We should have received successful CFGDATA_CREATE replies from
|
|
|
|
* all concerned Backend Clients by now. Send out the
|
|
|
|
* CFG_APPLY_REQs now.
|
|
|
|
*/
|
|
|
|
mgmt_txn_send_be_cfg_apply(txn);
|
|
|
|
break;
|
|
|
|
case MGMTD_COMMIT_PHASE_TXN_DELETE:
|
|
|
|
if (mm->perf_stats_en)
|
|
|
|
gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
|
|
|
|
NULL);
|
|
|
|
/*
|
|
|
|
* We would have sent TXN_DELETE_REQ to all backend by now.
|
|
|
|
* Send a successful CONFIG_COMMIT_REPLY back to front-end.
|
|
|
|
* NOTE: This should also trigger DS merge/unlock and Txn
|
|
|
|
* cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
|
|
|
|
* more details.
|
|
|
|
*/
|
2022-12-25 16:26:52 +01:00
|
|
|
EVENT_OFF(txn->comm_cfg_timeout);
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
|
|
|
|
break;
|
|
|
|
case MGMTD_COMMIT_PHASE_MAX:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
|
|
|
|
{
|
|
|
|
size_t indx;
|
|
|
|
|
|
|
|
for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
|
|
|
|
get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
|
|
|
|
{
|
|
|
|
int indx;
|
|
|
|
|
|
|
|
for (indx = 0; indx < get_reply->num_reply; indx++) {
|
|
|
|
if (get_reply->reply_xpathp[indx]) {
|
|
|
|
free(get_reply->reply_xpathp[indx]);
|
|
|
|
get_reply->reply_xpathp[indx] = 0;
|
|
|
|
}
|
|
|
|
if (get_reply->reply_data[indx].xpath) {
|
|
|
|
free(get_reply->reply_data[indx].xpath);
|
|
|
|
get_reply->reply_data[indx].xpath = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
get_reply->num_reply = 0;
|
|
|
|
memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
|
|
|
|
memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
|
|
|
|
memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
|
|
|
|
|
|
|
|
memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
|
|
|
|
|
|
|
|
mgmt_init_get_data_reply(get_reply);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
|
|
|
|
{
|
|
|
|
if (get_data->reply)
|
|
|
|
mgmt_reset_get_data_reply(get_data->reply);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
|
2023-06-26 18:59:59 +02:00
|
|
|
struct mgmt_get_data_req *get_req)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_get_data_reply *get_reply;
|
|
|
|
Mgmtd__YangDataReply *data_reply;
|
|
|
|
|
|
|
|
get_reply = get_req->reply;
|
|
|
|
if (!get_reply)
|
|
|
|
return;
|
|
|
|
|
|
|
|
data_reply = &get_reply->data_reply;
|
|
|
|
mgmt_yang_data_reply_init(data_reply);
|
|
|
|
data_reply->n_data = get_reply->num_reply;
|
|
|
|
data_reply->data = get_reply->reply_datap;
|
2023-06-26 18:59:59 +02:00
|
|
|
data_reply->next_indx = (!get_reply->last_batch ? get_req->total_reply
|
|
|
|
: -1);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Sending %zu Get-Config/Data replies next-index:%" PRId64,
|
|
|
|
data_reply->n_data, data_reply->next_indx);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
switch (txn_req->req_event) {
|
|
|
|
case MGMTD_TXN_PROC_GETCFG:
|
2023-06-27 19:58:54 +02:00
|
|
|
if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
|
|
|
|
txn_req->txn->txn_id, get_req->ds_id,
|
|
|
|
txn_req->req_id, MGMTD_SUCCESS,
|
|
|
|
data_reply, NULL) != 0) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
|
|
|
|
" session-id: %" PRIu64 " req-id: %" PRIu64,
|
|
|
|
txn_req->txn->txn_id,
|
|
|
|
txn_req->txn->session_id, txn_req->req_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case MGMTD_TXN_PROC_SETCFG:
|
|
|
|
case MGMTD_TXN_PROC_COMMITCFG:
|
2023-07-07 05:23:24 +02:00
|
|
|
case MGMTD_TXN_PROC_GETTREE:
|
2024-03-19 20:11:59 +01:00
|
|
|
case MGMTD_TXN_PROC_RPC:
|
2021-10-28 09:07:11 +02:00
|
|
|
case MGMTD_TXN_COMMITCFG_TIMEOUT:
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Invalid Txn-Req-Event %u", txn_req->req_event);
|
2021-10-28 09:07:11 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset reply buffer for next reply.
|
|
|
|
*/
|
|
|
|
mgmt_reset_get_data_reply_buf(get_req);
|
|
|
|
}
|
|
|
|
|
2023-07-07 05:23:24 +02:00
|
|
|
static void txn_iter_get_config_data_cb(const char *xpath, struct lyd_node *node,
|
|
|
|
struct nb_node *nb_node, void *ctx)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
struct mgmt_get_data_req *get_req;
|
|
|
|
struct mgmt_get_data_reply *get_reply;
|
|
|
|
Mgmtd__YangData *data;
|
|
|
|
Mgmtd__YangDataValue *data_value;
|
|
|
|
|
|
|
|
txn_req = (struct mgmt_txn_req *)ctx;
|
|
|
|
if (!txn_req)
|
2023-05-16 11:54:05 +02:00
|
|
|
return;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
if (!(node->schema->nodetype & LYD_NODE_TERM))
|
2023-05-16 11:54:05 +02:00
|
|
|
return;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2023-10-10 02:52:54 +02:00
|
|
|
assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
get_req = txn_req->req.get_data;
|
|
|
|
assert(get_req);
|
|
|
|
get_reply = get_req->reply;
|
|
|
|
data = &get_reply->reply_data[get_reply->num_reply];
|
|
|
|
data_value = &get_reply->reply_value[get_reply->num_reply];
|
|
|
|
|
|
|
|
mgmt_yang_data_init(data);
|
2023-05-16 11:54:05 +02:00
|
|
|
data->xpath = strdup(xpath);
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_yang_data_value_init(data_value);
|
|
|
|
data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
|
|
|
|
data_value->encoded_str_val = (char *)lyd_get_value(node);
|
|
|
|
data->value = data_value;
|
|
|
|
|
|
|
|
get_reply->num_reply++;
|
|
|
|
get_req->total_reply++;
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
|
|
|
|
data->xpath, data_value->encoded_str_val);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
|
|
|
|
mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
|
|
|
|
}
|
|
|
|
|
|
|
|
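/*
 * Serve a GET_CONFIG request directly from the datastore: iterate each
 * requested xpath over the given config root and stream the matching
 * terminal nodes back to the front-end in batches.
 */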
static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
|
|
|
|
struct mgmt_txn_req *txn_req,
|
2023-06-14 15:32:16 +02:00
|
|
|
struct nb_config *root)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
int indx;
|
|
|
|
struct mgmt_get_data_req *get_data;
|
|
|
|
struct mgmt_get_data_reply *get_reply;
|
|
|
|
|
|
|
|
get_data = txn_req->req.get_data;
|
|
|
|
|
|
|
|
if (!get_data->reply) {
|
|
|
|
get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
|
|
|
|
sizeof(struct mgmt_get_data_reply));
|
|
|
|
if (!get_data->reply) {
|
2023-06-27 19:58:54 +02:00
|
|
|
mgmt_fe_send_get_reply(
|
2023-06-26 18:59:59 +02:00
|
|
|
txn->session_id, txn->txn_id, get_data->ds_id,
|
|
|
|
txn_req->req_id, MGMTD_INTERNAL_ERROR, NULL,
|
2021-10-28 09:07:11 +02:00
|
|
|
"Internal error: Unable to allocate reply buffers!");
|
|
|
|
goto mgmt_txn_get_config_failed;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read data contents from the DS and respond back directly.
|
|
|
|
* No need to go to backend for getting data.
|
|
|
|
*/
|
|
|
|
get_reply = get_data->reply;
|
|
|
|
for (indx = 0; indx < get_data->num_xpaths; indx++) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Trying to get all data under '%s'",
|
|
|
|
get_data->xpaths[indx]);
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_init_get_data_reply(get_reply);
|
2023-05-16 11:54:05 +02:00
|
|
|
/*
|
|
|
|
* mgmt_ds_iter_data works on path prefixes, but the user may
|
|
|
|
* also want to use an xpath regexp; we need to add this
|
|
|
|
* functionality.
|
|
|
|
*/
|
2023-06-14 15:32:16 +02:00
|
|
|
if (mgmt_ds_iter_data(get_data->ds_id, root,
|
|
|
|
get_data->xpaths[indx],
|
2023-07-07 05:23:24 +02:00
|
|
|
txn_iter_get_config_data_cb,
|
2023-05-16 11:54:05 +02:00
|
|
|
(void *)txn_req) == -1) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Invalid Xpath '%s", get_data->xpaths[indx]);
|
2023-06-27 19:58:54 +02:00
|
|
|
mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
|
|
|
|
get_data->ds_id, txn_req->req_id,
|
|
|
|
MGMTD_INTERNAL_ERROR, NULL,
|
|
|
|
"Invalid xpath");
|
2021-10-28 09:07:11 +02:00
|
|
|
goto mgmt_txn_get_config_failed;
|
|
|
|
}
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Got %d remaining data-replies for xpath '%s'",
|
|
|
|
get_reply->num_reply, get_data->xpaths[indx]);
|
2021-10-28 09:07:11 +02:00
|
|
|
get_reply->last_batch = true;
|
|
|
|
mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
|
|
|
|
}
|
|
|
|
|
|
|
|
mgmt_txn_get_config_failed:
|
|
|
|
|
2023-03-24 11:09:42 +01:00
|
|
|
/*
|
|
|
|
* Delete the txn request. It will also remove it from request
|
|
|
|
* list.
|
|
|
|
*/
|
|
|
|
mgmt_txn_req_free(&txn_req);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
static void mgmt_txn_process_get_cfg(struct event *thread)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
2023-06-14 15:32:16 +02:00
|
|
|
struct nb_config *cfg_root;
|
2021-10-28 09:07:11 +02:00
|
|
|
int num_processed = 0;
|
|
|
|
bool error;
|
|
|
|
|
2022-12-25 16:26:52 +01:00
|
|
|
txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
|
2021-10-28 09:07:11 +02:00
|
|
|
assert(txn);
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Processing %zu GET_CONFIG requests txn-id: %" PRIu64
|
|
|
|
" session-id: %" PRIu64,
|
|
|
|
mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn->txn_id,
|
|
|
|
txn->session_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
|
|
|
|
error = false;
|
|
|
|
assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
|
2023-06-14 15:32:16 +02:00
|
|
|
cfg_root = txn_req->req.get_data->cfg_root;
|
|
|
|
assert(cfg_root);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2023-06-14 15:32:16 +02:00
|
|
|
if (mgmt_txn_get_config(txn, txn_req, cfg_root) != 0) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Unable to retrieve config from DS %d txn-id: %" PRIu64
|
|
|
|
" session-id: %" PRIu64 " req-id: %" PRIu64,
|
|
|
|
txn_req->req.get_data->ds_id, txn->txn_id,
|
|
|
|
txn->session_id, txn_req->req_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
error = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (error) {
|
|
|
|
/*
|
|
|
|
* Delete the txn request.
|
|
|
|
* Note: The following will remove it from the list
|
|
|
|
* as well.
|
|
|
|
*/
|
|
|
|
mgmt_txn_req_free(&txn_req);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Otherwise the request would have already been deleted or
|
|
|
|
* moved to the corresponding pending list. No need to delete it.
|
|
|
|
*/
|
|
|
|
num_processed++;
|
|
|
|
if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
|
|
|
|
num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct mgmt_txn_ctx *
|
|
|
|
mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
|
2023-06-26 18:59:59 +02:00
|
|
|
enum mgmt_txn_type type)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
|
|
|
FOREACH_TXN_IN_LIST (cm, txn) {
|
|
|
|
if (txn->session_id == session_id && txn->type == type)
|
|
|
|
return txn;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
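/*
 * Look up an existing transaction for this session and type, or allocate
 * a new one. Only a single CONFIG transaction is allowed at a time; a
 * conflicting request returns NULL. A new transaction is added to the
 * global list and hash and starts with one reference held.
 */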
static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
|
2023-06-26 18:59:59 +02:00
|
|
|
enum mgmt_txn_type type)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn = NULL;
|
|
|
|
|
2024-01-25 12:54:45 +01:00
|
|
|
/* Do not allow multiple config transactions */
|
|
|
|
if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_config_txn_in_progress())
|
|
|
|
return NULL;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id, type);
|
2021-10-28 09:07:11 +02:00
|
|
|
if (!txn) {
|
|
|
|
txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
|
|
|
|
assert(txn);
|
|
|
|
|
|
|
|
txn->session_id = session_id;
|
|
|
|
txn->type = type;
|
|
|
|
mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
|
2023-07-07 05:23:24 +02:00
|
|
|
/* TODO: why do we need N lists for one transaction */
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_reqs_init(&txn->set_cfg_reqs);
|
|
|
|
mgmt_txn_reqs_init(&txn->get_cfg_reqs);
|
2023-07-07 05:23:24 +02:00
|
|
|
mgmt_txn_reqs_init(&txn->get_tree_reqs);
|
2024-03-19 20:11:59 +01:00
|
|
|
mgmt_txn_reqs_init(&txn->rpc_reqs);
|
2021-10-28 09:07:11 +02:00
|
|
|
txn->commit_cfg_req = NULL;
|
|
|
|
txn->refcount = 0;
|
|
|
|
if (!mgmt_txn_mm->next_txn_id)
|
|
|
|
mgmt_txn_mm->next_txn_id++;
|
|
|
|
txn->txn_id = mgmt_txn_mm->next_txn_id++;
|
|
|
|
hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Added new '%s' txn-id: %" PRIu64,
|
|
|
|
mgmt_txn_type2str(type), txn->txn_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
if (type == MGMTD_TXN_TYPE_CONFIG)
|
|
|
|
mgmt_txn_mm->cfg_txn = txn;
|
|
|
|
|
|
|
|
MGMTD_TXN_LOCK(txn);
|
|
|
|
}
|
|
|
|
|
|
|
|
return txn;
|
|
|
|
}
|
|
|
|
|
2025-02-26 18:34:05 +01:00
|
|
|
static void mgmt_txn_delete(struct mgmt_txn_ctx **txn, bool in_hash_free)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
2025-02-26 18:34:05 +01:00
|
|
|
MGMTD_TXN_UNLOCK(txn, in_hash_free);
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int mgmt_txn_hash_key(const void *data)
|
|
|
|
{
|
|
|
|
const struct mgmt_txn_ctx *txn = data;
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
return jhash2((uint32_t *)&txn->txn_id,
|
2021-10-28 09:07:11 +02:00
|
|
|
sizeof(txn->txn_id) / sizeof(uint32_t), 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
|
|
|
|
{
|
|
|
|
const struct mgmt_txn_ctx *txn1 = d1;
|
|
|
|
const struct mgmt_txn_ctx *txn2 = d2;
|
|
|
|
|
|
|
|
return (txn1->txn_id == txn2->txn_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_txn_hash_free(void *data)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn = data;
|
|
|
|
|
2025-02-26 18:34:05 +01:00
|
|
|
mgmt_txn_delete(&txn, true);
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_txn_hash_init(void)
|
|
|
|
{
|
|
|
|
if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
|
|
|
|
return;
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key, mgmt_txn_hash_cmp,
|
|
|
|
"MGMT Transactions");
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_txn_hash_destroy(void)
|
|
|
|
{
|
|
|
|
if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
|
|
|
|
return;
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
hash_clean(mgmt_txn_mm->txn_hash, mgmt_txn_hash_free);
|
2021-10-28 09:07:11 +02:00
|
|
|
hash_free(mgmt_txn_mm->txn_hash);
|
|
|
|
mgmt_txn_mm->txn_hash = NULL;
|
|
|
|
}
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
static inline struct mgmt_txn_ctx *mgmt_txn_id2ctx(uint64_t txn_id)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
2023-06-26 18:59:59 +02:00
|
|
|
struct mgmt_txn_ctx key = { 0 };
|
2021-10-28 09:07:11 +02:00
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
|
|
|
if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
key.txn_id = txn_id;
|
|
|
|
txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
|
|
|
|
|
|
|
|
return txn;
|
|
|
|
}
|
|
|
|
|
2023-07-07 05:23:24 +02:00
|
|
|
uint64_t mgmt_txn_get_session_id(uint64_t txn_id)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
|
|
|
|
return txn ? txn->session_id : MGMTD_SESSION_ID_NONE;
|
|
|
|
}
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
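/*
 * Reference counting for transaction contexts. The final
 * mgmt_txn_unlock() cancels any pending events and timers, removes the
 * transaction from the global list (and from the hash, unless we are
 * already inside the hash teardown) and frees it.
 */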
static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
txn->refcount++;
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("%s:%d --> Lock %s txn-id: %" PRIu64 " refcnt: %d", file, line,
|
|
|
|
mgmt_txn_type2str(txn->type), txn->txn_id, txn->refcount);
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
|
2025-02-26 18:34:05 +01:00
|
|
|
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, bool in_hash_free, const char *file, int line)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
assert(*txn && (*txn)->refcount);
|
|
|
|
|
|
|
|
(*txn)->refcount--;
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("%s:%d --> Unlock %s txn-id: %" PRIu64 " refcnt: %d", file, line,
|
|
|
|
mgmt_txn_type2str((*txn)->type), (*txn)->txn_id, (*txn)->refcount);
|
2021-10-28 09:07:11 +02:00
|
|
|
if (!(*txn)->refcount) {
|
|
|
|
if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
|
|
|
|
if (mgmt_txn_mm->cfg_txn == *txn)
|
|
|
|
mgmt_txn_mm->cfg_txn = NULL;
|
2022-12-25 16:26:52 +01:00
|
|
|
EVENT_OFF((*txn)->proc_get_cfg);
|
|
|
|
EVENT_OFF((*txn)->proc_get_data);
|
|
|
|
EVENT_OFF((*txn)->proc_comm_cfg);
|
|
|
|
EVENT_OFF((*txn)->comm_cfg_timeout);
|
2023-07-07 05:23:24 +02:00
|
|
|
EVENT_OFF((*txn)->get_tree_timeout);
|
2025-02-26 18:34:05 +01:00
|
|
|
if (!in_hash_free)
|
|
|
|
hash_release(mgmt_txn_mm->txn_hash, *txn);
|
|
|
|
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Deleted %s txn-id: %" PRIu64 " session-id: %" PRIu64,
|
|
|
|
mgmt_txn_type2str((*txn)->type), (*txn)->txn_id,
|
|
|
|
(*txn)->session_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
XFREE(MTYPE_MGMTD_TXN, *txn);
|
|
|
|
}
|
|
|
|
|
|
|
|
*txn = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
|
|
|
|
{
|
|
|
|
/* TODO: Any other cleanup applicable */
|
|
|
|
|
2025-02-26 18:34:05 +01:00
|
|
|
mgmt_txn_delete(txn, false);
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
static void mgmt_txn_cleanup_all_txns(void)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
|
|
|
if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
|
|
|
|
return;
|
|
|
|
|
|
|
|
FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
|
|
|
|
mgmt_txn_cleanup_txn(&txn);
|
|
|
|
}
|
|
|
|
|
|
|
|
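/*
 * Schedule the handler for the given transaction event on the main event
 * loop after a short delay. GETTREE and RPC requests use their own
 * timeout timers and must never be registered through this function.
 */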
static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
|
2023-06-26 18:59:59 +02:00
|
|
|
enum mgmt_txn_event event)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
2023-06-26 18:59:59 +02:00
|
|
|
struct timeval tv = { .tv_sec = 0,
|
|
|
|
.tv_usec = MGMTD_TXN_PROC_DELAY_USEC };
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
assert(mgmt_txn_mm && mgmt_txn_tm);
|
|
|
|
|
|
|
|
switch (event) {
|
|
|
|
case MGMTD_TXN_PROC_SETCFG:
|
2023-06-26 18:59:59 +02:00
|
|
|
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg, txn,
|
|
|
|
&tv, &txn->proc_set_cfg);
|
2021-10-28 09:07:11 +02:00
|
|
|
break;
|
|
|
|
case MGMTD_TXN_PROC_COMMITCFG:
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
|
2023-06-26 18:59:59 +02:00
|
|
|
txn, &tv, &txn->proc_comm_cfg);
|
2021-10-28 09:07:11 +02:00
|
|
|
break;
|
|
|
|
case MGMTD_TXN_PROC_GETCFG:
|
2023-06-26 18:59:59 +02:00
|
|
|
event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg, txn,
|
|
|
|
&tv, &txn->proc_get_cfg);
|
2021-10-28 09:07:11 +02:00
|
|
|
break;
|
|
|
|
case MGMTD_TXN_COMMITCFG_TIMEOUT:
|
2023-07-07 05:23:24 +02:00
|
|
|
event_add_timer(mgmt_txn_tm, mgmt_txn_cfg_commit_timedout, txn,
|
|
|
|
MGMTD_TXN_CFG_COMMIT_MAX_DELAY_SEC,
|
|
|
|
&txn->comm_cfg_timeout);
|
|
|
|
break;
|
|
|
|
case MGMTD_TXN_PROC_GETTREE:
|
2024-03-19 20:11:59 +01:00
|
|
|
case MGMTD_TXN_PROC_RPC:
|
2023-07-07 05:23:24 +02:00
|
|
|
assert(!"code bug do not register this event");
|
|
|
|
break;
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2025-01-24 21:20:48 +01:00
|
|
|
int mgmt_txn_init(struct mgmt_master *m, struct event_loop *loop)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
if (mgmt_txn_mm || mgmt_txn_tm)
|
|
|
|
assert(!"MGMTD TXN: Call txn_init() only once");
|
|
|
|
|
2025-01-24 21:20:48 +01:00
|
|
|
mgmt_txn_mm = m;
|
|
|
|
mgmt_txn_tm = loop;
|
|
|
|
mgmt_txns_init(&m->txn_list);
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_hash_init();
|
2025-01-24 21:20:48 +01:00
|
|
|
assert(!m->cfg_txn);
|
|
|
|
m->cfg_txn = NULL;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void mgmt_txn_destroy(void)
|
|
|
|
{
|
|
|
|
mgmt_txn_cleanup_all_txns();
|
|
|
|
mgmt_txn_hash_destroy();
|
|
|
|
}
|
|
|
|
|
2024-01-25 12:54:45 +01:00
|
|
|
bool mgmt_config_txn_in_progress(void)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
|
2024-01-25 12:54:45 +01:00
|
|
|
return true;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2024-01-25 12:54:45 +01:00
|
|
|
return false;
|
2021-10-28 09:07:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
|
|
|
txn = mgmt_txn_create_new(session_id, type);
|
|
|
|
return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
void mgmt_destroy_txn(uint64_t *txn_id)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(*txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return;
|
|
|
|
|
2025-02-26 18:34:05 +01:00
|
|
|
mgmt_txn_delete(&txn, false);
|
2021-10-28 09:07:11 +02:00
|
|
|
*txn_id = MGMTD_TXN_ID_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
|
2023-06-26 18:59:59 +02:00
|
|
|
Mgmtd__DatastoreId ds_id,
|
|
|
|
struct mgmt_ds_ctx *ds_ctx,
|
|
|
|
Mgmtd__YangCfgDataReq **cfg_req,
|
|
|
|
size_t num_req, bool implicit_commit,
|
|
|
|
Mgmtd__DatastoreId dst_ds_id,
|
|
|
|
struct mgmt_ds_ctx *dst_ds_ctx)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
size_t indx;
|
|
|
|
uint16_t *num_chgs;
|
|
|
|
struct nb_cfg_change *cfg_chg;
|
2023-10-06 01:58:58 +02:00
|
|
|
struct nb_node *node;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err(
|
2021-10-28 09:07:11 +02:00
|
|
|
"For implicit commit config only one SETCFG-REQ can be allowed!");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
|
|
|
|
txn_req->req.set_cfg->ds_id = ds_id;
|
|
|
|
txn_req->req.set_cfg->ds_ctx = ds_ctx;
|
|
|
|
num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
|
|
|
|
for (indx = 0; indx < num_req; indx++) {
|
|
|
|
cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
|
|
|
|
|
2023-10-06 01:58:58 +02:00
|
|
|
switch (cfg_req[indx]->req_type) {
|
|
|
|
case MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA:
|
2023-10-06 14:01:16 +02:00
|
|
|
cfg_chg->operation = NB_OP_DELETE;
|
|
|
|
break;
|
|
|
|
case MGMTD__CFG_DATA_REQ_TYPE__REMOVE_DATA:
|
2021-10-28 09:07:11 +02:00
|
|
|
cfg_chg->operation = NB_OP_DESTROY;
|
2023-10-06 01:58:58 +02:00
|
|
|
break;
|
|
|
|
case MGMTD__CFG_DATA_REQ_TYPE__SET_DATA:
|
|
|
|
/*
|
|
|
|
* For backward compatibility, we need to allow creating
|
|
|
|
* *new* list keys with SET_DATA operation. NB_OP_MODIFY
|
|
|
|
* is not allowed for keys, so use NB_OP_CREATE_EXCL.
|
|
|
|
*/
|
|
|
|
node = nb_node_find(cfg_req[indx]->data->xpath);
|
|
|
|
if (node && lysc_is_key(node->snode))
|
|
|
|
cfg_chg->operation = NB_OP_CREATE_EXCL;
|
|
|
|
else
|
|
|
|
cfg_chg->operation = NB_OP_MODIFY;
|
|
|
|
break;
|
|
|
|
case MGMTD__CFG_DATA_REQ_TYPE__CREATE_DATA:
|
|
|
|
cfg_chg->operation = NB_OP_CREATE_EXCL;
|
|
|
|
break;
|
2023-10-09 02:21:16 +02:00
|
|
|
case MGMTD__CFG_DATA_REQ_TYPE__REPLACE_DATA:
|
|
|
|
cfg_chg->operation = NB_OP_REPLACE;
|
|
|
|
break;
|
2023-10-06 01:58:58 +02:00
|
|
|
case MGMTD__CFG_DATA_REQ_TYPE__REQ_TYPE_NONE:
|
|
|
|
case _MGMTD__CFG_DATA_REQ_TYPE_IS_INT_SIZE:
|
|
|
|
default:
|
2021-10-28 09:07:11 +02:00
|
|
|
continue;
|
2023-10-06 01:58:58 +02:00
|
|
|
}
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
|
|
|
|
(cfg_req[indx]->data->value &&
|
|
|
|
cfg_req[indx]->data->value->encoded_str_val
|
|
|
|
? cfg_req[indx]->data->value->encoded_str_val
|
|
|
|
: "NULL"));
|
2021-10-28 09:07:11 +02:00
|
|
|
strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
|
|
|
|
sizeof(cfg_chg->xpath));
|
2023-06-26 18:59:59 +02:00
|
|
|
cfg_chg->value =
|
|
|
|
(cfg_req[indx]->data->value &&
|
|
|
|
cfg_req[indx]->data->value->encoded_str_val
|
|
|
|
? strdup(cfg_req[indx]
|
|
|
|
->data->value->encoded_str_val)
|
|
|
|
: NULL);
|
2021-10-28 09:07:11 +02:00
|
|
|
if (cfg_chg->value)
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Allocated value at %p ==> '%s'", cfg_chg->value,
|
|
|
|
cfg_chg->value);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
(*num_chgs)++;
|
|
|
|
}
|
|
|
|
txn_req->req.set_cfg->implicit_commit = implicit_commit;
|
|
|
|
txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
|
|
|
|
txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
|
|
|
|
txn_req->req.set_cfg->setcfg_stats =
|
|
|
|
mgmt_fe_get_session_setcfg_stats(txn->session_id);
|
|
|
|
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
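/*
 * Queue a COMMIT_CONFIG request on an existing transaction and kick off
 * the commit state machine. Only one commit may be in progress on a
 * transaction at a time.
 *
 * A typical caller sequence, as a sketch only (candidate_ds and
 * running_ds stand in for the corresponding struct mgmt_ds_ctx pointers):
 *
 *   txn_id = mgmt_create_txn(session_id, MGMTD_TXN_TYPE_CONFIG);
 *   mgmt_txn_send_commit_config_req(txn_id, req_id,
 *                                   MGMTD_DS_CANDIDATE, candidate_ds,
 *                                   MGMTD_DS_RUNNING, running_ds,
 *                                   false, false, false, NULL);
 */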
int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
|
2023-06-14 15:32:16 +02:00
|
|
|
Mgmtd__DatastoreId src_ds_id,
|
|
|
|
struct mgmt_ds_ctx *src_ds_ctx,
|
|
|
|
Mgmtd__DatastoreId dst_ds_id,
|
|
|
|
struct mgmt_ds_ctx *dst_ds_ctx,
|
|
|
|
bool validate_only, bool abort,
|
2024-03-03 20:40:16 +01:00
|
|
|
bool implicit, struct mgmt_edit_req *edit)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (txn->commit_cfg_req) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Commit already in-progress txn-id: %" PRIu64
|
|
|
|
" session-id: %" PRIu64 ". Cannot start another",
|
|
|
|
txn->txn_id, txn->session_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
|
|
|
|
txn_req->req.commit_cfg.src_ds_id = src_ds_id;
|
|
|
|
txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
|
|
|
|
txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
|
|
|
|
txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
|
|
|
|
txn_req->req.commit_cfg.validate_only = validate_only;
|
|
|
|
txn_req->req.commit_cfg.abort = abort;
|
|
|
|
txn_req->req.commit_cfg.implicit = implicit;
|
2024-03-03 20:40:16 +01:00
|
|
|
txn_req->req.commit_cfg.edit = edit;
|
2021-10-28 09:07:11 +02:00
|
|
|
txn_req->req.commit_cfg.cmt_stats =
|
|
|
|
mgmt_fe_get_session_commit_stats(txn->session_id);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Trigger a COMMIT-CONFIG process.
|
|
|
|
*/
|
|
|
|
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
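/*
 * Backend adapter connect/disconnect notification. On connect, lock the
 * running datastore, compute the config relevant to this client and push
 * it down via a fresh CONFIG transaction. On disconnect, fail any
 * in-progress commit that still involves this client.
 */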
int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
|
2023-06-26 18:59:59 +02:00
|
|
|
bool connect)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
struct mgmt_commit_cfg_req *cmtcfg_req;
|
|
|
|
static struct mgmt_commit_stats dummy_stats;
|
|
|
|
struct nb_config_cbs *adapter_cfgs = NULL;
|
2024-02-02 23:42:58 +01:00
|
|
|
struct mgmt_ds_ctx *ds_ctx;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
memset(&dummy_stats, 0, sizeof(dummy_stats));
|
|
|
|
if (connect) {
|
2024-02-02 23:42:58 +01:00
|
|
|
ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
|
|
|
|
assert(ds_ctx);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock the running datastore to prevent any changes while we
|
|
|
|
* are initializing the backend.
|
|
|
|
*/
|
|
|
|
if (mgmt_ds_lock(ds_ctx, 0) != 0)
|
|
|
|
return -1;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2024-02-02 23:42:58 +01:00
|
|
|
/* Get config for this single backend client */
|
2023-06-14 15:32:16 +02:00
|
|
|
mgmt_be_get_adapter_config(adapter, &adapter_cfgs);
|
2021-10-28 09:07:11 +02:00
|
|
|
if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
|
|
|
|
SET_FLAG(adapter->flags,
|
|
|
|
MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
|
2024-02-02 23:42:58 +01:00
|
|
|
mgmt_ds_unlock(ds_ctx);
|
2021-10-28 09:07:11 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a CONFIG transaction to push the config changes
|
|
|
|
* provided to the backend client.
|
|
|
|
*/
|
|
|
|
txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
|
|
|
|
if (!txn) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
|
|
|
|
adapter->name);
|
2024-02-02 23:42:58 +01:00
|
|
|
mgmt_ds_unlock(ds_ctx);
|
2024-01-25 12:53:53 +01:00
|
|
|
nb_config_diff_del_changes(adapter_cfgs);
|
2021-10-28 09:07:11 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Created initial txn-id: %" PRIu64 " for BE client '%s'",
|
|
|
|
txn->txn_id, adapter->name);
|
2021-10-28 09:07:11 +02:00
|
|
|
/*
|
|
|
|
* Set the changeset for transaction to commit and trigger the
|
|
|
|
* commit request.
|
|
|
|
*/
|
2023-06-26 18:59:59 +02:00
|
|
|
txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
|
2021-10-28 09:07:11 +02:00
|
|
|
txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
|
|
|
|
txn_req->req.commit_cfg.src_ds_ctx = 0;
|
2024-02-02 23:42:58 +01:00
|
|
|
txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
|
|
|
|
txn_req->req.commit_cfg.dst_ds_ctx = ds_ctx;
|
2021-10-28 09:07:11 +02:00
|
|
|
txn_req->req.commit_cfg.validate_only = false;
|
|
|
|
txn_req->req.commit_cfg.abort = false;
|
2024-02-02 23:42:58 +01:00
|
|
|
txn_req->req.commit_cfg.init = true;
|
2021-10-28 09:07:11 +02:00
|
|
|
txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
|
|
|
|
txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Trigger a COMMIT-CONFIG process.
|
|
|
|
*/
|
|
|
|
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
|
|
|
|
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Check if any transaction is currently on-going that
|
|
|
|
* involves this backend client. If so, report the transaction
|
|
|
|
* has failed.
|
|
|
|
*/
|
|
|
|
FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
|
2023-05-23 14:35:56 +02:00
|
|
|
/* TODO: update with operational state when that is
|
|
|
|
* completed */
|
2021-10-28 09:07:11 +02:00
|
|
|
if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
|
|
|
|
cmtcfg_req = txn->commit_cfg_req
|
2023-06-26 18:59:59 +02:00
|
|
|
? &txn->commit_cfg_req->req
|
|
|
|
.commit_cfg
|
2021-10-28 09:07:11 +02:00
|
|
|
: NULL;
|
2023-07-09 05:11:15 +02:00
|
|
|
if (cmtcfg_req && IS_IDBIT_SET(cmtcfg_req->clients,
|
|
|
|
adapter->id)) {
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_send_commit_cfg_reply(
|
|
|
|
txn, MGMTD_INTERNAL_ERROR,
|
|
|
|
"Backend daemon disconnected while processing commit!");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-06-26 18:59:59 +02:00
|
|
|
int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
|
|
|
|
struct mgmt_be_client_adapter *adapter)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!create && !txn->commit_cfg_req)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
assert(txn->commit_cfg_req);
|
|
|
|
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
|
|
|
|
if (create) {
|
|
|
|
if (success) {
|
|
|
|
/*
|
|
|
|
* Done with TXN_CREATE. Move the backend client to
|
|
|
|
* next phase.
|
|
|
|
*/
|
2024-01-11 22:41:29 +01:00
|
|
|
assert(cmtcfg_req->phase ==
|
2023-06-26 18:59:59 +02:00
|
|
|
MGMTD_COMMIT_PHASE_TXN_CREATE);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Send CFGDATA_CREATE-REQs to the backend immediately.
|
|
|
|
*/
|
|
|
|
mgmt_txn_send_be_cfg_data(txn, adapter);
|
|
|
|
} else {
|
|
|
|
mgmt_txn_send_commit_cfg_reply(
|
|
|
|
txn, MGMTD_INTERNAL_ERROR,
|
|
|
|
"Internal error! Failed to initiate transaction at backend!");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-10-20 23:51:01 +02:00
|
|
|
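/*
 * Handle a CFGDATA_CREATE reply from a backend client. A failure aborts
 * the whole commit; on success the client is moved to the APPLY_CFG
 * phase and the commit advances once all involved clients have replied.
 */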
int mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, bool success,
|
|
|
|
char *error_if_any,
|
2023-06-26 18:59:59 +02:00
|
|
|
struct mgmt_be_client_adapter *adapter)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
2023-06-02 21:09:29 +02:00
|
|
|
struct mgmt_commit_cfg_req *cmtcfg_req;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!txn->commit_cfg_req)
|
|
|
|
return -1;
|
|
|
|
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
|
|
|
|
|
|
|
|
if (!success) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
|
|
|
|
" err: %s",
|
|
|
|
adapter->name, txn->txn_id,
|
|
|
|
error_if_any ? error_if_any : "None");
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_send_commit_cfg_reply(
|
|
|
|
txn, MGMTD_INTERNAL_ERROR,
|
2023-06-26 18:59:59 +02:00
|
|
|
error_if_any
|
|
|
|
? error_if_any
|
|
|
|
: "Internal error! Failed to download config data to backend!");
|
2021-10-28 09:07:11 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
|
|
|
|
" err: %s",
|
|
|
|
adapter->name, txn->txn_id, error_if_any ? error_if_any : "None");
|
2023-10-20 23:51:01 +02:00
|
|
|
|
|
|
|
cmtcfg_req->be_phase[adapter->id] = MGMTD_COMMIT_PHASE_APPLY_CFG;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-03-08 23:11:43 +01:00
|
|
|
int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
|
2023-10-17 14:02:57 +02:00
|
|
|
char *error_if_any,
|
2021-10-28 09:07:11 +02:00
|
|
|
struct mgmt_be_client_adapter *adapter)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
2023-06-26 18:59:59 +02:00
|
|
|
if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
|
2021-10-28 09:07:11 +02:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
|
|
|
|
|
|
|
|
if (!success) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
|
|
|
|
" err: %s",
|
|
|
|
adapter->name, txn->txn_id,
|
|
|
|
error_if_any ? error_if_any : "None");
|
2021-10-28 09:07:11 +02:00
|
|
|
mgmt_txn_send_commit_cfg_reply(
|
|
|
|
txn, MGMTD_INTERNAL_ERROR,
|
2023-06-26 18:59:59 +02:00
|
|
|
error_if_any
|
|
|
|
? error_if_any
|
|
|
|
: "Internal error! Failed to apply config data on backend!");
|
2021-10-28 09:07:11 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-10-20 23:51:01 +02:00
|
|
|
cmtcfg_req->be_phase[adapter->id] = MGMTD_COMMIT_PHASE_TXN_DELETE;
|
2021-10-28 09:07:11 +02:00
|
|
|
|
2023-10-17 14:02:57 +02:00
|
|
|
/*
|
|
|
|
* All configuration for the specific backend has been applied.
|
|
|
|
* Send TXN-DELETE to wrap up the transaction for this backend.
|
|
|
|
*/
|
|
|
|
SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
|
|
|
|
mgmt_txn_send_be_txn_delete(txn, adapter);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
|
|
|
|
if (mm->perf_stats_en)
|
|
|
|
gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-06-27 19:58:54 +02:00
|
|
|
int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
|
|
|
|
Mgmtd__DatastoreId ds_id, struct nb_config *cfg_root,
|
|
|
|
Mgmtd__YangGetDataReq **data_req, size_t num_reqs)
|
2021-10-28 09:07:11 +02:00
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
2023-06-27 19:58:54 +02:00
|
|
|
enum mgmt_txn_event req_event;
|
2021-10-28 09:07:11 +02:00
|
|
|
size_t indx;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return -1;
|
|
|
|
|
2023-10-10 02:52:54 +02:00
|
|
|
req_event = MGMTD_TXN_PROC_GETCFG;
|
2023-06-27 19:58:54 +02:00
|
|
|
txn_req = mgmt_txn_req_alloc(txn, req_id, req_event);
|
2021-10-28 09:07:11 +02:00
|
|
|
txn_req->req.get_data->ds_id = ds_id;
|
2023-06-27 19:58:54 +02:00
|
|
|
txn_req->req.get_data->cfg_root = cfg_root;
|
2021-10-28 09:07:11 +02:00
|
|
|
for (indx = 0;
|
|
|
|
indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
|
|
|
|
indx++) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("XPath: '%s'", data_req[indx]->data->xpath);
|
2021-10-28 09:07:11 +02:00
|
|
|
txn_req->req.get_data->xpaths[indx] =
|
|
|
|
strdup(data_req[indx]->data->xpath);
|
|
|
|
txn_req->req.get_data->num_xpaths++;
|
|
|
|
}
|
|
|
|
|
2023-06-27 19:58:54 +02:00
|
|
|
mgmt_txn_register_event(txn, req_event);
|
2021-10-28 09:07:11 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Send get-tree requests to each client indicated in `clients` bitmask, which
|
|
|
|
* has registered operational state that matches the given `xpath`
|
|
|
|
*/
|
|
|
|
int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
|
2024-01-26 02:08:55 +01:00
|
|
|
uint64_t clients, Mgmtd__DatastoreId ds_id,
|
|
|
|
LYD_FORMAT result_type, uint8_t flags,
|
|
|
|
uint32_t wd_options, bool simple_xpath,
|
|
|
|
const char *xpath)
|
2023-07-07 05:23:24 +02:00
|
|
|
{
|
|
|
|
struct mgmt_msg_get_tree *msg;
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
struct txn_req_get_tree *get_tree;
|
|
|
|
enum mgmt_be_client_id id;
|
2023-12-13 23:32:43 +01:00
|
|
|
ssize_t slen = strlen(xpath);
|
2023-07-07 05:23:24 +02:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* If error in this function below here, be sure to free the req */
|
|
|
|
txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETTREE);
|
|
|
|
get_tree = txn_req->req.get_tree;
|
|
|
|
get_tree->result_type = result_type;
|
2024-01-29 21:50:26 +01:00
|
|
|
get_tree->wd_options = wd_options;
|
2024-01-13 23:53:21 +01:00
|
|
|
get_tree->exact = CHECK_FLAG(flags, GET_DATA_FLAG_EXACT);
|
2024-01-06 10:45:29 +01:00
|
|
|
get_tree->simple_xpath = simple_xpath;
|
2023-07-07 05:23:24 +02:00
|
|
|
get_tree->xpath = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
|
2024-01-13 21:51:45 +01:00
|
|
|
|
|
|
|
if (CHECK_FLAG(flags, GET_DATA_FLAG_CONFIG)) {
|
2024-01-26 02:08:55 +01:00
|
|
|
/*
|
|
|
|
* If the requested datastore is operational, get the config
|
|
|
|
* from running.
|
|
|
|
*/
|
2024-01-13 21:51:45 +01:00
|
|
|
struct mgmt_ds_ctx *ds =
|
2024-01-26 02:08:55 +01:00
|
|
|
mgmt_ds_get_ctx_by_id(mm, ds_id == MGMTD_DS_OPERATIONAL
|
|
|
|
? MGMTD_DS_RUNNING
|
|
|
|
: ds_id);
|
2024-01-13 21:51:45 +01:00
|
|
|
struct nb_config *config = mgmt_ds_get_nb_config(ds);
|
|
|
|
|
|
|
|
if (config) {
|
|
|
|
struct ly_set *set = NULL;
|
|
|
|
LY_ERR err;
|
|
|
|
|
|
|
|
err = lyd_find_xpath(config->dnode, xpath, &set);
|
|
|
|
if (err) {
|
|
|
|
get_tree->partial_error = err;
|
|
|
|
goto state;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If there's a single result, duplicate the returned
|
|
|
|
* node. If there are multiple results, duplicate the
|
|
|
|
* whole config and mark simple_xpath as false so the
|
|
|
|
* result is trimmed later in txn_get_tree_data_done.
|
|
|
|
*/
|
|
|
|
if (set->count == 1) {
|
|
|
|
err = lyd_dup_single(set->dnodes[0], NULL,
|
|
|
|
LYD_DUP_WITH_PARENTS |
|
|
|
|
LYD_DUP_WITH_FLAGS |
|
|
|
|
LYD_DUP_RECURSIVE,
|
|
|
|
&get_tree->client_results);
|
|
|
|
if (!err)
|
|
|
|
while (get_tree->client_results->parent)
|
|
|
|
get_tree->client_results = lyd_parent(
|
|
|
|
get_tree->client_results);
|
|
|
|
} else if (set->count > 1) {
|
|
|
|
err = lyd_dup_siblings(config->dnode, NULL,
|
|
|
|
LYD_DUP_RECURSIVE |
|
|
|
|
LYD_DUP_WITH_FLAGS,
|
|
|
|
&get_tree->client_results);
|
|
|
|
if (!err)
|
|
|
|
get_tree->simple_xpath = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
get_tree->partial_error = err;
|
|
|
|
|
|
|
|
ly_set_free(set, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
state:
|
|
|
|
/* If we are only getting config, we are done */
|
2024-01-26 02:08:55 +01:00
|
|
|
if (!CHECK_FLAG(flags, GET_DATA_FLAG_STATE) ||
|
|
|
|
ds_id != MGMTD_DS_OPERATIONAL || !clients)
|
2024-01-13 21:51:45 +01:00
|
|
|
return txn_get_tree_data_done(txn, txn_req);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
2023-12-13 23:32:43 +01:00
|
|
|
msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_tree, slen + 1,
|
|
|
|
MTYPE_MSG_NATIVE_GET_TREE);
|
|
|
|
msg->refer_id = txn_id;
|
2023-07-07 05:23:24 +02:00
|
|
|
msg->req_id = req_id;
|
|
|
|
msg->code = MGMT_MSG_CODE_GET_TREE;
|
|
|
|
/* Always operate with the binary format in the backend */
|
|
|
|
msg->result_type = LYD_LYB;
|
2023-12-13 23:32:43 +01:00
|
|
|
strlcpy(msg->xpath, xpath, slen + 1);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
assert(clients);
|
|
|
|
FOREACH_BE_CLIENT_BITS (id, clients) {
|
2023-12-13 23:32:43 +01:00
|
|
|
ret = mgmt_be_send_native(id, msg);
|
2023-07-07 05:23:24 +02:00
|
|
|
if (ret) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Could not send get-tree message to backend client %s",
|
|
|
|
mgmt_be_client_id2name(id));
|
2023-07-07 05:23:24 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Sent get-tree req to backend client %s",
|
|
|
|
mgmt_be_client_id2name(id));
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/* record that we sent the request to the client */
|
|
|
|
get_tree->sent_clients |= (1u << id);
|
|
|
|
}
|
|
|
|
|
2023-12-13 23:32:43 +01:00
|
|
|
mgmt_msg_native_free_msg(msg);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
2024-01-28 00:18:10 +01:00
|
|
|
/* Return if we didn't send any messages to backends */
|
|
|
|
if (!get_tree->sent_clients)
|
|
|
|
return txn_get_tree_data_done(txn, txn_req);
|
|
|
|
|
2023-07-07 05:23:24 +02:00
|
|
|
/* Start the timeout timer here (rather than via the register-event code)
|
|
|
|
 * so we can pass a different event argument (the txn_req).
|
|
|
|
*/
|
|
|
|
event_add_timer(mgmt_txn_tm, txn_get_tree_timeout, txn_req,
|
|
|
|
MGMTD_TXN_GET_TREE_MAX_DELAY_SEC,
|
|
|
|
&txn->get_tree_timeout);
|
|
|
|
return 0;
|
|
|
|
}
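
/*
 * Illustrative caller sketch (not invoked anywhere in mgmtd): request both
 * config and operational state under one xpath from a set of backend
 * clients.  The req-id, flags and xpath below are assumptions chosen only to
 * show the parameter order; real callers derive them from the frontend
 * GET-DATA request.
 */
static inline int __attribute__((unused))
mgmt_txn_example_get_tree(uint64_t txn_id, uint64_t clients)
{
	return mgmt_txn_send_get_tree_oper(txn_id, 1, clients,
					   MGMTD_DS_OPERATIONAL, LYD_JSON,
					   GET_DATA_FLAG_CONFIG |
						   GET_DATA_FLAG_STATE,
					   0 /* wd_options */,
					   true /* simple_xpath */,
					   "/frr-interface:lib/interface");
}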
|
|
|
|
|
2024-03-03 20:40:16 +01:00
|
|
|
int mgmt_txn_send_edit(uint64_t txn_id, uint64_t req_id,
|
|
|
|
Mgmtd__DatastoreId ds_id, struct mgmt_ds_ctx *ds_ctx,
|
|
|
|
Mgmtd__DatastoreId commit_ds_id,
|
|
|
|
struct mgmt_ds_ctx *commit_ds_ctx, bool unlock,
|
|
|
|
bool commit, LYD_FORMAT request_type, uint8_t flags,
|
|
|
|
uint8_t operation, const char *xpath, const char *data)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_edit_req *edit;
|
|
|
|
struct nb_config *nb_config;
|
|
|
|
char errstr[BUFSIZ] = "";
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
edit = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_edit_req));
|
|
|
|
|
|
|
|
nb_config = mgmt_ds_get_nb_config(ds_ctx);
|
|
|
|
assert(nb_config);
|
|
|
|
|
|
|
|
ret = nb_candidate_edit_tree(nb_config, operation, request_type, xpath,
|
2024-09-17 08:27:31 +02:00
|
|
|
data, &edit->created, edit->xpath_created,
|
|
|
|
errstr, sizeof(errstr));
|
2024-03-03 20:40:16 +01:00
|
|
|
if (ret)
|
|
|
|
goto reply;
|
|
|
|
|
|
|
|
if (commit) {
|
|
|
|
edit->unlock = unlock;
|
|
|
|
|
|
|
|
mgmt_txn_send_commit_config_req(txn_id, req_id, ds_id, ds_ctx,
|
|
|
|
commit_ds_id, commit_ds_ctx,
|
|
|
|
false, false, true, edit);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
reply:
|
|
|
|
mgmt_fe_adapter_send_edit_reply(txn->session_id, txn->txn_id, req_id,
|
2024-09-17 08:27:31 +02:00
|
|
|
unlock, commit, edit->created,
|
|
|
|
edit->xpath_created,
|
2024-09-17 08:27:03 +02:00
|
|
|
errno_from_nb_error(ret), errstr);
|
2024-03-03 20:40:16 +01:00
|
|
|
|
|
|
|
XFREE(MTYPE_MGMTD_TXN_REQ, edit);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
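
/*
 * Illustrative caller sketch, under stated assumptions: edit the candidate
 * datastore and request an implicit commit into running.  The operation
 * (NB_OP_MODIFY) and JSON request type are illustrative choices; `xpath` and
 * `json_data` are supplied by the (hypothetical) caller.
 */
static inline int __attribute__((unused))
mgmt_txn_example_edit(uint64_t txn_id, uint64_t req_id, const char *xpath,
		      const char *json_data)
{
	struct mgmt_ds_ctx *cand = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
	struct mgmt_ds_ctx *run = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);

	return mgmt_txn_send_edit(txn_id, req_id, MGMTD_DS_CANDIDATE, cand,
				  MGMTD_DS_RUNNING, run, false /* unlock */,
				  true /* commit */, LYD_JSON, 0 /* flags */,
				  NB_OP_MODIFY, xpath, json_data);
}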
|
|
|
|
|
2024-03-19 20:11:59 +01:00
|
|
|
int mgmt_txn_send_rpc(uint64_t txn_id, uint64_t req_id, uint64_t clients,
|
|
|
|
LYD_FORMAT result_type, const char *xpath,
|
|
|
|
const char *data, size_t data_len)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
struct mgmt_msg_rpc *msg;
|
|
|
|
struct txn_req_rpc *rpc;
|
|
|
|
uint64_t id;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
if (!txn)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_RPC);
|
|
|
|
rpc = txn_req->req.rpc;
|
|
|
|
rpc->xpath = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
|
|
|
|
rpc->result_type = result_type;
|
|
|
|
|
|
|
|
msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_rpc, 0,
|
|
|
|
MTYPE_MSG_NATIVE_RPC);
|
|
|
|
msg->refer_id = txn_id;
|
|
|
|
msg->req_id = req_id;
|
|
|
|
msg->code = MGMT_MSG_CODE_RPC;
|
|
|
|
msg->request_type = result_type;
|
|
|
|
|
|
|
|
mgmt_msg_native_xpath_encode(msg, xpath);
|
|
|
|
if (data)
|
|
|
|
mgmt_msg_native_append(msg, data, data_len);
|
|
|
|
|
|
|
|
assert(clients);
|
|
|
|
FOREACH_BE_CLIENT_BITS (id, clients) {
|
|
|
|
ret = mgmt_be_send_native(id, msg);
|
|
|
|
if (ret) {
|
|
|
|
__log_err("Could not send rpc message to backend client %s",
|
|
|
|
mgmt_be_client_id2name(id));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
__dbg("Sent rpc req to backend client %s",
|
|
|
|
mgmt_be_client_id2name(id));
|
|
|
|
|
|
|
|
/* record that we sent the request to the client */
|
|
|
|
rpc->sent_clients |= (1u << id);
|
|
|
|
}
|
|
|
|
|
|
|
|
mgmt_msg_native_free_msg(msg);
|
|
|
|
|
|
|
|
if (!rpc->sent_clients)
|
|
|
|
return txn_rpc_done(txn, txn_req);
|
|
|
|
|
|
|
|
event_add_timer(mgmt_txn_tm, txn_rpc_timeout, txn_req,
|
|
|
|
MGMTD_TXN_RPC_MAX_DELAY_SEC, &txn->rpc_timeout);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
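
/*
 * Illustrative caller sketch: invoke a YANG RPC on a set of backend clients
 * with no input data.  The RPC xpath is an assumption for illustration;
 * results come back via mgmt_txn_notify_rpc_reply().
 */
static inline int __attribute__((unused))
mgmt_txn_example_rpc(uint64_t txn_id, uint64_t req_id, uint64_t clients)
{
	return mgmt_txn_send_rpc(txn_id, req_id, clients, LYD_JSON,
				 "/frr-ripd:clear-rip-route", NULL, 0);
}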
|
|
|
|
|
2025-01-08 16:34:57 +01:00
|
|
|
int mgmt_txn_send_notify_selectors(uint64_t req_id, uint64_t clients, const char **selectors)
|
|
|
|
{
|
|
|
|
struct mgmt_msg_notify_select *msg;
|
|
|
|
char **all_selectors = NULL;
|
|
|
|
uint64_t id;
|
|
|
|
int ret;
|
|
|
|
uint i;
|
|
|
|
|
|
|
|
msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_notify_select, 0,
|
|
|
|
MTYPE_MSG_NATIVE_NOTIFY_SELECT);
|
|
|
|
msg->refer_id = MGMTD_TXN_ID_NONE;
|
|
|
|
msg->req_id = req_id;
|
|
|
|
msg->code = MGMT_MSG_CODE_NOTIFY_SELECT;
|
|
|
|
msg->replace = selectors == NULL;
|
|
|
|
|
|
|
|
if (selectors == NULL) {
|
|
|
|
/* Get selectors for all sessions */
|
|
|
|
all_selectors = mgmt_fe_get_all_selectors();
|
|
|
|
selectors = (const char **)all_selectors;
|
|
|
|
}
|
|
|
|
|
|
|
|
darr_foreach_i (selectors, i)
|
|
|
|
mgmt_msg_native_add_str(msg, selectors[i]);
|
|
|
|
|
|
|
|
assert(clients);
|
|
|
|
FOREACH_BE_CLIENT_BITS (id, clients) {
|
|
|
|
/* make sure the backend is running/connected */
|
|
|
|
if (!mgmt_be_get_adapter_by_id(id))
|
|
|
|
continue;
|
|
|
|
ret = mgmt_be_send_native(id, msg);
|
|
|
|
if (ret) {
|
|
|
|
__log_err("Could not send notify-select message to backend client %s",
|
|
|
|
mgmt_be_client_id2name(id));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
__dbg("Sent notify-select req to backend client %s", mgmt_be_client_id2name(id));
|
|
|
|
}
|
|
|
|
mgmt_msg_native_free_msg(msg);
|
|
|
|
|
|
|
|
if (all_selectors)
|
|
|
|
darr_free_free(all_selectors);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
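
/*
 * Illustrative caller sketch: passing NULL selectors makes the function
 * collect the selectors of all frontend sessions and send them as a full
 * replacement to every backend in `clients`.  Note that a non-NULL
 * `selectors` argument must be a darr array (see darr.h), since it is
 * walked with darr_foreach_i() above.
 */
static inline int __attribute__((unused))
mgmt_txn_example_resync_selectors(uint64_t req_id, uint64_t clients)
{
	return mgmt_txn_send_notify_selectors(req_id, clients, NULL);
}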
|
|
|
|
|
2023-07-07 05:23:24 +02:00
|
|
|
/*
|
|
|
|
* Error reply from the backend client.
|
|
|
|
*/
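/*
 * Completion bookkeeping: for native-message requests (get-tree and rpc) the
 * transaction tracks which backends were sent the request (sent_clients) and
 * which have answered (recv_clients).  An error reply counts as that
 * backend's answer, and the request is finalized once the two bitmasks
 * match.
 */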
|
|
|
|
int mgmt_txn_notify_error(struct mgmt_be_client_adapter *adapter,
|
|
|
|
uint64_t txn_id, uint64_t req_id, int error,
|
|
|
|
const char *errstr)
|
|
|
|
{
|
|
|
|
enum mgmt_be_client_id id = adapter->id;
|
|
|
|
struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
struct txn_req_get_tree *get_tree;
|
2024-03-19 20:11:59 +01:00
|
|
|
struct txn_req_rpc *rpc;
|
2023-07-07 05:23:24 +02:00
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
|
|
|
|
if (!txn) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Error reply from %s cannot find txn-id %" PRIu64,
|
|
|
|
adapter->name, txn_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the request. */
|
|
|
|
FOREACH_TXN_REQ_IN_LIST (&txn->get_tree_reqs, txn_req)
|
|
|
|
if (txn_req->req_id == req_id)
|
|
|
|
break;
|
2024-03-19 20:11:59 +01:00
|
|
|
if (!txn_req)
|
|
|
|
FOREACH_TXN_REQ_IN_LIST (&txn->rpc_reqs, txn_req)
|
|
|
|
if (txn_req->req_id == req_id)
|
|
|
|
break;
|
2023-07-07 05:23:24 +02:00
|
|
|
if (!txn_req) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Error reply from %s for txn-id %" PRIu64
|
|
|
|
" cannot find req_id %" PRIu64,
|
|
|
|
adapter->name, txn_id, req_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("Error reply from %s for txn-id %" PRIu64 " req_id %" PRIu64,
|
|
|
|
adapter->name, txn_id, req_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
switch (txn_req->req_event) {
|
|
|
|
case MGMTD_TXN_PROC_GETTREE:
|
|
|
|
get_tree = txn_req->req.get_tree;
|
|
|
|
get_tree->recv_clients |= (1u << id);
|
|
|
|
get_tree->partial_error = error;
|
|
|
|
|
|
|
|
/* check if done yet */
|
|
|
|
if (get_tree->recv_clients != get_tree->sent_clients)
|
|
|
|
return 0;
|
|
|
|
return txn_get_tree_data_done(txn, txn_req);
|
2024-03-19 20:11:59 +01:00
|
|
|
case MGMTD_TXN_PROC_RPC:
|
|
|
|
rpc = txn_req->req.rpc;
|
|
|
|
rpc->recv_clients |= (1u << id);
|
2024-05-07 04:40:27 +02:00
|
|
|
if (errstr) {
|
|
|
|
XFREE(MTYPE_MGMTD_ERR, rpc->errstr);
|
|
|
|
rpc->errstr = XSTRDUP(MTYPE_MGMTD_ERR, errstr);
|
|
|
|
}
|
2024-03-19 20:11:59 +01:00
|
|
|
/* check if done yet */
|
|
|
|
if (rpc->recv_clients != rpc->sent_clients)
|
|
|
|
return 0;
|
|
|
|
return txn_rpc_done(txn, txn_req);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/* non-native message events */
|
|
|
|
case MGMTD_TXN_PROC_SETCFG:
|
|
|
|
case MGMTD_TXN_PROC_COMMITCFG:
|
|
|
|
case MGMTD_TXN_PROC_GETCFG:
|
|
|
|
case MGMTD_TXN_COMMITCFG_TIMEOUT:
|
|
|
|
default:
|
2024-08-14 22:53:09 +02:00
|
|
|
assert(!"non-native req event in native error path");
|
2023-07-07 05:23:24 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get-tree data from the backend client.
|
|
|
|
*/
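/*
 * A backend may stream its result across multiple tree-data messages; the
 * partial trees are merged into get_tree->client_results, and the backend is
 * only marked as answered (recv_clients) once a message arrives with the
 * `more` flag clear.  When every backend in sent_clients has answered, the
 * collected result is finalized in txn_get_tree_data_done().
 */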
|
|
|
|
int mgmt_txn_notify_tree_data_reply(struct mgmt_be_client_adapter *adapter,
|
|
|
|
struct mgmt_msg_tree_data *data_msg,
|
|
|
|
size_t msg_len)
|
|
|
|
{
|
2023-12-13 23:32:43 +01:00
|
|
|
uint64_t txn_id = data_msg->refer_id;
|
2023-07-07 05:23:24 +02:00
|
|
|
uint64_t req_id = data_msg->req_id;
|
|
|
|
|
|
|
|
enum mgmt_be_client_id id = adapter->id;
|
|
|
|
struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
struct txn_req_get_tree *get_tree;
|
|
|
|
struct lyd_node *tree = NULL;
|
|
|
|
LY_ERR err;
|
|
|
|
|
|
|
|
if (!txn) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("GETTREE reply from %s for a missing txn-id %" PRIu64,
|
|
|
|
adapter->name, txn_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the request. */
|
|
|
|
FOREACH_TXN_REQ_IN_LIST (&txn->get_tree_reqs, txn_req)
|
|
|
|
if (txn_req->req_id == req_id)
|
|
|
|
break;
|
|
|
|
if (!txn_req) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("GETTREE reply from %s for txn-id %" PRIu64
|
|
|
|
" missing req_id %" PRIu64,
|
|
|
|
adapter->name, txn_id, req_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
get_tree = txn_req->req.get_tree;
|
|
|
|
|
|
|
|
/* store the result */
|
|
|
|
err = lyd_parse_data_mem(ly_native_ctx, (const char *)data_msg->result,
|
|
|
|
data_msg->result_type,
|
|
|
|
LYD_PARSE_STRICT | LYD_PARSE_ONLY,
|
|
|
|
0 /*LYD_VALIDATE_OPERATIONAL*/, &tree);
|
|
|
|
if (err) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("GETTREE reply from %s for txn-id %" PRIu64
|
|
|
|
" req_id %" PRIu64 " error parsing result of type %u",
|
|
|
|
adapter->name, txn_id, req_id, data_msg->result_type);
|
2023-07-07 05:23:24 +02:00
|
|
|
}
|
|
|
|
if (!err) {
|
|
|
|
/* TODO: we could merge ly_errs here if it's not binary */
|
|
|
|
|
|
|
|
if (!get_tree->client_results)
|
|
|
|
get_tree->client_results = tree;
|
|
|
|
else
|
|
|
|
err = lyd_merge_siblings(&get_tree->client_results,
|
|
|
|
tree, LYD_MERGE_DESTRUCT);
|
|
|
|
if (err) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err("GETTREE reply from %s for txn-id %" PRIu64
|
|
|
|
" req_id %" PRIu64 " error merging result",
|
|
|
|
adapter->name, txn_id, req_id);
|
2023-07-07 05:23:24 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!get_tree->partial_error)
|
|
|
|
get_tree->partial_error = (data_msg->partial_error
|
|
|
|
? data_msg->partial_error
|
|
|
|
: (int)err);
|
|
|
|
|
2023-10-30 10:09:19 +01:00
|
|
|
if (!data_msg->more)
|
|
|
|
get_tree->recv_clients |= (1u << id);
|
2023-07-07 05:23:24 +02:00
|
|
|
|
|
|
|
/* check if done yet */
|
|
|
|
if (get_tree->recv_clients != get_tree->sent_clients)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return txn_get_tree_data_done(txn, txn_req);
|
|
|
|
}
|
|
|
|
|
2024-03-19 20:11:59 +01:00
|
|
|
int mgmt_txn_notify_rpc_reply(struct mgmt_be_client_adapter *adapter,
|
|
|
|
struct mgmt_msg_rpc_reply *reply_msg,
|
|
|
|
size_t msg_len)
|
|
|
|
{
|
|
|
|
uint64_t txn_id = reply_msg->refer_id;
|
|
|
|
uint64_t req_id = reply_msg->req_id;
|
|
|
|
enum mgmt_be_client_id id = adapter->id;
|
|
|
|
struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
struct txn_req_rpc *rpc;
|
2024-05-07 04:40:27 +02:00
|
|
|
struct lyd_node *tree;
|
2024-03-19 20:11:59 +01:00
|
|
|
size_t data_len = msg_len - sizeof(*reply_msg);
|
2024-05-07 04:40:27 +02:00
|
|
|
LY_ERR err = LY_SUCCESS;
|
2024-03-19 20:11:59 +01:00
|
|
|
|
|
|
|
if (!txn) {
|
|
|
|
__log_err("RPC reply from %s for a missing txn-id %" PRIu64,
|
|
|
|
adapter->name, txn_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Find the request. */
|
|
|
|
FOREACH_TXN_REQ_IN_LIST (&txn->rpc_reqs, txn_req)
|
|
|
|
if (txn_req->req_id == req_id)
|
|
|
|
break;
|
|
|
|
if (!txn_req) {
|
|
|
|
__log_err("RPC reply from %s for txn-id %" PRIu64
|
|
|
|
" missing req_id %" PRIu64,
|
|
|
|
adapter->name, txn_id, req_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
rpc = txn_req->req.rpc;
|
|
|
|
|
2024-05-07 04:40:27 +02:00
|
|
|
tree = NULL;
|
|
|
|
if (data_len)
|
2024-03-19 20:11:59 +01:00
|
|
|
err = yang_parse_rpc(rpc->xpath, reply_msg->result_type,
|
2024-05-07 04:40:27 +02:00
|
|
|
reply_msg->data, true, &tree);
|
|
|
|
if (err) {
|
|
|
|
__log_err("RPC reply from %s for txn-id %" PRIu64
|
|
|
|
" req_id %" PRIu64 " error parsing result of type %u: %s",
|
|
|
|
adapter->name, txn_id, req_id, reply_msg->result_type,
|
|
|
|
ly_strerrcode(err));
|
|
|
|
}
|
|
|
|
if (!err && tree) {
|
|
|
|
if (!rpc->client_results)
|
|
|
|
rpc->client_results = tree;
|
|
|
|
else
|
|
|
|
err = lyd_merge_siblings(&rpc->client_results, tree,
|
|
|
|
LYD_MERGE_DESTRUCT);
|
2024-03-19 20:11:59 +01:00
|
|
|
if (err) {
|
|
|
|
__log_err("RPC reply from %s for txn-id %" PRIu64
|
2024-05-07 04:40:27 +02:00
|
|
|
" req_id %" PRIu64 " error merging result: %s",
|
2024-03-19 20:11:59 +01:00
|
|
|
adapter->name, txn_id, req_id,
|
2024-05-07 04:40:27 +02:00
|
|
|
ly_strerrcode(err));
|
2024-03-19 20:11:59 +01:00
|
|
|
}
|
|
|
|
}
|
2024-05-07 04:40:27 +02:00
|
|
|
if (err) {
|
|
|
|
XFREE(MTYPE_MGMTD_ERR, rpc->errstr);
|
|
|
|
rpc->errstr = XSTRDUP(MTYPE_MGMTD_ERR,
|
|
|
|
"Cannot parse result from the backend");
|
|
|
|
}
|
2024-03-19 20:11:59 +01:00
|
|
|
|
|
|
|
rpc->recv_clients |= (1u << id);
|
|
|
|
|
|
|
|
/* check if done yet */
|
|
|
|
if (rpc->recv_clients != rpc->sent_clients)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return txn_rpc_done(txn, txn_req);
|
|
|
|
}
|
|
|
|
|
2021-10-28 09:07:11 +02:00
|
|
|
void mgmt_txn_status_write(struct vty *vty)
|
|
|
|
{
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
|
|
|
|
vty_out(vty, "MGMTD Transactions\n");
|
|
|
|
|
|
|
|
FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
|
2023-05-15 06:11:46 +02:00
|
|
|
vty_out(vty, " Txn: \t\t\t0x%p\n", txn);
|
|
|
|
vty_out(vty, " Txn-Id: \t\t\t%" PRIu64 "\n", txn->txn_id);
|
|
|
|
vty_out(vty, " Session-Id: \t\t%" PRIu64 "\n",
|
|
|
|
txn->session_id);
|
2021-10-28 09:07:11 +02:00
|
|
|
vty_out(vty, " Type: \t\t\t%s\n",
|
|
|
|
mgmt_txn_type2str(txn->type));
|
|
|
|
vty_out(vty, " Ref-Count: \t\t\t%d\n", txn->refcount);
|
|
|
|
}
|
|
|
|
vty_out(vty, " Total: %d\n",
|
|
|
|
(int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
|
|
|
|
}
|
|
|
|
|
|
|
|
int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
|
|
|
|
struct mgmt_ds_ctx *dst_ds_ctx)
|
|
|
|
{
|
|
|
|
static struct nb_config_cbs changes;
|
2023-06-18 22:19:54 +02:00
|
|
|
static struct mgmt_commit_stats dummy_stats;
|
|
|
|
|
2021-10-28 09:07:11 +02:00
|
|
|
struct nb_config_cbs *cfg_chgs = NULL;
|
|
|
|
struct mgmt_txn_ctx *txn;
|
|
|
|
struct mgmt_txn_req *txn_req;
|
|
|
|
|
|
|
|
memset(&changes, 0, sizeof(changes));
|
|
|
|
memset(&dummy_stats, 0, sizeof(dummy_stats));
|
|
|
|
/*
|
|
|
|
* This could be the case when the config is directly
|
|
|
|
* loaded onto the candidate DS from a file. Get the
|
|
|
|
* diff from a full comparison of the candidate and
|
|
|
|
* running DSs.
|
|
|
|
*/
|
|
|
|
nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
|
|
|
|
mgmt_ds_get_nb_config(src_ds_ctx), &changes);
|
|
|
|
cfg_chgs = &changes;
|
|
|
|
|
|
|
|
if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
|
|
|
|
/*
|
|
|
|
 * This means there are no changes to commit whatsoever,
|
|
|
|
 * whatever the source of the changes in config.
|
|
|
|
*/
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create a CONFIG transaction to push the config changes
|
|
|
|
* provided to the backend client.
|
|
|
|
*/
|
|
|
|
txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
|
|
|
|
if (!txn) {
|
2024-01-31 01:50:52 +01:00
|
|
|
__log_err(
|
2021-10-28 09:07:11 +02:00
|
|
|
"Failed to create CONFIG Transaction for downloading CONFIGs");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2024-01-31 01:50:52 +01:00
|
|
|
__dbg("Created rollback txn-id: %" PRIu64, txn->txn_id);
|
2023-03-08 23:11:43 +01:00
|
|
|
|
2021-10-28 09:07:11 +02:00
|
|
|
/*
|
|
|
|
* Set the changeset for transaction to commit and trigger the commit
|
|
|
|
* request.
|
|
|
|
*/
|
|
|
|
txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
|
|
|
|
txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
|
|
|
|
txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
|
|
|
|
txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
|
|
|
|
txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
|
|
|
|
txn_req->req.commit_cfg.validate_only = false;
|
|
|
|
txn_req->req.commit_cfg.abort = false;
|
|
|
|
txn_req->req.commit_cfg.rollback = true;
|
|
|
|
txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
|
|
|
|
txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Trigger a COMMIT-CONFIG process.
|
|
|
|
*/
|
|
|
|
mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
|
|
|
|
return 0;
|
|
|
|
}
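
/*
 * Illustrative caller sketch: push whatever differs between the candidate
 * and running datastores to the backends as a rollback-style commit.  The
 * datastore lookups here are assumptions for illustration; the real caller
 * lives elsewhere in mgmtd.
 */
static inline int __attribute__((unused))
mgmt_txn_example_rollback_apply(void)
{
	struct mgmt_ds_ctx *cand = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
	struct mgmt_ds_ctx *run = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);

	return mgmt_txn_rollback_trigger_cfg_apply(cand, run);
}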
|