Merge pull request #15154 from idryzhov/mgmt-get-data

mgmtd get-data request expansion
Christian Hopps 2024-01-15 07:03:34 -05:00 committed by GitHub
commit f2bb687426
27 changed files with 287 additions and 56 deletions

View file

@ -50,7 +50,8 @@ Besides the common invocation options (:ref:`common-invocation-options`), the
When *Zebra* starts with this option, the VRF backend is based on Linux
network namespaces. That implies that all network namespaces discovered by
ZEBRA will create an associated VRF. The other daemons will operate on the VRF
VRF defined by *Zebra*, as usual.
VRF defined by *Zebra*, as usual. If this option is specified when running
*Zebra*, one must also specify the same option for *mgmtd*.
.. seealso:: :ref:`zebra-vrf`

View file

@ -306,25 +306,26 @@ int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
}
/*
* Send get-tree request.
* Send get-data request.
*/
int mgmt_fe_send_get_tree_req(struct mgmt_fe_client *client,
uint64_t session_id, uint64_t req_id,
LYD_FORMAT result_type, const char *xpath)
int mgmt_fe_send_get_data_req(struct mgmt_fe_client *client, uint64_t session_id,
uint64_t req_id, LYD_FORMAT result_type,
uint8_t flags, const char *xpath)
{
struct mgmt_msg_get_tree *msg;
struct mgmt_msg_get_data *msg;
size_t xplen = strlen(xpath);
int ret;
msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_tree, xplen + 1,
MTYPE_MSG_NATIVE_GET_TREE);
msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_data, xplen + 1,
MTYPE_MSG_NATIVE_GET_DATA);
msg->refer_id = session_id;
msg->req_id = req_id;
msg->code = MGMT_MSG_CODE_GET_TREE;
msg->code = MGMT_MSG_CODE_GET_DATA;
msg->result_type = result_type;
msg->flags = flags;
strlcpy(msg->xpath, xpath, xplen + 1);
MGMTD_FE_CLIENT_DBG("Sending GET_TREE_REQ session-id %" PRIu64
MGMTD_FE_CLIENT_DBG("Sending GET_DATA_REQ session-id %" PRIu64
" req-id %" PRIu64 " xpath: %s",
session_id, req_id, xpath);

View file

@ -15,6 +15,7 @@ extern "C" {
#include "mgmt_pb.h"
#include "frrevent.h"
#include "mgmt_defines.h"
#include "mgmt_msg_native.h"
/***************************************************************
* Macros
@ -367,7 +368,7 @@ extern int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
int num_reqs);
/*
* Send GET-TREE to MGMTD daemon.
* Send GET-DATA to MGMTD daemon.
*
* client
* Client object.
@ -381,15 +382,19 @@ extern int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
* result_type
* The LYD_FORMAT of the result.
*
* flags
* Flags to control the behavior of the request.
*
* xpath
* the xpath to get.
*
* Returns:
* 0 on success, otherwise msg_conn_send_msg() return values.
*/
extern int mgmt_fe_send_get_tree_req(struct mgmt_fe_client *client,
extern int mgmt_fe_send_get_data_req(struct mgmt_fe_client *client,
uint64_t session_id, uint64_t req_id,
LYD_FORMAT result_type, const char *xpath);
LYD_FORMAT result_type, uint8_t flags,
const char *xpath);
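A minimal usage sketch for the new request (not part of this change): it assumes the client, session-id and request-id come from the usual mgmt_fe_client setup, and the helper name and xpath are illustrative.

```c
/* Sketch only: query both config and state for all interfaces. */
static int example_send_get_data(struct mgmt_fe_client *client,
				 uint64_t session_id, uint64_t req_id)
{
	uint8_t flags = GET_DATA_FLAG_CONFIG | GET_DATA_FLAG_STATE;

	return mgmt_fe_send_get_data_req(client, session_id, req_id, LYD_JSON,
					 flags, "/frr-interface:lib/interface");
}
```

The combined CONFIG|STATE flags correspond to the CLI's "with-config" form added later in this commit; a zero return means the request was queued.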
/*
* Destroy library and cleanup everything.

View file

@ -13,6 +13,7 @@ DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_MSG, "native mgmt msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_ERROR, "native error msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_TREE, "native get tree msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_TREE_DATA, "native tree data msg");
DEFINE_MTYPE(MSG_NATIVE, MSG_NATIVE_GET_DATA, "native get data msg");
int vmgmt_msg_native_send_error(struct msg_conn *conn, uint64_t sess_or_txn_id,
uint64_t req_id, bool short_circuit_ok,

View file

@ -142,6 +142,7 @@ DECLARE_MTYPE(MSG_NATIVE_MSG);
DECLARE_MTYPE(MSG_NATIVE_ERROR);
DECLARE_MTYPE(MSG_NATIVE_GET_TREE);
DECLARE_MTYPE(MSG_NATIVE_TREE_DATA);
DECLARE_MTYPE(MSG_NATIVE_GET_DATA);
/*
* Native message codes
@ -149,6 +150,7 @@ DECLARE_MTYPE(MSG_NATIVE_TREE_DATA);
#define MGMT_MSG_CODE_ERROR 0
#define MGMT_MSG_CODE_GET_TREE 1
#define MGMT_MSG_CODE_TREE_DATA 2
#define MGMT_MSG_CODE_GET_DATA 3
/**
* struct mgmt_msg_header - Header common to all native messages.
@ -193,7 +195,7 @@ _Static_assert(sizeof(struct mgmt_msg_error) ==
"Size mismatch");
/**
* struct mgmt_msg_get_tree - Message carrying xpath query request.
* struct mgmt_msg_get_tree - backend oper data request.
*
* @result_type: ``LYD_FORMAT`` for the returned result.
* @xpath: the query for the data to return.
@ -231,6 +233,30 @@ _Static_assert(sizeof(struct mgmt_msg_tree_data) ==
offsetof(struct mgmt_msg_tree_data, result),
"Size mismatch");
/* Flags for get-data request */
#define GET_DATA_FLAG_STATE 0x01 /* get only "config false" data */
#define GET_DATA_FLAG_CONFIG 0x02 /* get only "config true" data */
#define GET_DATA_FLAG_EXACT 0x04 /* get exact data node instead of the full tree */
/**
* struct mgmt_msg_get_data - frontend get-data request.
*
* @result_type: ``LYD_FORMAT`` for the returned result.
* @flags: combination of ``GET_DATA_FLAG_*`` flags.
* @xpath: the query for the data to return.
*/
struct mgmt_msg_get_data {
struct mgmt_msg_header;
uint8_t result_type;
uint8_t flags;
uint8_t resv2[6];
alignas(8) char xpath[];
};
_Static_assert(sizeof(struct mgmt_msg_get_data) ==
offsetof(struct mgmt_msg_get_data, xpath),
"Size mismatch");
#define MGMT_MSG_VALIDATE_NUL_TERM(msgp, len) \
((len) >= sizeof(*msg) + 1 && ((char *)msgp)[(len)-1] == 0)

View file

@ -515,8 +515,18 @@ static enum nb_error nb_op_ys_init_node_infos(struct nb_op_yield_state *ys)
/* Move up to the container if on a leaf currently. */
if (node &&
!CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST))
!CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST)) {
struct lyd_node *leaf = node;
node = &node->parent->node;
/*
* If the leaf is not a key, delete it, because it has a wrong
* empty value.
*/
if (!lysc_is_key(leaf->schema))
lyd_free_tree(leaf);
}
assert(!node ||
CHECK_FLAG(node->schema->nodetype, LYS_CONTAINER | LYS_LIST));
if (!node)

View file

@ -4105,23 +4105,24 @@ int vty_mgmt_send_get_req(struct vty *vty, bool is_config,
return 0;
}
int vty_mgmt_send_get_tree_req(struct vty *vty, LYD_FORMAT result_type,
const char *xpath)
int vty_mgmt_send_get_data_req(struct vty *vty, LYD_FORMAT result_type,
uint8_t flags, const char *xpath)
{
LYD_FORMAT intern_format = result_type;
vty->mgmt_req_id++;
if (mgmt_fe_send_get_tree_req(mgmt_fe_client, vty->mgmt_session_id,
vty->mgmt_req_id, intern_format, xpath)) {
zlog_err("Failed to send GET-TREE to MGMTD session-id: %" PRIu64
if (mgmt_fe_send_get_data_req(mgmt_fe_client, vty->mgmt_session_id,
vty->mgmt_req_id, intern_format, flags,
xpath)) {
zlog_err("Failed to send GET-DATA to MGMTD session-id: %" PRIu64
" req-id %" PRIu64 ".",
vty->mgmt_session_id, vty->mgmt_req_id);
vty_out(vty, "Failed to send GET-TREE to MGMTD!\n");
vty_out(vty, "Failed to send GET-DATA to MGMTD!\n");
return -1;
}
vty->mgmt_req_pending_cmd = "MESSAGE_GET_TREE_REQ";
vty->mgmt_req_pending_cmd = "MESSAGE_GET_DATA_REQ";
vty->mgmt_req_pending_data = result_type;
return 0;

View file

@ -420,8 +420,8 @@ extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
extern int vty_mgmt_send_get_req(struct vty *vty, bool is_config,
Mgmtd__DatastoreId datastore,
const char **xpath_list, int num_req);
extern int vty_mgmt_send_get_tree_req(struct vty *vty, LYD_FORMAT result_type,
const char *xpath);
extern int vty_mgmt_send_get_data_req(struct vty *vty, LYD_FORMAT result_type,
uint8_t flags, const char *xpath);
extern int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
bool lock, bool scok);
extern void vty_mgmt_resume_response(struct vty *vty, int ret);

View file

@ -1089,7 +1089,7 @@ LY_ERR yang_lyd_trim_xpath(struct lyd_node **root, const char *xpath)
}
return LY_SUCCESS;
#else
struct lyd_node *node;
struct lyd_node *node, *sib;
struct lyd_node **remove = NULL;
struct ly_set *set = NULL;
uint32_t i;
@ -1123,18 +1123,21 @@ LY_ERR yang_lyd_trim_xpath(struct lyd_node **root, const char *xpath)
}
darr_ensure_cap(remove, 128);
LYD_TREE_DFS_BEGIN (*root, node) {
/*
* If this is a direct matching node then include it's subtree
* which won't be marked and would otherwise be removed.
*/
if (node->priv == (void *)2)
LYD_TREE_DFS_continue = 1;
else if (!node->priv) {
*darr_append(remove) = node;
LYD_TREE_DFS_continue = 1;
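/*
 * Walk every top-level sibling tree; a DFS from *root alone would only
 * visit the first tree and leave data in the other trees untrimmed.
 */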
LY_LIST_FOR(*root, sib) {
LYD_TREE_DFS_BEGIN (sib, node) {
/*
* If this is a direct matching node then include its
* subtree which won't be marked and would otherwise
* be removed.
*/
if (node->priv == (void *)2)
LYD_TREE_DFS_continue = 1;
else if (!node->priv) {
*darr_append(remove) = node;
LYD_TREE_DFS_continue = 1;
}
LYD_TREE_DFS_END(sib, node);
}
LYD_TREE_DFS_END(*root, node);
}
darr_foreach_i (remove, i) {
if (remove[i] == *root)

View file

@ -1132,15 +1132,15 @@ done:
}
/**
* fe_adapter_handle_get_tree() - Handle a get-tree message from a FE client.
* fe_adapter_handle_get_data() - Handle a get-data message from a FE client.
* @session: the client session.
* @msg_raw: the message data.
* @msg_len: the length of the message data.
*/
static void fe_adapter_handle_get_tree(struct mgmt_fe_session_ctx *session,
static void fe_adapter_handle_get_data(struct mgmt_fe_session_ctx *session,
void *__msg, size_t msg_len)
{
struct mgmt_msg_get_tree *msg = __msg;
struct mgmt_msg_get_data *msg = __msg;
struct lysc_node **snodes = NULL;
char *xpath_resolved = NULL;
uint64_t req_id = msg->req_id;
@ -1149,7 +1149,7 @@ static void fe_adapter_handle_get_tree(struct mgmt_fe_session_ctx *session,
LY_ERR err;
int ret;
MGMTD_FE_ADAPTER_DBG("Received get-tree request from client %s for session-id %" PRIu64
MGMTD_FE_ADAPTER_DBG("Received get-data request from client %s for session-id %" PRIu64
" req-id %" PRIu64,
session->adapter->name, session->session_id,
msg->req_id);
@ -1181,7 +1181,7 @@ static void fe_adapter_handle_get_tree(struct mgmt_fe_session_ctx *session,
darr_free(snodes);
clients = mgmt_be_interested_clients(msg->xpath, false);
if (!clients) {
if (!clients && !CHECK_FLAG(msg->flags, GET_DATA_FLAG_CONFIG)) {
MGMTD_FE_ADAPTER_DBG("No backends provide xpath: %s for txn-id: %" PRIu64
" session-id: %" PRIu64,
msg->xpath, session->txn_id,
@ -1207,8 +1207,8 @@ static void fe_adapter_handle_get_tree(struct mgmt_fe_session_ctx *session,
/* Create a GET-TREE request under the transaction */
ret = mgmt_txn_send_get_tree_oper(session->txn_id, req_id, clients,
msg->result_type, simple_xpath,
msg->xpath);
msg->result_type, msg->flags,
simple_xpath, msg->xpath);
if (ret) {
/* destroy the just created txn */
mgmt_destroy_txn(&session->txn_id);
@ -1238,8 +1238,8 @@ static void fe_adapter_handle_native_msg(struct mgmt_fe_client_adapter *adapter,
assert(session->adapter == adapter);
switch (msg->code) {
case MGMT_MSG_CODE_GET_TREE:
fe_adapter_handle_get_tree(session, msg, msg_len);
case MGMT_MSG_CODE_GET_DATA:
fe_adapter_handle_get_data(session, msg, msg_len);
break;
default:
MGMTD_FE_ADAPTER_ERR("unknown native message session-id %" PRIu64

View file

@ -22,6 +22,7 @@ static const struct option longopts[] = {
{"skip_runas", no_argument, NULL, 'S'},
{"no_zebra", no_argument, NULL, 'Z'},
{"socket_size", required_argument, NULL, 's'},
{"vrfwnetns", no_argument, NULL, 'n'},
{0}};
static void mgmt_exit(int);
@ -237,6 +238,9 @@ int main(int argc, char **argv)
case 's':
buffer_size = atoi(optarg);
break;
case 'n':
vrf_configure_backend(VRF_BACKEND_NETNS);
break;
default:
frr_help_exit(1);
break;
@ -249,6 +253,9 @@ int main(int argc, char **argv)
/* VRF commands initialization. */
vrf_cmd_init(NULL);
/* Interface commands initialization. */
if_cmd_init(NULL);
/* MGMTD related initialization. */
mgmt_init();

View file

@ -176,6 +176,7 @@ struct txn_req_get_tree {
uint64_t recv_clients; /* Bitmask of clients recv reply from */
int32_t partial_error; /* an error while gathering results */
uint8_t result_type; /* LYD_FORMAT for results */
uint8_t exact; /* if exact node is requested */
uint8_t simple_xpath; /* if xpath is simple */
struct lyd_node *client_results; /* result tree from clients */
};
@ -1258,6 +1259,7 @@ static int txn_get_tree_data_done(struct mgmt_txn_ctx *txn,
{
struct txn_req_get_tree *get_tree = txn_req->req.get_tree;
uint64_t req_id = txn_req->req_id;
struct lyd_node *result;
int ret = NB_OK;
/* cancel timer and send reply onward */
@ -1272,12 +1274,17 @@ static int txn_get_tree_data_done(struct mgmt_txn_ctx *txn,
ret = NB_ERR;
}
result = get_tree->client_results;
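/* For an exact request, reply with just the node addressed by the xpath
 * rather than the whole tree from the root. */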
if (ret == NB_OK && result && get_tree->exact)
result = yang_dnode_get(result, get_tree->xpath);
if (ret == NB_OK)
ret = mgmt_fe_adapter_send_tree_data(txn->session_id,
txn->txn_id,
txn_req->req_id,
get_tree->result_type,
get_tree->client_results,
result,
get_tree->partial_error,
false);
@ -2364,7 +2371,8 @@ int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
*/
int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
uint64_t clients, LYD_FORMAT result_type,
bool simple_xpath, const char *xpath)
uint8_t flags, bool simple_xpath,
const char *xpath)
{
struct mgmt_msg_get_tree *msg;
struct mgmt_txn_ctx *txn;
@ -2382,9 +2390,61 @@ int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETTREE);
get_tree = txn_req->req.get_tree;
get_tree->result_type = result_type;
get_tree->exact = CHECK_FLAG(flags, GET_DATA_FLAG_EXACT);
get_tree->simple_xpath = simple_xpath;
get_tree->xpath = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
if (CHECK_FLAG(flags, GET_DATA_FLAG_CONFIG)) {
struct mgmt_ds_ctx *ds =
mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
struct nb_config *config = mgmt_ds_get_nb_config(ds);
if (config) {
struct ly_set *set = NULL;
LY_ERR err;
err = lyd_find_xpath(config->dnode, xpath, &set);
if (err) {
get_tree->partial_error = err;
goto state;
}
/*
* If there's a single result, duplicate the returned
* node. If there are multiple results, duplicate the
* whole config and mark simple_xpath as false so the
* result is trimmed later in txn_get_tree_data_done.
*/
if (set->count == 1) {
err = lyd_dup_single(set->dnodes[0], NULL,
LYD_DUP_WITH_PARENTS |
LYD_DUP_WITH_FLAGS |
LYD_DUP_RECURSIVE,
&get_tree->client_results);
if (!err)
while (get_tree->client_results->parent)
get_tree->client_results = lyd_parent(
get_tree->client_results);
} else if (set->count > 1) {
err = lyd_dup_siblings(config->dnode, NULL,
LYD_DUP_RECURSIVE |
LYD_DUP_WITH_FLAGS,
&get_tree->client_results);
if (!err)
get_tree->simple_xpath = false;
}
if (err)
get_tree->partial_error = err;
ly_set_free(set, NULL);
}
}
state:
/* If state data was not requested, or no backends are interested, we are done */
if (!CHECK_FLAG(flags, GET_DATA_FLAG_STATE) || !clients)
return txn_get_tree_data_done(txn, txn_req);
msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_tree, slen + 1,
MTYPE_MSG_NATIVE_GET_TREE);
msg->refer_id = txn_id;

View file

@ -203,6 +203,7 @@ extern int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
* req_id: FE client request identifier.
* clients: Bitmask of clients to send get-tree to.
* result_type: LYD_FORMAT result format.
* flags: option flags for the request.
* simple_xpath: true if xpath is simple (only key predicates).
* xpath: The xpath to get the tree from.
*
@ -211,7 +212,8 @@ extern int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
*/
extern int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
uint64_t clients, LYD_FORMAT result_type,
bool simple_xpath, const char *xpath);
uint8_t flags, bool simple_xpath,
const char *xpath);
/*
* Notify backend adapter on connection.

View file

@ -251,17 +251,27 @@ DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
}
DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
"show mgmt get-data WORD$path [json|xml]$fmt",
"show mgmt get-data WORD$path [with-config|only-config]$content [exact]$exact [json|xml]$fmt",
SHOW_STR
MGMTD_STR
"Get a data from the operational datastore\n"
"XPath expression specifying the YANG data root\n"
"Include \"config true\" data\n"
"Get only \"config true\" data\n"
"Get exact node instead of the whole data tree\n"
"JSON output format\n"
"XML output format\n")
{
LYD_FORMAT format = (fmt && fmt[0] == 'x') ? LYD_XML : LYD_JSON;
int plen = strlen(path);
char *xpath = NULL;
uint8_t flags = content ? GET_DATA_FLAG_CONFIG : GET_DATA_FLAG_STATE;
if (content && content[0] == 'w')
flags |= GET_DATA_FLAG_STATE;
if (exact)
flags |= GET_DATA_FLAG_EXACT;
/* get rid of extraneous trailing slash-* or single '/' unless root */
if (plen > 2 && ((path[plen - 2] == '/' && path[plen - 1] == '*') ||
@ -272,7 +282,7 @@ DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
path = xpath;
}
vty_mgmt_send_get_tree_req(vty, format, path);
vty_mgmt_send_get_data_req(vty, format, flags, path);
if (xpath)
XFREE(MTYPE_TMP, xpath);
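For reference, the new CLI forms map onto the request flags as follows; this simply restates the flag logic in the hunk above.

```c
/* Illustrative summary of the DEFPY-to-flags mapping (not part of the change):
 *   show mgmt get-data PATH              -> GET_DATA_FLAG_STATE
 *   show mgmt get-data PATH with-config  -> GET_DATA_FLAG_CONFIG | GET_DATA_FLAG_STATE
 *   show mgmt get-data PATH only-config  -> GET_DATA_FLAG_CONFIG
 *   show mgmt get-data PATH ... exact    -> any of the above, plus GET_DATA_FLAG_EXACT
 */
```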

View file

@ -84,6 +84,7 @@ def setup_module(mod):
router.net.set_intf_netns(rname + "-eth2", ns, up=True)
for rname, router in router_list.items():
router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns")
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format(rname)),

View file

@ -132,6 +132,7 @@ def setup_module(mod):
for rname, router in router_list.items():
if rname == "r1":
router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns")
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format(rname)),

View file

@ -94,6 +94,7 @@ def setup_module(module):
router.net.set_intf_netns("r1-eth0", ns, up=True)
# run daemons
router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns")
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format("r1")),
@ -205,7 +206,6 @@ def test_bgp_vrf_netns():
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
ret = pytest.main(args)

View file

@ -66,7 +66,7 @@ def do_oper_test(tgen, query_results):
r1 = tgen.gears["r1"].net
qcmd = (
r"vtysh -c 'show mgmt get-data {}' "
r"vtysh -c 'show mgmt get-data {} {}' "
r"""| sed -e 's/"phy-address": ".*"/"phy-address": "rubout"/'"""
r"""| sed -e 's/"uptime": ".*"/"uptime": "rubout"/'"""
r"""| sed -e 's/"vrf": "[0-9]*"/"vrf": "rubout"/'"""
@ -81,7 +81,7 @@ def do_oper_test(tgen, query_results):
if doreset:
doreset = False
expected = open(qr[1], encoding="ascii").read()
output = r1.cmd_nostatus(qcmd.format(qr[0]))
output = r1.cmd_nostatus(qcmd.format(qr[0], qr[2] if len(qr) > 2 else ""))
try:
ojson = json.loads(output)

View file

@ -14,10 +14,12 @@ debug mgmt client backend
interface r1-eth0
ip address 1.1.1.1/24
description r1-eth0-desc
exit
interface r1-eth1 vrf red
ip address 3.3.3.1/24
description r1-eth1-desc
exit
ip route 11.11.11.11/32 1.1.1.2
!ip route 13.13.13.13/32 3.3.3.2 vrf red
!ip route 13.13.13.13/32 3.3.3.2 vrf red

View file

@ -0,0 +1,14 @@
{
"frr-interface:lib": {
"interface": [
{
"name": "r1-eth0",
"description": "r1-eth0-desc"
},
{
"name": "r1-eth1",
"description": "r1-eth1-desc"
}
]
}
}

View file

@ -0,0 +1,3 @@
{
"frr-interface:description": "r1-eth0-desc"
}

View file

@ -0,0 +1,22 @@
{
"frr-interface:interface": [
{
"name": "r1-eth0",
"vrf": "default",
"state": {
"if-index": "rubout",
"mtu": 1500,
"mtu6": 1500,
"speed": 10000,
"metric": 0,
"phy-address": "rubout"
},
"frr-zebra:zebra": {
"state": {
"up-count": 0,
"down-count": 0
}
}
}
]
}

View file

@ -0,0 +1,10 @@
{
"frr-interface:lib": {
"interface": [
{
"name": "r1-eth0",
"description": "r1-eth0-desc"
}
]
}
}

View file

@ -0,0 +1,25 @@
{
"frr-interface:lib": {
"interface": [
{
"name": "r1-eth0",
"vrf": "default",
"description": "r1-eth0-desc",
"state": {
"if-index": "rubout",
"mtu": 1500,
"mtu6": 1500,
"speed": 10000,
"metric": 0,
"phy-address": "rubout"
},
"frr-zebra:zebra": {
"state": {
"up-count": 0,
"down-count": 0
}
}
}
]
}
}

View file

@ -112,6 +112,31 @@ def test_oper_simple(tgen):
'route[prefix="1.1.1.0/24"]/route-entry[protocol="connected"]/metric',
"simple-results/result-singleton-metric.json",
),
(
'/frr-interface:lib/interface[name="r1-eth0"]',
"simple-results/result-intf-eth0-with-config.json",
"with-config",
),
(
'/frr-interface:lib/interface[name="r1-eth0"]',
"simple-results/result-intf-eth0-only-config.json",
"only-config",
),
(
"/frr-interface:lib/interface/description",
"simple-results/result-intf-description.json",
"with-config",
),
(
'/frr-interface:lib/interface[name="r1-eth0"]',
"simple-results/result-intf-eth0-exact.json",
"exact",
),
(
'/frr-interface:lib/interface[name="r1-eth0"]/description',
"simple-results/result-intf-eth0-description-exact.json",
"with-config exact",
),
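# The optional third tuple element is appended to the get-data command as
# extra CLI words (with-config / only-config / exact) by do_oper_test.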
# Interface state
(
'/frr-interface:lib/interface[name="r1-eth0"]/state',

View file

@ -87,6 +87,7 @@ def setup_module(mod):
router.net.set_intf_netns(rname + "-eth0", ns, up=True)
router.net.set_intf_netns(rname + "-eth1", ns, up=True)
router.load_config(TopoRouter.RD_MGMTD, None, "--vrfwnetns")
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format(rname)),

View file

@ -56,9 +56,9 @@ extern struct event_loop *master;
VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_RIPNGD | VTYSH_OSPFD | VTYSH_OSPF6D | \
VTYSH_ISISD | VTYSH_PIMD | VTYSH_PIM6D | VTYSH_NHRPD | \
VTYSH_EIGRPD | VTYSH_BABELD | VTYSH_PBRD | VTYSH_FABRICD | \
VTYSH_VRRPD
VTYSH_VRRPD | VTYSH_MGMTD
#define VTYSH_INTERFACE VTYSH_INTERFACE_SUBSET | VTYSH_BGPD
#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_MGMTD
#define VTYSH_VRF VTYSH_INTERFACE_SUBSET
#define VTYSH_KEYS VTYSH_RIPD | VTYSH_EIGRPD | VTYSH_OSPF6D | VTYSH_OSPFD
/* Daemons who can process nexthop-group configs */
#define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD