2023-02-08 13:17:09 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2015-05-20 02:40:34 +02:00
|
|
|
/* Zebra next hop tracking code
|
|
|
|
* Copyright (C) 2013 Cumulus Networks, Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include "prefix.h"
|
|
|
|
#include "table.h"
|
|
|
|
#include "memory.h"
|
|
|
|
#include "command.h"
|
|
|
|
#include "if.h"
|
|
|
|
#include "log.h"
|
|
|
|
#include "sockunion.h"
|
|
|
|
#include "linklist.h"
|
2023-03-07 20:22:48 +01:00
|
|
|
#include "frrevent.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
#include "workqueue.h"
|
|
|
|
#include "prefix.h"
|
|
|
|
#include "routemap.h"
|
|
|
|
#include "stream.h"
|
|
|
|
#include "nexthop.h"
|
2015-05-22 11:39:56 +02:00
|
|
|
#include "vrf.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2018-08-27 16:43:37 +02:00
|
|
|
#include "zebra/zebra_router.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
#include "zebra/rib.h"
|
|
|
|
#include "zebra/rt.h"
|
|
|
|
#include "zebra/zserv.h"
|
2016-04-14 04:40:18 +02:00
|
|
|
#include "zebra/zebra_ns.h"
|
2016-04-14 15:20:47 +02:00
|
|
|
#include "zebra/zebra_vrf.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
#include "zebra/redistribute.h"
|
|
|
|
#include "zebra/debug.h"
|
|
|
|
#include "zebra/zebra_rnh.h"
|
2016-05-11 17:47:02 +02:00
|
|
|
#include "zebra/zebra_routemap.h"
|
2020-07-20 13:43:54 +02:00
|
|
|
#include "zebra/zebra_srte.h"
|
2016-02-23 05:17:09 +01:00
|
|
|
#include "zebra/interface.h"
|
2018-06-19 20:29:05 +02:00
|
|
|
#include "zebra/zebra_errors.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2019-06-21 08:10:39 +02:00
|
|
|
DEFINE_MTYPE_STATIC(ZEBRA, RNH, "Nexthop tracking object");
|
|
|
|
|
2021-03-01 16:48:05 +01:00
|
|
|
/* UI controls whether to notify about changes that only involve backup
|
|
|
|
* nexthops. Default is to notify all changes.
|
|
|
|
*/
|
|
|
|
static bool rnh_hide_backups;
|
|
|
|
|
2017-06-01 13:26:25 +02:00
|
|
|
static void free_state(vrf_id_t vrf_id, struct route_entry *re,
|
|
|
|
struct route_node *rn);
|
2020-02-25 14:29:46 +01:00
|
|
|
static void copy_state(struct rnh *rnh, const struct route_entry *re,
|
2015-05-20 03:04:20 +02:00
|
|
|
struct route_node *rn);
|
2021-03-01 16:49:32 +01:00
|
|
|
static bool compare_state(struct route_entry *r1, struct route_entry *r2);
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
static void print_rnh(struct route_node *rn, struct vty *vty,
|
|
|
|
json_object *json);
|
2018-04-22 23:03:52 +02:00
|
|
|
static int zebra_client_cleanup_rnh(struct zserv *client);
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2018-04-22 23:03:52 +02:00
|
|
|
/* Module init: hook per-client RNH cleanup into zapi client teardown. */
void zebra_rnh_init(void)
{
	/* When a zserv client disconnects, drop all its RNH registrations. */
	hook_register(zserv_client_close, zebra_client_cleanup_rnh);
}
|
|
|
|
|
2021-09-24 22:36:27 +02:00
|
|
|
static inline struct route_table *get_rnh_table(vrf_id_t vrfid, afi_t afi,
|
|
|
|
safi_t safi)
|
2015-05-20 03:04:20 +02:00
|
|
|
{
|
2015-05-22 11:39:56 +02:00
|
|
|
struct zebra_vrf *zvrf;
|
2015-05-20 03:04:20 +02:00
|
|
|
struct route_table *t = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-11-02 15:16:58 +01:00
|
|
|
zvrf = zebra_vrf_lookup_by_id(vrfid);
|
2021-09-24 22:36:27 +02:00
|
|
|
if (zvrf) {
|
|
|
|
if (safi == SAFI_UNICAST)
|
|
|
|
t = zvrf->rnh_table[afi];
|
2021-09-24 22:42:25 +02:00
|
|
|
else if (safi == SAFI_MULTICAST)
|
|
|
|
t = zvrf->rnh_table_multicast[afi];
|
2021-09-24 22:36:27 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2019-02-06 16:23:58 +01:00
|
|
|
/*
 * Detach 'rnh' from the rib_dest of the route currently resolving it:
 * look up rnh->resolved_route in the VRF's RIB table and delete the rnh
 * from that node's NHT list.
 */
static void zebra_rnh_remove_from_routing_table(struct rnh *rnh)
{
	struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(rnh->vrf_id);
	struct route_table *table = zvrf->table[rnh->afi][rnh->safi];
	struct route_node *rn;
	rib_dest_t *dest;

	if (!table)
		return;

	/* route_node_match takes a lock on the returned node. */
	rn = route_node_match(table, &rnh->resolved_route);
	if (!rn)
		return;

	if (IS_ZEBRA_DEBUG_NHT_DETAILED)
		zlog_debug("%s: %s(%u):%pRN removed from tracking on %pRN",
			   __func__, VRF_LOGNAME(zvrf->vrf), rnh->vrf_id,
			   rnh->node, rn);

	dest = rib_dest_from_rnode(rn);
	rnh_list_del(&dest->nht, rnh);
	/* Release the reference taken by route_node_match() above. */
	route_unlock_node(rn);
}
|
|
|
|
|
|
|
|
static void zebra_rnh_store_in_routing_table(struct rnh *rnh)
|
|
|
|
{
|
|
|
|
struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(rnh->vrf_id);
|
2021-09-24 22:42:25 +02:00
|
|
|
struct route_table *table = zvrf->table[rnh->afi][rnh->safi];
|
2019-02-06 16:23:58 +01:00
|
|
|
struct route_node *rn;
|
|
|
|
rib_dest_t *dest;
|
|
|
|
|
|
|
|
rn = route_node_match(table, &rnh->resolved_route);
|
|
|
|
if (!rn)
|
|
|
|
return;
|
|
|
|
|
2020-09-30 16:42:50 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: %s(%u):%pRN added for tracking on %pRN",
|
|
|
|
__func__, VRF_LOGNAME(zvrf->vrf), rnh->vrf_id,
|
|
|
|
rnh->node, rn);
|
2019-02-08 15:11:47 +01:00
|
|
|
|
2019-02-06 16:23:58 +01:00
|
|
|
dest = rib_dest_from_rnode(rn);
|
2019-05-01 00:04:57 +02:00
|
|
|
rnh_list_add_tail(&dest->nht, rnh);
|
2019-02-06 16:23:58 +01:00
|
|
|
route_unlock_node(rn);
|
|
|
|
}
|
|
|
|
|
2021-10-21 15:16:21 +02:00
|
|
|
/*
 * Find or create the nexthop-tracking entry for prefix 'p' in (vrf, safi).
 * Sets *exists to true when a tracker was already present, false when a
 * new one was allocated (or on table-lookup failure).  Returns the rnh,
 * or NULL if the RNH table for this vrf/afi/safi could not be found.
 */
struct rnh *zebra_add_rnh(struct prefix *p, vrf_id_t vrfid, safi_t safi,
			  bool *exists)
{
	struct route_table *table;
	struct route_node *rn;
	struct rnh *rnh = NULL;
	afi_t afi = family2afi(p->family);

	if (IS_ZEBRA_DEBUG_NHT) {
		struct vrf *vrf = vrf_lookup_by_id(vrfid);

		zlog_debug("%s(%u): Add RNH %pFX for safi: %u",
			   VRF_LOGNAME(vrf), vrfid, p, safi);
	}

	table = get_rnh_table(vrfid, afi, safi);
	if (!table) {
		struct vrf *vrf = vrf_lookup_by_id(vrfid);

		flog_warn(EC_ZEBRA_RNH_NO_TABLE,
			  "%s(%u): Add RNH %pFX - table not found",
			  VRF_LOGNAME(vrf), vrfid, p);
		*exists = false;
		return NULL;
	}

	/* Make it sure prefixlen is applied to the prefix. */
	apply_mask(p);

	/* Lookup (or add) route node.*/
	rn = route_node_get(table, p);

	if (!rn->info) {
		/* First tracker for this prefix: allocate and initialize. */
		rnh = XCALLOC(MTYPE_RNH, sizeof(struct rnh));

		/*
		 * The resolved route is already 0.0.0.0/0 or
		 * 0::0/0 due to the calloc right above, but
		 * we should set the family so that future
		 * comparisons can just be done
		 */
		rnh->resolved_route.family = p->family;
		rnh->client_list = list_new();
		rnh->vrf_id = vrfid;
		rnh->seqno = 0;
		rnh->afi = afi;
		rnh->safi = safi;
		rnh->zebra_pseudowire_list = list_new();
		/* Extra lock: the rnh itself holds a reference on the node. */
		route_lock_node(rn);
		rn->info = rnh;
		rnh->node = rn;
		*exists = false;

		/* Link into the RIB node currently resolving this prefix. */
		zebra_rnh_store_in_routing_table(rnh);
	} else
		*exists = true;

	/* Drop the reference taken by route_node_get(). */
	route_unlock_node(rn);
	return (rn->info);
}
|
|
|
|
|
2021-09-24 22:36:27 +02:00
|
|
|
/*
 * Look up an existing nexthop-tracking entry for prefix 'p' in
 * (vrf, safi).  Returns NULL when the table or the entry is absent.
 * Note: 'p' is normalized in place via apply_mask().
 */
struct rnh *zebra_lookup_rnh(struct prefix *p, vrf_id_t vrfid, safi_t safi)
{
	struct route_table *table;
	struct route_node *rn;

	table = get_rnh_table(vrfid, family2afi(PREFIX_FAMILY(p)), safi);
	if (table == NULL)
		return NULL;

	/* Clear any host bits beyond the prefix length. */
	apply_mask(p);

	rn = route_node_lookup(table, p);
	if (rn == NULL)
		return NULL;

	/* Drop the lookup reference; the caller does not hold the node. */
	route_unlock_node(rn);

	return rn->info;
}
|
|
|
|
|
2016-10-31 18:15:16 +01:00
|
|
|
/*
 * Tear down and free an rnh: unlink it from the resolving RIB node,
 * release its client/pseudowire lists and cached state, then free it.
 * Does NOT touch rnh->node itself; callers own that cleanup.
 */
void zebra_free_rnh(struct rnh *rnh)
{
	struct zebra_vrf *zvrf;
	struct route_table *table;

	zebra_rnh_remove_from_routing_table(rnh);
	/* Mark deleted so concurrent walkers skip this entry. */
	rnh->flags |= ZEBRA_NHT_DELETED;
	list_delete(&rnh->client_list);
	list_delete(&rnh->zebra_pseudowire_list);

	zvrf = zebra_vrf_lookup_by_id(rnh->vrf_id);
	/*
	 * NOTE(review): this second unlink looks up the table by the
	 * resolved route's family rather than rnh->afi as the remove
	 * helper above does — presumably to catch a stale link when the
	 * two disagree; confirm whether it is still needed.
	 */
	table = zvrf->table[family2afi(rnh->resolved_route.family)][rnh->safi];

	if (table) {
		struct route_node *rern;

		rern = route_node_match(table, &rnh->resolved_route);
		if (rern) {
			rib_dest_t *dest;

			/* Release the route_node_match() reference. */
			route_unlock_node(rern);

			dest = rib_dest_from_rnode(rern);
			rnh_list_del(&dest->nht, rnh);
		}
	}
	/* Free the cached resolution state (route entry copy). */
	free_state(rnh->vrf_id, rnh->state, rnh->node);
	XFREE(MTYPE_RNH, rnh);
}
|
|
|
|
|
2021-09-24 17:04:03 +02:00
|
|
|
static void zebra_delete_rnh(struct rnh *rnh)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
struct route_node *rn;
|
|
|
|
|
2019-01-04 17:02:35 +01:00
|
|
|
if (!list_isempty(rnh->client_list)
|
|
|
|
|| !list_isempty(rnh->zebra_pseudowire_list))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if ((rnh->flags & ZEBRA_NHT_DELETED) || !(rn = rnh->node))
|
2015-05-20 02:40:34 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT) {
|
2020-09-30 16:42:50 +02:00
|
|
|
struct vrf *vrf = vrf_lookup_by_id(rnh->vrf_id);
|
|
|
|
|
2021-09-24 17:04:03 +02:00
|
|
|
zlog_debug("%s(%u): Del RNH %pRN", VRF_LOGNAME(vrf),
|
|
|
|
rnh->vrf_id, rnh->node);
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
2016-10-31 18:15:16 +01:00
|
|
|
zebra_free_rnh(rnh);
|
2015-05-20 02:40:34 +02:00
|
|
|
rn->info = NULL;
|
|
|
|
route_unlock_node(rn);
|
|
|
|
}
|
|
|
|
|
2018-08-23 22:05:02 +02:00
|
|
|
/*
|
|
|
|
* This code will send to the registering client
|
|
|
|
* the looked up rnh.
|
|
|
|
* For a rnh that was created, there is no data
|
|
|
|
* so it will send an empty nexthop group
|
|
|
|
* If rnh exists then we know it has been evaluated
|
|
|
|
* and as such it will have a resolved rnh.
|
|
|
|
*/
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically register to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is splitted, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zbera.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of corse in future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
/*
 * Register a zapi client on an rnh (idempotent) and immediately push the
 * current resolution state back to that client.
 */
void zebra_add_rnh_client(struct rnh *rnh, struct zserv *client,
			  vrf_id_t vrf_id)
{
	if (IS_ZEBRA_DEBUG_NHT) {
		struct vrf *vrf = vrf_lookup_by_id(vrf_id);

		zlog_debug("%s(%u): Client %s registers for RNH %pRN",
			   VRF_LOGNAME(vrf), vrf_id,
			   zebra_route_string(client->proto), rnh->node);
	}
	/* Avoid duplicate registrations for the same client. */
	if (!listnode_lookup(rnh->client_list, client))
		listnode_add(rnh->client_list, client);

	/*
	 * We always need to respond with known information,
	 * currently multiple daemons expect this behavior
	 */
	zebra_send_rnh_update(rnh, client, vrf_id, 0);
}
|
|
|
|
|
2021-09-24 17:04:03 +02:00
|
|
|
void zebra_remove_rnh_client(struct rnh *rnh, struct zserv *client)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT) {
|
2020-09-30 16:42:50 +02:00
|
|
|
struct vrf *vrf = vrf_lookup_by_id(rnh->vrf_id);
|
|
|
|
|
2021-09-24 17:04:03 +02:00
|
|
|
zlog_debug("Client %s unregisters for RNH %s(%u)%pRN",
|
2020-09-30 16:42:50 +02:00
|
|
|
zebra_route_string(client->proto), VRF_LOGNAME(vrf),
|
2021-09-24 17:04:03 +02:00
|
|
|
vrf->vrf_id, rnh->node);
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
listnode_delete(rnh->client_list, client);
|
2021-09-24 17:04:03 +02:00
|
|
|
zebra_delete_rnh(rnh);
|
2015-05-20 02:47:22 +02:00
|
|
|
}
|
|
|
|
|
2017-06-30 17:26:04 +02:00
|
|
|
/* XXX move this utility function elsewhere? */
|
|
|
|
static void addr2hostprefix(int af, const union g_addr *addr,
|
|
|
|
struct prefix *prefix)
|
|
|
|
{
|
|
|
|
switch (af) {
|
|
|
|
case AF_INET:
|
|
|
|
prefix->family = AF_INET;
|
|
|
|
prefix->prefixlen = IPV4_MAX_BITLEN;
|
|
|
|
prefix->u.prefix4 = addr->ipv4;
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
prefix->family = AF_INET6;
|
|
|
|
prefix->prefixlen = IPV6_MAX_BITLEN;
|
|
|
|
prefix->u.prefix6 = addr->ipv6;
|
|
|
|
break;
|
|
|
|
default:
|
2017-09-27 23:38:19 +02:00
|
|
|
memset(prefix, 0, sizeof(*prefix));
|
2019-08-23 14:28:43 +02:00
|
|
|
zlog_warn("%s: unknown address family %d", __func__, af);
|
2017-06-30 17:26:04 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-27 18:10:30 +02:00
|
|
|
void zebra_register_rnh_pseudowire(vrf_id_t vrf_id, struct zebra_pw *pw,
|
|
|
|
bool *nht_exists)
|
2017-06-30 17:26:04 +02:00
|
|
|
{
|
|
|
|
struct prefix nh;
|
|
|
|
struct rnh *rnh;
|
2018-08-23 22:05:02 +02:00
|
|
|
bool exists;
|
2018-10-11 19:46:55 +02:00
|
|
|
struct zebra_vrf *zvrf;
|
|
|
|
|
2020-05-27 18:10:30 +02:00
|
|
|
*nht_exists = false;
|
|
|
|
|
2023-03-28 21:49:50 +02:00
|
|
|
zvrf = zebra_vrf_lookup_by_id(vrf_id);
|
2018-10-11 19:46:55 +02:00
|
|
|
if (!zvrf)
|
|
|
|
return;
|
2017-06-30 17:26:04 +02:00
|
|
|
|
|
|
|
addr2hostprefix(pw->af, &pw->nexthop, &nh);
|
2021-10-21 15:16:21 +02:00
|
|
|
rnh = zebra_add_rnh(&nh, vrf_id, SAFI_UNICAST, &exists);
|
2020-05-27 18:10:30 +02:00
|
|
|
if (!rnh)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!listnode_lookup(rnh->zebra_pseudowire_list, pw)) {
|
2017-06-30 17:26:04 +02:00
|
|
|
listnode_add(rnh->zebra_pseudowire_list, pw);
|
|
|
|
pw->rnh = rnh;
|
2021-09-24 22:36:27 +02:00
|
|
|
zebra_evaluate_rnh(zvrf, family2afi(pw->af), 1, &nh,
|
|
|
|
SAFI_UNICAST);
|
2020-05-27 18:10:30 +02:00
|
|
|
} else
|
|
|
|
*nht_exists = true;
|
2017-06-30 17:26:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Stop tracking a pseudowire's nexthop: unlink the PW from its rnh and
 * delete the tracker if it was the last reference.
 */
void zebra_deregister_rnh_pseudowire(vrf_id_t vrf_id, struct zebra_pw *pw)
{
	struct rnh *rnh = pw->rnh;

	if (rnh == NULL)
		return;

	listnode_delete(rnh->zebra_pseudowire_list, pw);
	pw->rnh = NULL;

	zebra_delete_rnh(rnh);
}
|
|
|
|
|
2019-04-15 20:27:00 +02:00
|
|
|
/* Clear the NEXTHOP_FLAG_RNH_FILTERED flags on all nexthops
|
|
|
|
*/
|
|
|
|
static void zebra_rnh_clear_nexthop_rnh_filters(struct route_entry *re)
|
|
|
|
{
|
|
|
|
struct nexthop *nexthop;
|
|
|
|
|
|
|
|
if (re) {
|
2020-02-25 14:29:46 +01:00
|
|
|
for (nexthop = re->nhe->nhg.nexthop; nexthop;
|
2019-04-15 20:27:00 +02:00
|
|
|
nexthop = nexthop->next) {
|
|
|
|
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RNH_FILTERED);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-09 20:01:21 +01:00
|
|
|
/* Apply the NHT route-map for a client to the route (and nexthops)
|
|
|
|
* resolving a NH.
|
|
|
|
*/
|
2019-01-21 15:30:36 +01:00
|
|
|
/*
 * Run the per-protocol NHT route-map over every nexthop of the route
 * resolving an rnh.  Nexthops denied by the map are flagged
 * NEXTHOP_FLAG_RNH_FILTERED.  Returns the count of nexthops that
 * survived filtering (0 means fully filtered or no resolving route).
 */
static int zebra_rnh_apply_nht_rmap(afi_t afi, struct zebra_vrf *zvrf,
				    struct route_node *prn,
				    struct route_entry *re, int proto)
{
	int valid_count = 0;
	struct nexthop *nh;
	route_map_result_t result;

	if (!prn || !re)
		return 0;

	for (nh = re->nhe->nhg.nexthop; nh; nh = nh->next) {
		result = zebra_nht_route_map_check(afi, proto, &prn->p, zvrf,
						   re, nh);
		if (result == RMAP_DENYMATCH)
			SET_FLAG(nh->flags, NEXTHOP_FLAG_RNH_FILTERED);
		else
			valid_count++; /* at least one valid NH */
	}

	return valid_count;
}
|
|
|
|
|
2015-12-09 20:01:21 +01:00
|
|
|
/*
|
|
|
|
* Notify clients registered for this nexthop about a change.
|
|
|
|
*/
|
2019-01-21 15:30:36 +01:00
|
|
|
/*
 * Notify every client registered on 'rnh' about a resolution change.
 * 'prn'/'re' identify the resolving route (both NULL when the nexthop
 * became unresolved).  Each client's NHT route-map is applied per
 * client, the rnh->filtered[] state is updated, and an update message
 * is sent.
 */
static void zebra_rnh_notify_protocol_clients(struct zebra_vrf *zvrf, afi_t afi,
					      struct route_node *nrn,
					      struct rnh *rnh,
					      struct route_node *prn,
					      struct route_entry *re)
{
	struct listnode *node;
	struct zserv *client;
	int num_resolving_nh;

	if (IS_ZEBRA_DEBUG_NHT) {
		if (prn && re) {
			zlog_debug("%s(%u):%pRN: NH resolved over route %pRN",
				   VRF_LOGNAME(zvrf->vrf), zvrf->vrf->vrf_id,
				   nrn, prn);
		} else
			zlog_debug("%s(%u):%pRN: NH has become unresolved",
				   VRF_LOGNAME(zvrf->vrf), zvrf->vrf->vrf_id,
				   nrn);
	}

	for (ALL_LIST_ELEMENTS_RO(rnh->client_list, node, client)) {
		if (prn && re) {
			/* Apply route-map for this client to route resolving
			 * this
			 * nexthop to see if it is filtered or not.
			 */
			/* Reset filter flags so each client starts clean. */
			zebra_rnh_clear_nexthop_rnh_filters(re);
			num_resolving_nh = zebra_rnh_apply_nht_rmap(
				afi, zvrf, prn, re, client->proto);
			if (num_resolving_nh)
				rnh->filtered[client->proto] = 0;
			else
				rnh->filtered[client->proto] = 1;

			if (IS_ZEBRA_DEBUG_NHT)
				zlog_debug(
					"%s(%u):%pRN: Notifying client %s about NH %s",
					VRF_LOGNAME(zvrf->vrf),
					zvrf->vrf->vrf_id, nrn,
					zebra_route_string(client->proto),
					num_resolving_nh
						? ""
						: "(filtered by route-map)");
		} else {
			/* Unresolved: nothing to filter. */
			rnh->filtered[client->proto] = 0;
			if (IS_ZEBRA_DEBUG_NHT)
				zlog_debug(
					"%s(%u):%pRN: Notifying client %s about NH (unreachable)",
					VRF_LOGNAME(zvrf->vrf),
					zvrf->vrf->vrf_id, nrn,
					zebra_route_string(client->proto));
		}

		zebra_send_rnh_update(rnh, client, zvrf->vrf->vrf_id, 0);
	}

	/* Leave the route's nexthop flags clean for the next evaluation. */
	if (re)
		zebra_rnh_clear_nexthop_rnh_filters(re);
}
|
2015-05-20 03:04:20 +02:00
|
|
|
|
2018-08-27 22:03:37 +02:00
|
|
|
/*
|
|
|
|
* Utility to determine whether a candidate nexthop is useable. We make this
|
|
|
|
* check in a couple of places, so this is a single home for the logic we
|
|
|
|
* use.
|
|
|
|
*/
|
2021-03-01 16:49:32 +01:00
|
|
|
|
|
|
|
/* Nexthop flags that disqualify a nexthop for RNH purposes. */
static const int RNH_INVALID_NH_FLAGS = (NEXTHOP_FLAG_RECURSIVE |
					 NEXTHOP_FLAG_DUPLICATE |
					 NEXTHOP_FLAG_RNH_FILTERED);

/*
 * A candidate nexthop is useable for NHT iff its route is installed,
 * the nexthop itself is active, and it carries none of the
 * disqualifying flags above.
 */
bool rnh_nexthop_valid(const struct route_entry *re, const struct nexthop *nh)
{
	return (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
		&& CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
		&& !CHECK_FLAG(nh->flags, RNH_INVALID_NH_FLAGS));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Determine whether an re's nexthops are valid for tracking.
|
|
|
|
*/
|
|
|
|
static bool rnh_check_re_nexthops(const struct route_entry *re,
|
|
|
|
const struct rnh *rnh)
|
|
|
|
{
|
|
|
|
bool ret = false;
|
|
|
|
const struct nexthop *nexthop = NULL;
|
|
|
|
|
|
|
|
/* Check route's nexthops */
|
|
|
|
for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) {
|
|
|
|
if (rnh_nexthop_valid(re, nexthop))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check backup nexthops, if any. */
|
|
|
|
if (nexthop == NULL && re->nhe->backup_info &&
|
|
|
|
re->nhe->backup_info->nhe) {
|
|
|
|
for (ALL_NEXTHOPS(re->nhe->backup_info->nhe->nhg, nexthop)) {
|
|
|
|
if (rnh_nexthop_valid(re, nexthop))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nexthop == NULL) {
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug(
|
|
|
|
" Route Entry %s no nexthops",
|
|
|
|
zebra_route_string(re->type));
|
|
|
|
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
*: Introduce Local Host Routes to FRR
Create Local routes in FRR:
S 0.0.0.0/0 [1/0] via 192.168.119.1, enp39s0, weight 1, 00:03:46
K>* 0.0.0.0/0 [0/100] via 192.168.119.1, enp39s0, 00:03:51
O 192.168.119.0/24 [110/100] is directly connected, enp39s0, weight 1, 00:03:46
C>* 192.168.119.0/24 is directly connected, enp39s0, 00:03:51
L>* 192.168.119.224/32 is directly connected, enp39s0, 00:03:51
O 192.168.119.229/32 [110/100] via 0.0.0.0, enp39s0 inactive, weight 1, 00:03:46
C>* 192.168.119.229/32 is directly connected, enp39s0, 00:03:46
Create ability to redistribute local routes.
Modify tests to support this change.
Signed-off-by: Donald Sharp <sharpd@nvidia.com>
2023-01-05 00:32:43 +01:00
|
|
|
/*
|
|
|
|
* Some special checks if registration asked for them.
|
|
|
|
* LOCAL routes are by their definition not CONNECTED
|
|
|
|
* and as such should not be considered here
|
|
|
|
*/
|
2021-03-01 16:49:32 +01:00
|
|
|
if (CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED)) {
|
*: Introduce Local Host Routes to FRR
Create Local routes in FRR:
S 0.0.0.0/0 [1/0] via 192.168.119.1, enp39s0, weight 1, 00:03:46
K>* 0.0.0.0/0 [0/100] via 192.168.119.1, enp39s0, 00:03:51
O 192.168.119.0/24 [110/100] is directly connected, enp39s0, weight 1, 00:03:46
C>* 192.168.119.0/24 is directly connected, enp39s0, 00:03:51
L>* 192.168.119.224/32 is directly connected, enp39s0, 00:03:51
O 192.168.119.229/32 [110/100] via 0.0.0.0, enp39s0 inactive, weight 1, 00:03:46
C>* 192.168.119.229/32 is directly connected, enp39s0, 00:03:46
Create ability to redistribute local routes.
Modify tests to support this change.
Signed-off-by: Donald Sharp <sharpd@nvidia.com>
2023-01-05 00:32:43 +01:00
|
|
|
if ((re->type == ZEBRA_ROUTE_CONNECT) ||
|
|
|
|
(re->type == ZEBRA_ROUTE_STATIC))
|
2021-03-01 16:49:32 +01:00
|
|
|
ret = true;
|
|
|
|
if (re->type == ZEBRA_ROUTE_NHRP) {
|
|
|
|
|
|
|
|
for (nexthop = re->nhe->nhg.nexthop;
|
|
|
|
nexthop;
|
|
|
|
nexthop = nexthop->next)
|
|
|
|
if (nexthop->type == NEXTHOP_TYPE_IFINDEX)
|
|
|
|
break;
|
|
|
|
if (nexthop)
|
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ret = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
|
|
|
return ret;
|
2018-08-27 22:03:37 +02:00
|
|
|
}
|
|
|
|
|
2017-11-01 14:39:16 +01:00
|
|
|
/*
|
|
|
|
* Determine appropriate route (route entry) resolving a tracked
|
|
|
|
* nexthop.
|
|
|
|
*/
|
|
|
|
static struct route_entry *
|
2019-01-21 15:30:36 +01:00
|
|
|
zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi,
|
2021-03-01 16:49:32 +01:00
|
|
|
struct route_node *nrn, const struct rnh *rnh,
|
2017-11-01 14:39:16 +01:00
|
|
|
struct route_node **prn)
|
|
|
|
{
|
|
|
|
struct route_table *route_table;
|
|
|
|
struct route_node *rn;
|
|
|
|
struct route_entry *re;
|
|
|
|
|
|
|
|
*prn = NULL;
|
|
|
|
|
2021-09-24 22:42:25 +02:00
|
|
|
route_table = zvrf->table[afi][rnh->safi];
|
2017-11-01 14:39:16 +01:00
|
|
|
if (!route_table)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
rn = route_node_match(route_table, &nrn->p);
|
|
|
|
if (!rn)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Unlock route node - we don't need to lock when walking the tree. */
|
|
|
|
route_unlock_node(rn);
|
|
|
|
|
|
|
|
/* While resolving nexthops, we may need to walk up the tree from the
|
|
|
|
* most-specific match. Do similar logic as in zebra_rib.c
|
|
|
|
*/
|
|
|
|
while (rn) {
|
2020-09-30 16:42:50 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: %s(%u):%pRN Possible Match to %pRN",
|
|
|
|
__func__, VRF_LOGNAME(zvrf->vrf),
|
|
|
|
rnh->vrf_id, rnh->node, rn);
|
2019-02-08 15:11:47 +01:00
|
|
|
|
2017-11-01 14:39:16 +01:00
|
|
|
/* Do not resolve over default route unless allowed &&
|
|
|
|
* match route to be exact if so specified
|
|
|
|
*/
|
|
|
|
if (is_default_prefix(&rn->p)
|
2021-09-24 21:51:18 +02:00
|
|
|
&& (!CHECK_FLAG(rnh->flags, ZEBRA_NHT_RESOLVE_VIA_DEFAULT)
|
|
|
|
&& !rnh_resolve_via_default(zvrf, rn->p.family))) {
|
2019-02-08 15:11:47 +01:00
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug(
|
2021-09-24 21:51:18 +02:00
|
|
|
" Not allowed to resolve through default prefix: rnh->resolve_via_default: %u",
|
|
|
|
CHECK_FLAG(
|
|
|
|
rnh->flags,
|
|
|
|
ZEBRA_NHT_RESOLVE_VIA_DEFAULT));
|
2017-11-01 14:39:16 +01:00
|
|
|
return NULL;
|
2019-02-08 15:11:47 +01:00
|
|
|
}
|
2017-11-01 14:39:16 +01:00
|
|
|
|
|
|
|
/* Identify appropriate route entry. */
|
|
|
|
RNODE_FOREACH_RE (rn, re) {
|
2019-02-08 15:11:47 +01:00
|
|
|
if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) {
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug(
|
2020-03-24 17:38:20 +01:00
|
|
|
" Route Entry %s removed",
|
2019-02-08 15:11:47 +01:00
|
|
|
zebra_route_string(re->type));
|
2017-11-01 14:39:16 +01:00
|
|
|
continue;
|
2019-02-08 15:11:47 +01:00
|
|
|
}
|
2020-02-19 17:27:06 +01:00
|
|
|
if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED) &&
|
|
|
|
!CHECK_FLAG(re->flags, ZEBRA_FLAG_FIB_OVERRIDE)) {
|
2019-02-08 15:11:47 +01:00
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug(
|
2020-03-24 17:38:20 +01:00
|
|
|
" Route Entry %s !selected",
|
2019-02-08 15:11:47 +01:00
|
|
|
zebra_route_string(re->type));
|
2017-11-01 14:39:16 +01:00
|
|
|
continue;
|
2019-02-08 15:11:47 +01:00
|
|
|
}
|
2017-11-01 14:39:16 +01:00
|
|
|
|
2019-07-01 19:17:46 +02:00
|
|
|
if (CHECK_FLAG(re->status, ROUTE_ENTRY_QUEUED)) {
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug(
|
2020-03-24 17:38:20 +01:00
|
|
|
" Route Entry %s queued",
|
2019-07-01 19:17:46 +02:00
|
|
|
zebra_route_string(re->type));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-08-17 22:53:24 +02:00
|
|
|
/* Just being SELECTED isn't quite enough - must
|
|
|
|
* have an installed nexthop to be useful.
|
|
|
|
*/
|
2021-03-01 16:49:32 +01:00
|
|
|
if (rnh_check_re_nexthops(re, rnh))
|
2017-11-01 14:39:16 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Route entry found, we're done; else, walk up the tree. */
|
|
|
|
if (re) {
|
|
|
|
*prn = rn;
|
|
|
|
return re;
|
zebra: Nexthop tracking, route resolution recursive lookup
Description:
===========
Change is intended for fixing the NHT resolution logic.
While recursively resolving nexthop, keep looking for a valid/useable route in the rib,
by not stopping at the first/most-specific route in the rib.
Consider the following set of events taking place on R1:
R1(config)# ip route 2.2.2.0/24 ens192
R1# sharp watch nexthop 2.2.2.32 connected
R1# show ip nht
2.2.2.32(Connected)
resolved via static
is directly connected, ens192
Client list: sharp(fd 33)
-2.2.2.32 NHT is resolved over the above valid static route.
R1# sharp install routes 2.2.2.32 nexthop 2.2.2.32 1
R1# 2.2.2.32(Connected)
resolved via static
is directly connected, ens192
Client list: sharp(fd 33)
-.32/32 comes which is going to resolve through itself, but since this is an invalid route,
it will be marked as inactive and will not affect the NHT.
R1# sharp install routes 2.2.2.31 nexthop 2.2.2.32 1
R1# 2.2.2.32(Connected)
unresolved(Connected)
Client list: sharp(fd 50)
-Now a .31/32 comes which will resolve over .32 route, but as per the current logic,
this will trigger the NHT check, in turn making the NHT unresolved.
-With fix, NHT should stay in resolved state as long as the valid static or connected route stays installed
Fix:
====
-While resolving nexthops, walk up the tree from the most-specific match,
walk up the tree without any ZEBRA_NHT_CONNECTED check.
Co-authored-by: Vishal Dhingra <vdhingra@vmware.com>
Co-authored-by: Kantesh Mundaragi <kmundaragi@vmware.com>
Signed-off-by: Iqra Siddiqui <imujeebsiddi@vmware.com>
2021-02-03 14:58:12 +01:00
|
|
|
} else {
|
|
|
|
/* Resolve the nexthop recursively by finding matching
|
|
|
|
* route with lower prefix length
|
|
|
|
*/
|
2017-11-01 14:39:16 +01:00
|
|
|
rn = rn->parent;
|
2019-02-08 15:11:47 +01:00
|
|
|
}
|
2017-11-01 14:39:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-06-30 17:26:04 +02:00
|
|
|
/* Re-evaluate every pseudowire hanging off this tracked nexthop. */
static void zebra_rnh_process_pseudowires(vrf_id_t vrfid, struct rnh *rnh)
{
	struct listnode *ln;
	struct zebra_pw *pw;

	for (ALL_LIST_ELEMENTS_RO(rnh->zebra_pseudowire_list, ln, pw))
		zebra_pw_update(pw);
}
|
|
|
|
|
2015-12-09 20:01:21 +01:00
|
|
|
/*
|
|
|
|
* See if a tracked nexthop entry has undergone any change, and if so,
|
|
|
|
* take appropriate action; this involves notifying any clients and/or
|
|
|
|
* scheduling dependent static routes for processing.
|
|
|
|
*/
|
2019-01-21 15:30:36 +01:00
|
|
|
/*
 * See if a tracked nexthop entry has undergone any change, and if so,
 * take appropriate action; this involves notifying any clients and/or
 * scheduling dependent static routes for processing.
 */
static void zebra_rnh_eval_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi,
					 int force, struct route_node *nrn,
					 struct rnh *rnh,
					 struct route_node *prn,
					 struct route_entry *re)
{
	int state_changed = 0;

	/* If we're resolving over a different route, resolution has changed or
	 * the resolving route has some change (e.g., metric), there is a state
	 * change.
	 */
	zebra_rnh_remove_from_routing_table(rnh);

	if (!prefix_same(&rnh->resolved_route, prn ? &prn->p : NULL)) {
		if (prn)
			prefix_copy(&rnh->resolved_route, &prn->p);
		else {
			/*
			 * Just quickly store the family of the resolved
			 * route so that we can reset it in a second here
			 */
			int family = rnh->resolved_route.family;

			memset(&rnh->resolved_route, 0, sizeof(struct prefix));
			rnh->resolved_route.family = family;
		}

		copy_state(rnh, re, nrn);
		state_changed = 1;
	} else if (compare_state(re, rnh->state)) {
		copy_state(rnh, re, nrn);
		state_changed = 1;
	}

	zebra_rnh_store_in_routing_table(rnh);

	if (state_changed || force) {
		/* NOTE: Use the "copy" of resolving route stored in 'rnh' i.e.,
		 * rnh->state.
		 */
		/* Notify registered protocol clients. */
		zebra_rnh_notify_protocol_clients(zvrf, afi, nrn, rnh, prn,
						  rnh->state);

		/* Process pseudowires attached to this nexthop */
		zebra_rnh_process_pseudowires(zvrf->vrf->vrf_id, rnh);
	}
}
|
2015-05-20 02:47:22 +02:00
|
|
|
|
2015-12-09 20:01:21 +01:00
|
|
|
/* Evaluate one tracked entry */
|
2019-01-21 15:30:36 +01:00
|
|
|
/* Evaluate one tracked entry: find the resolving route entry (if any)
 * and process the result through the state-change machinery.
 */
static void zebra_rnh_evaluate_entry(struct zebra_vrf *zvrf, afi_t afi,
				     int force, struct route_node *nrn)
{
	struct route_node *prn;
	struct route_entry *re;
	struct rnh *rnh;

	if (IS_ZEBRA_DEBUG_NHT)
		zlog_debug("%s(%u):%pRN: Evaluate RNH, %s",
			   VRF_LOGNAME(zvrf->vrf), zvrf->vrf->vrf_id, nrn,
			   force ? "(force)" : "");

	rnh = nrn->info;

	/* Identify route entry (RE) resolving this tracked entry. */
	re = zebra_rnh_resolve_nexthop_entry(zvrf, afi, nrn, rnh, &prn);

	/* If the entry cannot be resolved and that is also the existing state,
	 * there is nothing further to do.
	 */
	if (!re && rnh->state == NULL && !force)
		return;

	/* Process based on type of entry. */
	zebra_rnh_eval_nexthop_entry(zvrf, afi, force, nrn, rnh, prn, re);
}
|
|
|
|
|
2017-03-07 21:13:04 +01:00
|
|
|
/*
|
2017-06-01 13:26:25 +02:00
|
|
|
* Clear the ROUTE_ENTRY_NEXTHOPS_CHANGED flag
|
|
|
|
* from the re entries.
|
2017-03-07 21:13:04 +01:00
|
|
|
*
|
|
|
|
* Please note we are doing this *after* we have
|
|
|
|
* notified the world about each nexthop as that
|
2017-06-01 13:26:25 +02:00
|
|
|
* we can have a situation where one re entry
|
2017-03-07 21:13:04 +01:00
|
|
|
* covers multiple nexthops we are interested in.
|
|
|
|
*/
|
2019-01-21 15:30:36 +01:00
|
|
|
/*
 * Clear the ROUTE_ENTRY_LABELS_CHANGED flag from the resolving re entry.
 *
 * Please note we are doing this *after* we have notified the world about
 * each nexthop, as one re entry can cover multiple nexthops we are
 * interested in.
 */
static void zebra_rnh_clear_nhc_flag(struct zebra_vrf *zvrf, afi_t afi,
				     struct route_node *nrn)
{
	struct route_node *prn;
	struct route_entry *re;
	struct rnh *rnh = nrn->info;

	/* Identify route entry (RIB) resolving this tracked entry. */
	re = zebra_rnh_resolve_nexthop_entry(zvrf, afi, nrn, rnh, &prn);

	if (re)
		UNSET_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED);
}
|
2015-12-09 20:01:21 +01:00
|
|
|
|
|
|
|
/* Evaluate all tracked entries (nexthops or routes for import into BGP)
|
|
|
|
* of a particular VRF and address-family or a specific prefix.
|
|
|
|
*/
|
2019-01-21 15:30:36 +01:00
|
|
|
/* Evaluate all tracked entries (nexthops or routes for import into BGP)
 * of a particular VRF and address-family, or a specific prefix.
 */
void zebra_evaluate_rnh(struct zebra_vrf *zvrf, afi_t afi, int force,
			const struct prefix *p, safi_t safi)
{
	struct route_table *rnh_table;
	struct route_node *nrn;

	rnh_table = get_rnh_table(zvrf->vrf->vrf_id, afi, safi);
	if (!rnh_table) /* unexpected */
		return;

	if (p) {
		/* Evaluating a specific entry, make sure it exists. */
		nrn = route_node_lookup(rnh_table, p);
		if (nrn && nrn->info)
			zebra_rnh_evaluate_entry(zvrf, afi, force, nrn);

		if (nrn)
			route_unlock_node(nrn);
	} else {
		/* Evaluate entire table; route_next() unlocks each node. */
		for (nrn = route_top(rnh_table); nrn; nrn = route_next(nrn)) {
			if (nrn->info)
				zebra_rnh_evaluate_entry(zvrf, afi, force, nrn);
		}

		/* Second pass: clear per-entry change flags. */
		for (nrn = route_top(rnh_table); nrn; nrn = route_next(nrn)) {
			if (nrn->info)
				zebra_rnh_clear_nhc_flag(zvrf, afi, nrn);
		}
	}
}
|
|
|
|
|
2021-10-21 15:17:41 +02:00
|
|
|
void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, safi_t safi,
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
struct vty *vty, const struct prefix *p,
|
|
|
|
json_object *json)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
struct route_table *table;
|
|
|
|
struct route_node *rn;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-10-21 15:17:41 +02:00
|
|
|
table = get_rnh_table(vrfid, afi, safi);
|
2015-05-20 02:40:34 +02:00
|
|
|
if (!table) {
|
2019-08-23 14:28:43 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_NHT)
|
|
|
|
zlog_debug("print_rnhs: rnh table not found");
|
2015-05-20 02:40:34 +02:00
|
|
|
return;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-04-04 20:28:31 +02:00
|
|
|
for (rn = route_top(table); rn; rn = route_next(rn)) {
|
2019-07-31 17:07:58 +02:00
|
|
|
if (p && !prefix_match(&rn->p, p))
|
2019-04-04 20:28:31 +02:00
|
|
|
continue;
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
if (rn->info)
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
print_rnh(rn, vty, json);
|
2019-04-04 20:28:31 +02:00
|
|
|
}
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-06-01 13:26:25 +02:00
|
|
|
* free_state - free up the re structure associated with the rnh.
|
2015-05-20 02:40:34 +02:00
|
|
|
*/
|
2017-06-01 13:26:25 +02:00
|
|
|
/**
 * free_state - free up the re structure associated with the rnh.
 *
 * vrf_id and rn are unused; kept for the historic call signature.
 */
static void free_state(vrf_id_t vrf_id, struct route_entry *re,
		       struct route_node *rn)
{
	if (!re)
		return;

	/* free RE and nexthops */
	zebra_nhg_free(re->nhe);
	XFREE(MTYPE_RE, re);
}
|
|
|
|
|
2020-02-25 14:29:46 +01:00
|
|
|
static void copy_state(struct rnh *rnh, const struct route_entry *re,
|
2017-06-01 13:26:25 +02:00
|
|
|
struct route_node *rn)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2017-06-01 13:26:25 +02:00
|
|
|
struct route_entry *state;
|
2015-05-20 02:40:34 +02:00
|
|
|
|
|
|
|
if (rnh->state) {
|
2016-02-26 20:13:34 +01:00
|
|
|
free_state(rnh->vrf_id, rnh->state, rn);
|
2015-05-20 02:40:34 +02:00
|
|
|
rnh->state = NULL;
|
|
|
|
}
|
|
|
|
|
2017-06-01 13:26:25 +02:00
|
|
|
if (!re)
|
2015-05-20 02:40:34 +02:00
|
|
|
return;
|
|
|
|
|
2017-06-01 13:26:25 +02:00
|
|
|
state = XCALLOC(MTYPE_RE, sizeof(struct route_entry));
|
|
|
|
state->type = re->type;
|
2017-07-25 12:17:50 +02:00
|
|
|
state->distance = re->distance;
|
2017-06-01 13:26:25 +02:00
|
|
|
state->metric = re->metric;
|
2018-01-12 15:20:30 +01:00
|
|
|
state->vrf_id = re->vrf_id;
|
2019-01-14 22:37:53 +01:00
|
|
|
state->status = re->status;
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2020-05-27 18:53:20 +02:00
|
|
|
state->nhe = zebra_nhe_copy(re->nhe, 0);
|
|
|
|
|
|
|
|
/* Copy the 'fib' nexthops also, if present - we want to capture
|
|
|
|
* the true installed nexthops.
|
|
|
|
*/
|
|
|
|
if (re->fib_ng.nexthop)
|
|
|
|
nexthop_group_copy(&state->fib_ng, &re->fib_ng);
|
|
|
|
if (re->fib_backup_ng.nexthop)
|
|
|
|
nexthop_group_copy(&state->fib_backup_ng, &re->fib_backup_ng);
|
2019-11-22 21:30:53 +01:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
rnh->state = state;
|
|
|
|
}
|
|
|
|
|
2020-05-27 18:53:20 +02:00
|
|
|
/*
|
2021-03-01 16:49:32 +01:00
|
|
|
* Locate the next primary nexthop, used when comparing current rnh info with
|
|
|
|
* an updated route.
|
|
|
|
*/
|
|
|
|
static struct nexthop *next_valid_primary_nh(struct route_entry *re,
|
|
|
|
struct nexthop *nh)
|
|
|
|
{
|
|
|
|
struct nexthop_group *nhg;
|
|
|
|
struct nexthop *bnh;
|
|
|
|
int i, idx;
|
|
|
|
bool default_path = true;
|
|
|
|
|
|
|
|
/* Fib backup ng present: some backups are installed,
|
|
|
|
* and we're configured for special handling if there are backups.
|
|
|
|
*/
|
|
|
|
if (rnh_hide_backups && (re->fib_backup_ng.nexthop != NULL))
|
|
|
|
default_path = false;
|
|
|
|
|
|
|
|
/* Default path: no special handling, just using the 'installed'
|
|
|
|
* primary nexthops and the common validity test.
|
|
|
|
*/
|
|
|
|
if (default_path) {
|
|
|
|
if (nh == NULL) {
|
|
|
|
nhg = rib_get_fib_nhg(re);
|
|
|
|
nh = nhg->nexthop;
|
|
|
|
} else
|
|
|
|
nh = nexthop_next(nh);
|
|
|
|
|
|
|
|
while (nh) {
|
|
|
|
if (rnh_nexthop_valid(re, nh))
|
|
|
|
break;
|
|
|
|
else
|
|
|
|
nh = nexthop_next(nh);
|
|
|
|
}
|
|
|
|
|
|
|
|
return nh;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Hide backup activation/switchover events.
|
|
|
|
*
|
|
|
|
* If we've had a switchover, an inactive primary won't be in
|
|
|
|
* the fib list at all - the 'fib' list could even be empty
|
|
|
|
* in the case where no primary is installed. But we want to consider
|
|
|
|
* those primaries "valid" if they have an activated backup nh.
|
|
|
|
*
|
|
|
|
* The logic is something like:
|
|
|
|
* if (!fib_nhg)
|
|
|
|
* // then all primaries are installed
|
|
|
|
* else
|
|
|
|
* for each primary in re nhg
|
|
|
|
* if in fib_nhg
|
|
|
|
* primary is installed
|
|
|
|
* else if a backup is installed
|
|
|
|
* primary counts as installed
|
|
|
|
* else
|
|
|
|
* primary !installed
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Start with the first primary */
|
|
|
|
if (nh == NULL)
|
|
|
|
nh = re->nhe->nhg.nexthop;
|
|
|
|
else
|
|
|
|
nh = nexthop_next(nh);
|
|
|
|
|
|
|
|
while (nh) {
|
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: checking primary NH %pNHv",
|
|
|
|
__func__, nh);
|
|
|
|
|
|
|
|
/* If this nexthop is in the fib list, it's installed */
|
|
|
|
nhg = rib_get_fib_nhg(re);
|
|
|
|
|
|
|
|
for (bnh = nhg->nexthop; bnh; bnh = nexthop_next(bnh)) {
|
|
|
|
if (nexthop_cmp(nh, bnh) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bnh != NULL) {
|
|
|
|
/* Found the match */
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: NH in fib list", __func__);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Else if this nexthop's backup is installed, it counts */
|
|
|
|
nhg = rib_get_fib_backup_nhg(re);
|
|
|
|
bnh = nhg->nexthop;
|
|
|
|
|
|
|
|
for (idx = 0; bnh != NULL; idx++) {
|
|
|
|
/* If we find an active backup nh for this
|
|
|
|
* primary, we're done;
|
|
|
|
*/
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: checking backup %pNHv [%d]",
|
|
|
|
__func__, bnh, idx);
|
|
|
|
|
|
|
|
if (!CHECK_FLAG(bnh->flags, NEXTHOP_FLAG_ACTIVE))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (i = 0; i < nh->backup_num; i++) {
|
|
|
|
/* Found a matching activated backup nh */
|
|
|
|
if (nh->backup_idx[i] == idx) {
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: backup %d activated",
|
|
|
|
__func__, i);
|
|
|
|
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Note that we're not recursing here if the
|
|
|
|
* backups are recursive: the primary's index is
|
|
|
|
* only valid in the top-level backup list.
|
|
|
|
*/
|
|
|
|
bnh = bnh->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Try the next primary nexthop */
|
|
|
|
nh = nexthop_next(nh);
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
|
|
|
|
|
|
|
return nh;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compare two route_entries' nexthops. Account for backup nexthops
|
|
|
|
* and for the 'fib' nexthop lists, if present.
|
2020-05-27 18:53:20 +02:00
|
|
|
*/
|
|
|
|
static bool compare_valid_nexthops(struct route_entry *r1,
|
|
|
|
struct route_entry *r2)
|
|
|
|
{
|
|
|
|
bool matched_p = false;
|
|
|
|
struct nexthop_group *nhg1, *nhg2;
|
|
|
|
struct nexthop *nh1, *nh2;
|
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
/* Start with the primary nexthops */
|
2020-05-27 18:53:20 +02:00
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
nh1 = next_valid_primary_nh(r1, NULL);
|
|
|
|
nh2 = next_valid_primary_nh(r2, NULL);
|
2020-05-27 18:53:20 +02:00
|
|
|
|
|
|
|
while (1) {
|
2021-03-01 16:49:32 +01:00
|
|
|
/* Find any differences in the nexthop lists */
|
2020-05-27 18:53:20 +02:00
|
|
|
|
|
|
|
if (nh1 && nh2) {
|
|
|
|
/* Any difference is a no-match */
|
|
|
|
if (nexthop_cmp(nh1, nh2) != 0) {
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
2021-03-01 16:49:32 +01:00
|
|
|
zlog_debug("%s: nh1: %pNHv, nh2: %pNHv differ",
|
|
|
|
__func__, nh1, nh2);
|
2020-05-27 18:53:20 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else if (nh1 || nh2) {
|
|
|
|
/* One list has more valid nexthops than the other */
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: nh1 %s, nh2 %s", __func__,
|
|
|
|
nh1 ? "non-NULL" : "NULL",
|
|
|
|
nh2 ? "non-NULL" : "NULL");
|
|
|
|
goto done;
|
|
|
|
} else
|
|
|
|
break; /* Done with both lists */
|
2021-03-01 16:49:32 +01:00
|
|
|
|
|
|
|
nh1 = next_valid_primary_nh(r1, nh1);
|
|
|
|
nh2 = next_valid_primary_nh(r2, nh2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If configured, don't compare installed backup state - we've
|
|
|
|
* accounted for that with the primaries above.
|
|
|
|
*
|
|
|
|
* But we do want to compare the routes' backup info,
|
|
|
|
* in case the owning route has changed the backups -
|
|
|
|
* that change we do want to report.
|
|
|
|
*/
|
|
|
|
if (rnh_hide_backups) {
|
|
|
|
uint32_t hash1 = 0, hash2 = 0;
|
|
|
|
|
|
|
|
if (r1->nhe->backup_info)
|
|
|
|
hash1 = nexthop_group_hash(
|
|
|
|
&r1->nhe->backup_info->nhe->nhg);
|
|
|
|
|
|
|
|
if (r2->nhe->backup_info)
|
|
|
|
hash2 = nexthop_group_hash(
|
|
|
|
&r2->nhe->backup_info->nhe->nhg);
|
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: backup hash1 %#x, hash2 %#x",
|
|
|
|
__func__, hash1, hash2);
|
|
|
|
|
|
|
|
if (hash1 != hash2)
|
|
|
|
goto done;
|
|
|
|
else
|
|
|
|
goto finished;
|
2020-05-27 18:53:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* The test for the backups is slightly different: the only installed
|
|
|
|
* backups will be in the 'fib' list.
|
|
|
|
*/
|
|
|
|
nhg1 = rib_get_fib_backup_nhg(r1);
|
|
|
|
nhg2 = rib_get_fib_backup_nhg(r2);
|
|
|
|
|
2020-07-17 19:10:29 +02:00
|
|
|
nh1 = nhg1->nexthop;
|
|
|
|
nh2 = nhg2->nexthop;
|
2020-05-27 18:53:20 +02:00
|
|
|
|
|
|
|
while (1) {
|
|
|
|
/* Find each backup list's next valid nexthop */
|
|
|
|
while ((nh1 != NULL) && !rnh_nexthop_valid(r1, nh1))
|
|
|
|
nh1 = nexthop_next(nh1);
|
|
|
|
|
|
|
|
while ((nh2 != NULL) && !rnh_nexthop_valid(r2, nh2))
|
|
|
|
nh2 = nexthop_next(nh2);
|
|
|
|
|
|
|
|
if (nh1 && nh2) {
|
|
|
|
/* Any difference is a no-match */
|
|
|
|
if (nexthop_cmp(nh1, nh2) != 0) {
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
2021-03-01 16:49:32 +01:00
|
|
|
zlog_debug("%s: backup nh1: %pNHv, nh2: %pNHv differ",
|
|
|
|
__func__, nh1, nh2);
|
2020-05-27 18:53:20 +02:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
nh1 = nexthop_next(nh1);
|
|
|
|
nh2 = nexthop_next(nh2);
|
|
|
|
} else if (nh1 || nh2) {
|
|
|
|
/* One list has more valid nexthops than the other */
|
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: backup nh1 %s, nh2 %s",
|
|
|
|
__func__,
|
|
|
|
nh1 ? "non-NULL" : "NULL",
|
|
|
|
nh2 ? "non-NULL" : "NULL");
|
|
|
|
goto done;
|
|
|
|
} else
|
|
|
|
break; /* Done with both lists */
|
|
|
|
}
|
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
finished:
|
2020-05-27 18:53:20 +02:00
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
/* Well, it's a match */
|
2020-05-27 18:53:20 +02:00
|
|
|
matched_p = true;
|
|
|
|
|
|
|
|
done:
|
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
|
|
|
|
zlog_debug("%s: %smatched",
|
|
|
|
__func__, (matched_p ? "" : "NOT "));
|
|
|
|
|
2020-05-27 18:53:20 +02:00
|
|
|
return matched_p;
|
|
|
|
}
|
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
/* Returns 'false' if no difference. */
|
|
|
|
static bool compare_state(struct route_entry *r1,
|
|
|
|
struct route_entry *r2)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
if (!r1 && !r2)
|
2021-03-01 16:49:32 +01:00
|
|
|
return false;
|
2015-05-20 02:40:34 +02:00
|
|
|
|
|
|
|
if ((!r1 && r2) || (r1 && !r2))
|
2021-03-01 16:49:32 +01:00
|
|
|
return true;
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2017-07-25 12:17:50 +02:00
|
|
|
if (r1->distance != r2->distance)
|
2021-03-01 16:49:32 +01:00
|
|
|
return true;
|
2017-07-25 12:17:50 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
if (r1->metric != r2->metric)
|
2021-03-01 16:49:32 +01:00
|
|
|
return true;
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2020-05-27 18:53:20 +02:00
|
|
|
if (!compare_valid_nexthops(r1, r2))
|
2021-03-01 16:49:32 +01:00
|
|
|
return true;
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2021-03-01 16:49:32 +01:00
|
|
|
return false;
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
2020-07-20 13:43:54 +02:00
|
|
|
int zebra_send_rnh_update(struct rnh *rnh, struct zserv *client,
|
2021-09-24 17:04:03 +02:00
|
|
|
vrf_id_t vrf_id, uint32_t srte_color)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2020-06-08 13:58:19 +02:00
|
|
|
struct stream *s = NULL;
|
2017-06-01 13:26:25 +02:00
|
|
|
struct route_entry *re;
|
2015-05-20 02:40:34 +02:00
|
|
|
unsigned long nump;
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t num;
|
2018-03-16 15:53:58 +01:00
|
|
|
struct nexthop *nh;
|
2015-05-20 02:40:34 +02:00
|
|
|
struct route_node *rn;
|
2020-06-08 13:58:19 +02:00
|
|
|
int ret;
|
2020-07-20 13:43:54 +02:00
|
|
|
uint32_t message = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
rn = rnh->node;
|
2015-05-20 03:04:20 +02:00
|
|
|
re = rnh->state;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
/* Get output stream. */
|
2018-03-07 00:08:37 +01:00
|
|
|
s = stream_new(ZEBRA_MAX_PACKET_SIZ);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-09-24 16:32:30 +02:00
|
|
|
zclient_create_header(s, ZEBRA_NEXTHOP_UPDATE, vrf_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-07-20 13:43:54 +02:00
|
|
|
/* Message flags. */
|
|
|
|
if (srte_color)
|
|
|
|
SET_FLAG(message, ZAPI_MESSAGE_SRTE);
|
|
|
|
stream_putl(s, message);
|
|
|
|
|
2022-03-12 16:47:16 +01:00
|
|
|
/*
|
|
|
|
* Put what we were told to match against
|
|
|
|
*/
|
2021-09-24 22:58:27 +02:00
|
|
|
stream_putw(s, rnh->safi);
|
2017-06-01 13:26:25 +02:00
|
|
|
stream_putw(s, rn->p.family);
|
2022-03-12 17:05:23 +01:00
|
|
|
stream_putc(s, rn->p.prefixlen);
|
2015-05-20 02:47:20 +02:00
|
|
|
switch (rn->p.family) {
|
|
|
|
case AF_INET:
|
2015-05-20 02:40:34 +02:00
|
|
|
stream_put_in_addr(s, &rn->p.u.prefix4);
|
|
|
|
break;
|
2016-09-02 16:32:14 +02:00
|
|
|
case AF_INET6:
|
2015-05-20 03:04:20 +02:00
|
|
|
stream_put(s, &rn->p.u.prefix6, IPV6_MAX_BYTELEN);
|
2015-05-20 02:40:34 +02:00
|
|
|
break;
|
2016-09-02 16:32:14 +02:00
|
|
|
default:
|
2018-09-13 21:21:05 +02:00
|
|
|
flog_err(EC_ZEBRA_RNH_UNKNOWN_FAMILY,
|
2021-02-14 15:35:07 +01:00
|
|
|
"%s: Unknown family (%d) notification attempted",
|
2020-03-17 13:57:42 +01:00
|
|
|
__func__, rn->p.family);
|
2020-06-08 13:58:19 +02:00
|
|
|
goto failure;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2022-03-12 16:47:16 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* What we matched against
|
|
|
|
*/
|
|
|
|
stream_putw(s, rnh->resolved_route.family);
|
|
|
|
stream_putc(s, rnh->resolved_route.prefixlen);
|
|
|
|
switch (rnh->resolved_route.family) {
|
|
|
|
case AF_INET:
|
|
|
|
stream_put_in_addr(s, &rnh->resolved_route.u.prefix4);
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
stream_put(s, &rnh->resolved_route.u.prefix6, IPV6_MAX_BYTELEN);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
flog_err(EC_ZEBRA_RNH_UNKNOWN_FAMILY,
|
|
|
|
"%s: Unknown family (%d) notification attempted",
|
|
|
|
__func__, rn->p.family);
|
|
|
|
goto failure;
|
|
|
|
}
|
|
|
|
|
2020-07-20 13:43:54 +02:00
|
|
|
if (srte_color)
|
|
|
|
stream_putl(s, srte_color);
|
|
|
|
|
2017-07-17 14:03:14 +02:00
|
|
|
if (re) {
|
2019-11-13 22:06:06 +01:00
|
|
|
struct zapi_nexthop znh;
|
2020-05-27 18:53:20 +02:00
|
|
|
struct nexthop_group *nhg;
|
2019-11-13 22:06:06 +01:00
|
|
|
|
2018-03-01 01:22:47 +01:00
|
|
|
stream_putc(s, re->type);
|
|
|
|
stream_putw(s, re->instance);
|
2017-06-01 13:26:25 +02:00
|
|
|
stream_putc(s, re->distance);
|
2015-05-20 03:04:20 +02:00
|
|
|
stream_putl(s, re->metric);
|
2017-07-17 14:03:14 +02:00
|
|
|
num = 0;
|
2015-05-20 02:40:34 +02:00
|
|
|
nump = stream_get_endp(s);
|
|
|
|
stream_putc(s, 0);
|
2020-05-27 18:53:20 +02:00
|
|
|
|
|
|
|
nhg = rib_get_fib_nhg(re);
|
|
|
|
for (ALL_NEXTHOPS_PTR(nhg, nh))
|
2019-01-14 22:37:53 +01:00
|
|
|
if (rnh_nexthop_valid(re, nh)) {
|
2019-11-13 22:06:06 +01:00
|
|
|
zapi_nexthop_from_nexthop(&znh, nh);
|
2020-07-20 13:43:54 +02:00
|
|
|
ret = zapi_nexthop_encode(s, &znh, 0, message);
|
2020-06-08 13:58:19 +02:00
|
|
|
if (ret < 0)
|
|
|
|
goto failure;
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
num++;
|
|
|
|
}
|
2020-05-27 18:53:20 +02:00
|
|
|
|
|
|
|
nhg = rib_get_fib_backup_nhg(re);
|
|
|
|
if (nhg) {
|
|
|
|
for (ALL_NEXTHOPS_PTR(nhg, nh))
|
|
|
|
if (rnh_nexthop_valid(re, nh)) {
|
|
|
|
zapi_nexthop_from_nexthop(&znh, nh);
|
2020-07-16 17:00:17 +02:00
|
|
|
ret = zapi_nexthop_encode(
|
2020-07-20 13:43:54 +02:00
|
|
|
s, &znh, 0 /* flags */,
|
|
|
|
0 /* message */);
|
2020-07-16 17:00:17 +02:00
|
|
|
if (ret < 0)
|
|
|
|
goto failure;
|
|
|
|
|
2020-05-27 18:53:20 +02:00
|
|
|
num++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
stream_putc_at(s, nump, num);
|
|
|
|
} else {
|
2018-03-01 01:22:47 +01:00
|
|
|
stream_putc(s, 0); // type
|
|
|
|
stream_putw(s, 0); // instance
|
2016-12-09 18:05:29 +01:00
|
|
|
stream_putc(s, 0); // distance
|
|
|
|
stream_putl(s, 0); // metric
|
|
|
|
stream_putc(s, 0); // nexthops
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-01-18 01:30:43 +01:00
|
|
|
client->nh_last_upd_time = monotime(NULL);
|
2018-04-24 00:35:35 +02:00
|
|
|
return zserv_send_message(client, s);
|
2020-06-08 13:58:19 +02:00
|
|
|
|
|
|
|
failure:
|
2020-06-10 20:57:30 +02:00
|
|
|
|
|
|
|
stream_free(s);
|
2020-06-08 13:58:19 +02:00
|
|
|
return -1;
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Render a nexthop into a json object; the caller allocates and owns
|
|
|
|
* the json object memory.
|
|
|
|
*/
|
|
|
|
void show_nexthop_json_helper(json_object *json_nexthop,
|
|
|
|
const struct nexthop *nexthop,
|
2024-05-14 16:28:17 +02:00
|
|
|
const struct route_node *rn,
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
const struct route_entry *re)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2024-01-03 21:33:58 +01:00
|
|
|
bool display_vrfid = false;
|
|
|
|
uint8_t rn_family;
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
2024-01-03 21:33:58 +01:00
|
|
|
if (re == NULL || nexthop->vrf_id != re->vrf_id)
|
|
|
|
display_vrfid = true;
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
2024-01-03 21:33:58 +01:00
|
|
|
if (rn)
|
|
|
|
rn_family = rn->p.family;
|
|
|
|
else
|
|
|
|
rn_family = AF_UNSPEC;
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
2024-01-03 21:33:58 +01:00
|
|
|
nexthop_json_helper(json_nexthop, nexthop, display_vrfid, rn_family);
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Helper for nexthop output, used in the 'show ip route' path
|
|
|
|
*/
|
2024-05-14 16:28:17 +02:00
|
|
|
void show_route_nexthop_helper(struct vty *vty, const struct route_node *rn,
|
|
|
|
const struct route_entry *re,
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
const struct nexthop *nexthop)
|
|
|
|
{
|
2024-01-03 21:33:58 +01:00
|
|
|
bool display_vrfid = false;
|
|
|
|
uint8_t rn_family;
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
2024-01-03 21:33:58 +01:00
|
|
|
if (re == NULL || nexthop->vrf_id != re->vrf_id)
|
|
|
|
display_vrfid = true;
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
2024-01-03 21:33:58 +01:00
|
|
|
if (rn)
|
|
|
|
rn_family = rn->p.family;
|
|
|
|
else
|
|
|
|
rn_family = AF_UNSPEC;
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
|
2024-01-03 21:33:58 +01:00
|
|
|
nexthop_vty_helper(vty, nexthop, display_vrfid, rn_family);
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
static void print_rnh(struct route_node *rn, struct vty *vty, json_object *json)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
struct rnh *rnh;
|
|
|
|
struct nexthop *nexthop;
|
|
|
|
struct listnode *node;
|
|
|
|
struct zserv *client;
|
|
|
|
char buf[BUFSIZ];
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
json_object *json_nht = NULL;
|
|
|
|
json_object *json_client_array = NULL;
|
|
|
|
json_object *json_client = NULL;
|
|
|
|
json_object *json_nexthop_array = NULL;
|
|
|
|
json_object *json_nexthop = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
rnh = rn->info;
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
|
|
|
|
if (json) {
|
|
|
|
json_nht = json_object_new_object();
|
|
|
|
json_nexthop_array = json_object_new_array();
|
|
|
|
json_client_array = json_object_new_array();
|
|
|
|
|
|
|
|
json_object_object_add(
|
|
|
|
json,
|
|
|
|
inet_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ),
|
|
|
|
json_nht);
|
|
|
|
json_object_boolean_add(
|
|
|
|
json_nht, "nhtConnected",
|
|
|
|
CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED));
|
|
|
|
json_object_object_add(json_nht, "clientList",
|
|
|
|
json_client_array);
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying afi of the nexthops being dispalyed in place of
"nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() inorder to keeps the fields in "nexthops"
JSON object in sync with "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
json_object_object_add(json_nht, "nexthops",
|
|
|
|
json_nexthop_array);
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
} else {
|
|
|
|
vty_out(vty, "%s%s\n",
|
|
|
|
inet_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ),
|
2017-07-13 19:42:42 +02:00
|
|
|
CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED)
|
|
|
|
? "(Connected)"
|
|
|
|
: "");
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (rnh->state) {
|
|
|
|
if (json)
|
|
|
|
json_object_string_add(
|
|
|
|
json_nht, "resolvedProtocol",
|
|
|
|
zebra_route_string(rnh->state->type));
|
|
|
|
else
|
|
|
|
vty_out(vty, " resolved via %s\n",
|
|
|
|
zebra_route_string(rnh->state->type));
|
|
|
|
|
|
|
|
for (nexthop = rnh->state->nhe->nhg.nexthop; nexthop;
|
|
|
|
nexthop = nexthop->next) {
|
|
|
|
if (json) {
|
|
|
|
json_nexthop = json_object_new_object();
|
|
|
|
json_object_array_add(json_nexthop_array,
|
|
|
|
json_nexthop);
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying the afi of the nexthops being displayed in place of
the "nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() in order to keep the fields in the "nexthops"
JSON object in sync with the "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
show_nexthop_json_helper(json_nexthop, nexthop,
|
2024-05-14 16:28:17 +02:00
|
|
|
rn, NULL);
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying the afi of the nexthops being displayed in place of
the "nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() in order to keep the fields in the "nexthops"
JSON object in sync with the "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
} else {
|
2024-05-14 16:28:17 +02:00
|
|
|
show_route_nexthop_helper(vty, rn, NULL,
|
|
|
|
nexthop);
|
zebra: fix JSON fields for "show ip/ipv6 nht"
1. Renamed "gates" to "nexthops"
2. Displaying the afi of the nexthops being displayed in place of
the "nexthops" JSON object in the old JSON output
3. Calling show_route_nexthop_helper() and show_nexthop_json_helper()
instead of print_nh() in order to keep the fields in the "nexthops"
JSON object in sync with the "nexthops" JSON object of
"show nexthop-group rib json".
Updated vtysh:
r1# show ip nht
192.168.0.2
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
192.168.0.4
resolved via connected
is directly connected, r1-eth0 (vrf default)
Client list: static(fd 28)
Updated JSON:
r1# show ip nht json
{
"default":{
"ipv4":{
"192.168.0.2":{
"nhtConnected":false,
"clientList":[
{
"protocol":"static",
"socket":28,
"protocolFiltered":"none"
}
],
"nexthops":[
{
"flags":3,
"fib":true,
"directlyConnected":true,
"interfaceIndex":2,
"interfaceName":"r1-eth0",
"vrf":"default",
"active":true
}
],
"resolvedProtocol":"connected"
}
}
}
}
Signed-off-by: Pooja Jagadeesh Doijode <pdoijode@nvidia.com>
2023-01-25 03:15:36 +01:00
|
|
|
vty_out(vty, "\n");
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (json)
|
|
|
|
json_object_boolean_add(
|
|
|
|
json_nht, "unresolved",
|
|
|
|
CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED));
|
|
|
|
else
|
|
|
|
vty_out(vty, " unresolved%s\n",
|
|
|
|
CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED)
|
|
|
|
? "(Connected)"
|
|
|
|
: "");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!json)
|
|
|
|
vty_out(vty, " Client list:");
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(rnh->client_list, node, client)) {
|
|
|
|
if (json) {
|
|
|
|
json_client = json_object_new_object();
|
|
|
|
json_object_array_add(json_client_array, json_client);
|
|
|
|
|
|
|
|
json_object_string_add(
|
|
|
|
json_client, "protocol",
|
|
|
|
zebra_route_string(client->proto));
|
|
|
|
json_object_int_add(json_client, "socket",
|
|
|
|
client->sock);
|
|
|
|
json_object_string_add(json_client, "protocolFiltered",
|
|
|
|
(rnh->filtered[client->proto]
|
|
|
|
? "(filtered)"
|
|
|
|
: "none"));
|
|
|
|
} else {
|
|
|
|
vty_out(vty, " %s(fd %d)%s",
|
|
|
|
zebra_route_string(client->proto), client->sock,
|
|
|
|
rnh->filtered[client->proto] ? "(filtered)"
|
|
|
|
: "");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!list_isempty(rnh->zebra_pseudowire_list)) {
|
|
|
|
if (json)
|
|
|
|
json_object_boolean_true_add(json_nht,
|
|
|
|
"zebraPseudowires");
|
|
|
|
else
|
|
|
|
vty_out(vty, " zebra[pseudowires]");
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
zebra: json support for show ip nht
show ip/ipv6 nht vrf <all | name> json support added.
Commands enhanced with JSON:
----------------------------
show ip nht json
show ip nht <addr> json
show ipv6 nht json
show ipv6 nht <addr> json
show ip nht vrf <name> json
show ip nht vrf all json
show ipv6 nht vrf <name> json
show ipv6 nht vrf all json
show ip nht vrf default <addr> json
show ipv6 nht vrf default <addr> json
Sample JSON output:
-------------------
tor-1# show ip nht vrf default json
{
"default":{
"nexthops":{
"27.0.0.5":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
},
"27.0.0.6":{
"nhtConnected":false,
"clientList":[
{
"protocol":"bgp",
"socket":70,
"protocolFiltered":"none"
}
],
"gates":[
{
"ip":"fe80::202:ff:fe00:2b",
"interface":"uplink_1"
},
{
"ip":"fe80::202:ff:fe00:35",
"interface":"uplink_2"
}
],
"resolvedProtocol":"bgp"
}
}
}
}
tor-1# show ipv6 nht vrf default json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
}
}
tor-1# show ipv6 nht vrf all json
{
"default": {
"nexthops": {
"fe80::202:ff:fe00:25": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
},
"fe80::202:ff:fe00:2b": {
"nhtConnected": true,
"clientList": [
{
"protocol": "bgp",
"socket": 45,
"protocolFiltered": "none"
}
],
"gates": [
{
"interface": "swp1",
"directlyConnected": true
}
],
"resolvedProtocol": "connected"
}
}
},
"mgmt": {
"nexthops": {}
},
"sym_1": {
"nexthops": {}
}
}
Ticket:#3229013
Issue:3229013
Testing Done: Unit test completed.
Signed-off-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Sindhu Parvathi Gopinathan <sgopinathan@nvidia.com>
2022-11-15 04:33:56 +01:00
|
|
|
if (!json)
|
|
|
|
vty_out(vty, "\n");
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
2018-04-22 22:01:20 +02:00
|
|
|
|
2021-10-21 15:17:12 +02:00
|
|
|
/*
 * Remove a single client's nexthop registrations from one
 * (vrf, afi, safi) rnh table.  Walks every node of that table and
 * detaches the client from any rnh object found there.
 *
 * Returns 1 on success, -1 when the rnh table cannot be located.
 */
static int zebra_cleanup_rnh_client(vrf_id_t vrf_id, afi_t afi, safi_t safi,
				    struct zserv *client)
{
	struct route_table *ntable;
	struct route_node *nrn;
	struct rnh *rnh;

	if (IS_ZEBRA_DEBUG_NHT) {
		struct vrf *vrf = vrf_lookup_by_id(vrf_id);

		zlog_debug("%s(%u): Client %s RNH cleanup for family %s",
			   VRF_LOGNAME(vrf), vrf_id,
			   zebra_route_string(client->proto), afi2str(afi));
	}

	ntable = get_rnh_table(vrf_id, afi, safi);
	if (!ntable) {
		/* Use __func__ rather than a hardcoded (and stale) name,
		 * per FRR logging convention.
		 */
		zlog_debug("%s: rnh table not found", __func__);
		return -1;
	}

	for (nrn = route_top(ntable); nrn; nrn = route_next(nrn)) {
		/* Nodes without an rnh attached carry no registrations */
		if (!nrn->info)
			continue;

		rnh = nrn->info;
		zebra_remove_rnh_client(rnh, client);
	}
	return 1;
}
|
|
|
|
|
|
|
|
/* Cleanup registered nexthops (across VRFs) upon client disconnect. */
|
2018-04-22 23:03:52 +02:00
|
|
|
static int zebra_client_cleanup_rnh(struct zserv *client)
|
2018-04-22 22:01:20 +02:00
|
|
|
{
|
|
|
|
struct vrf *vrf;
|
|
|
|
struct zebra_vrf *zvrf;
|
|
|
|
|
|
|
|
RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
|
2018-04-23 04:32:49 +02:00
|
|
|
zvrf = vrf->info;
|
|
|
|
if (zvrf) {
|
2021-10-21 15:17:12 +02:00
|
|
|
zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP,
|
|
|
|
SAFI_UNICAST, client);
|
|
|
|
zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP,
|
|
|
|
SAFI_MULTICAST, client);
|
2021-09-24 17:04:03 +02:00
|
|
|
zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP6,
|
2021-10-21 15:17:12 +02:00
|
|
|
SAFI_UNICAST, client);
|
|
|
|
zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP6,
|
|
|
|
SAFI_MULTICAST, client);
|
2018-04-22 22:01:20 +02:00
|
|
|
}
|
|
|
|
}
|
2018-04-22 23:03:52 +02:00
|
|
|
|
|
|
|
return 0;
|
2018-04-22 22:01:20 +02:00
|
|
|
}
|
2019-08-28 16:01:38 +02:00
|
|
|
|
|
|
|
int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family)
|
|
|
|
{
|
|
|
|
if (((family == AF_INET) && zvrf->zebra_rnh_ip_default_route)
|
|
|
|
|| ((family == AF_INET6) && zvrf->zebra_rnh_ipv6_default_route))
|
|
|
|
return 1;
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
}
|
2021-03-01 16:48:05 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* UI control to avoid notifications if backup nexthop status changes
|
|
|
|
*/
|
|
|
|
void rnh_set_hide_backups(bool hide_p)
|
|
|
|
{
|
|
|
|
rnh_hide_backups = hide_p;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool rnh_get_hide_backups(void)
|
|
|
|
{
|
|
|
|
return rnh_hide_backups;
|
|
|
|
}
|