// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PIM for Quagga
 * Copyright (C) 2015 Cumulus Networks, Inc.
 * Donald Sharp
 */
#include <zebra.h>

#include "lib/json.h"
#include "log.h"
#include "network.h"
#include "if.h"
#include "linklist.h"
#include "prefix.h"
#include "memory.h"
#include "vty.h"
#include "vrf.h"
#include "plist.h"
#include "nexthop.h"
#include "table.h"
#include "lib_errors.h"

#include "pimd.h"
#include "pim_instance.h"
#include "pim_vty.h"
#include "pim_str.h"
#include "pim_iface.h"
#include "pim_rp.h"
#include "pim_rpf.h"
#include "pim_sock.h"
#include "pim_memory.h"
#include "pim_neighbor.h"
#include "pim_msdp.h"
#include "pim_nht.h"
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_zebra.h"
#include "pim_bsm.h"
#include "pim_util.h"
#include "pim_ssm.h"
#include "termtable.h"

/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
{
	struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;

	list_delete(&pnc->rp_list);

	hash_clean_and_free(&pnc->upstream_hash, NULL);

	if (pnc->nexthop)
		nexthops_free(pnc->nexthop);

	XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}

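/* Free an rp_info entry, releasing its prefix-list name if one was set. */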
static void pim_rp_info_free(struct rp_info *rp_info)
{
	XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);

	XFREE(MTYPE_PIM_RP, rp_info);
}

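/* List comparison callback for pim->rp_list: order entries by RP address,
 * then by group prefix.
 */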
int pim_rp_list_cmp(void *v1, void *v2)
{
	struct rp_info *rp1 = (struct rp_info *)v1;
	struct rp_info *rp2 = (struct rp_info *)v2;
	int ret;

	/*
	 * Sort by RP IP address
	 */
	ret = pim_addr_cmp(rp1->rp.rpf_addr, rp2->rp.rpf_addr);
	if (ret)
		return ret;

	/*
	 * Sort by group IP address
	 */
	ret = prefix_cmp(&rp1->group, &rp2->group);
	if (ret)
		return ret;

	return 0;
}

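/* Initialize per-instance RP state: the RP list, the RP route table and a
 * catch-all rp_info covering the entire multicast group range.
 */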
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (!pim_get_all_mcast_group(&rp_info->group)) {
		flog_err(EC_LIB_DEVELOPMENT,
			 "Unable to convert all-multicast prefix");
		list_delete(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	rp_info->rp.rpf_addr = PIMADDR_ANY;

	listnode_add(pim->rp_list, rp_info);

	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;
	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   route_node_get_lock_count(rn));

#if PIM_IPV == 6
	/*
	 * Embedded RP defaults
	 */
	pim->embedded_rp.enable = false;
	pim->embedded_rp.group_list = NULL;
	pim->embedded_rp.maximum_rps = PIM_EMBEDDED_RP_MAXIMUM;

	pim->embedded_rp.table = route_table_init();
#endif /* PIM_IPV == 6 */
}

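/* Tear down per-instance RP state built by pim_rp_init(), including the
 * embedded-RP table when built for IPv6.
 */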
void pim_rp_free(struct pim_instance *pim)
{
#if PIM_IPV == 6
	struct route_node *rn;

	pim_embedded_rp_set_group_list(pim, NULL);

	for (rn = route_top(pim->embedded_rp.table); rn; rn = route_next(rn)) {
		if (rn->info == NULL)
			continue;

		pim_embedded_rp_free(pim, rn->info);
		rn->info = NULL;
	}

	route_table_finish(pim->embedded_rp.table);
	pim->embedded_rp.table = NULL;
#endif /* PIM_IPV == 6 */

	if (pim->rp_table)
		route_table_finish(pim->rp_table);
	pim->rp_table = NULL;

	if (pim->rp_list)
		list_delete(&pim->rp_list);
}

/*
 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
 */
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
					       pim_addr rp, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return rp_info;
		}
	}

	return NULL;
}

/*
 * Return true if plist is used by any rp_info
 */
static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return 1;
		}
	}

	return 0;
}

/*
 * Given an RP's address, return the RP's rp_info that is an exact match for
 * 'group'
 */
static struct rp_info *pim_rp_find_exact(struct pim_instance *pim, pim_addr rp,
					 const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if ((!pim_addr_cmp(rp, rp_info->rp.rpf_addr)) &&
		    prefix_same(&rp_info->group, group))
			return rp_info;
	}

	return NULL;
}

/*
 * XXX: long-term issue: we don't actually have a good "ip address-list"
 * implementation. ("access-list XYZ" is the closest but honestly it's
 * kinda garbage.)
 *
 * So it's using a prefix-list to match an address here, which causes very
 * unexpected results for the user since prefix-lists by default only match
 * when the prefix length is an exact match too. i.e. you'd have to add the
 * "le 32" and do "ip prefix-list foo permit 10.0.0.0/24 le 32"
 *
 * To avoid this pitfall, this code uses "address_mode = true" for the prefix
 * list match (this is the only user for that.)
 *
 * In the long run, we need to add a "ip address-list", but that's a wholly
 * separate bag of worms, and existing configs using ip prefix-list would
 * drop into the UX pitfall.
 */

#include "lib/plist_int.h"

/*
 * Given a group, return the rp_info for that group
 */
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
					const struct prefix *group)
{
	struct listnode *node;
	struct rp_info *best = NULL;
	struct rp_info *rp_info;
	struct prefix_list *plist;
	const struct prefix *bp;
	const struct prefix_list_entry *entry;
	struct route_node *rn;

#if PIM_IPV == 6
	/*
	 * Embedded RP search. Always try to match against embedded RP first.
	 */
	rn = route_node_match(pim->embedded_rp.table, group);
	if (rn != NULL) {
		rp_info = rn->info ? rn->info : NULL;

		if (rp_info && PIM_DEBUG_PIM_TRACE_DETAIL) {
			zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group, rn, &rp_info->group);
		}

		route_unlock_node(rn);
		if (rp_info)
			return rp_info;
	}
#endif /* PIM_IPV == 6 */

	bp = NULL;
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist) {
			plist = prefix_list_lookup(PIM_AFI, rp_info->plist);

			if (prefix_list_apply_ext(plist, &entry, group, true)
				    == PREFIX_DENY || !entry)
				continue;

			if (!best) {
				best = rp_info;
				bp = &entry->prefix;
				continue;
			}

			if (bp && bp->prefixlen < entry->prefix.prefixlen) {
				best = rp_info;
				bp = &entry->prefix;
			}
		}
	}

	rn = route_node_match(pim->rp_table, group);
	if (!rn) {
		flog_err(
			EC_LIB_DEVELOPMENT,
			"%s: BUG We should have found default group information",
			__func__);
		return best;
	}

	rp_info = rn->info;
	if (PIM_DEBUG_PIM_TRACE_DETAIL) {
		if (best)
			zlog_debug(
				"Lookedup(%pFX): prefix_list match %s, rn %p found: %pFX",
				group, best->plist, rn, &rp_info->group);
		else
			zlog_debug("Lookedup(%pFX): rn %p found:%pFX", group,
				   rn, &rp_info->group);
	}

	route_unlock_node(rn);

	/*
	 * RPs with prefix lists have the group as 224.0.0.0/4 which will
	 * match anything. So if we have an rp_info that should match a prefix
	 * list then if we do match then best should be the answer (even
	 * if it is NULL)
	 */
	if (!rp_info || (rp_info && rp_info->plist))
		return best;

	/*
	 * So we have a non plist rp_info found in the lookup and no plists
	 * at all to be chosen, return it!
	 */
	if (!best)
		return rp_info;

	/*
	 * If we have a matching non prefix list and a matching prefix
	 * list we should return the actual rp_info that has the LPM.
	 * If they are equal, use the prefix-list (but let's hope
	 * the end-operator doesn't do this)
	 */
	if (rp_info->group.prefixlen > bp->prefixlen)
		best = rp_info;

	return best;
}

/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
	pim_upstream_reeval_use_rpt(pim);
}

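/* Called when a prefix-list changes; if any configured RP references it,
 * refresh the group-to-RP mapping.
 */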
void pim_rp_prefix_list_update(struct pim_instance *pim,
			       struct prefix_list *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int refresh_needed = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist
		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
			refresh_needed = 1;
			break;
		}
	}

	if (refresh_needed)
		pim_rp_refresh_group_to_rp_mapping(pim);
}

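/* Return 1 if any address (primary or secondary) on the given interface
 * matches the RP address of rp_info, 0 otherwise.
 */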
static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
					struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct pim_secondary_addr *sec_addr;
	pim_addr sec_paddr;

	if (!pim_addr_cmp(pim_ifp->primary_address, rp_info->rp.rpf_addr))
		return 1;

	if (!pim_ifp->sec_addr_list)
		return 0;

	for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
		sec_paddr = pim_addr_from_prefix(&sec_addr->addr);
		/* If the RP address is one of our own, that is enough to say
		 * we are the RP; the prefix length does not matter here */
		if (!pim_addr_cmp(sec_paddr, rp_info->rp.rpf_addr))
			return 1;
	}

	return 0;
}

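/* Recompute rp_info->i_am_rp by checking every PIM-enabled interface in the
 * instance's VRF for an address matching the RP.
 */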
static void pim_rp_check_interfaces(struct pim_instance *pim,
				    struct rp_info *rp_info)
{
	struct interface *ifp;

	rp_info->i_am_rp = 0;
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			rp_info->i_am_rp = 1;
		}
	}
}

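/* Re-resolve the upstream (RPF) address for 'up' after an RP change and, if
 * it changed, move NHT tracking to the new address and update the RPF and
 * kernel MFC state.
 */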
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
{
	struct pim_rpf old_rpf;
	enum pim_rpf_result rpf_result;
	pim_addr old_upstream_addr;
	pim_addr new_upstream_addr;

	old_upstream_addr = up->upstream_addr;
	pim_rp_set_upstream_addr(pim, &new_upstream_addr, up->sg.src,
				 up->sg.grp);

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: pim upstream update for old upstream %pPA",
			   __func__, &old_upstream_addr);

	if (!pim_addr_cmp(old_upstream_addr, new_upstream_addr))
		return;

	/* Lets consider a case, where a PIM upstream has a better RP as a
	 * result of a new RP configuration with more precise group range.
	 * This upstream has to be added to the upstream hash of new RP's
	 * NHT(pnc) and has to be removed from old RP's NHT upstream hash
	 */
	if (!pim_addr_is_any(old_upstream_addr)) {
		/* Deregister addr with Zebra NHT */
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: Deregister upstream %s addr %pPA with Zebra NHT",
				__func__, up->sg_str, &old_upstream_addr);
		pim_delete_tracked_nexthop(pim, old_upstream_addr, up, NULL);
	}

	/* Update the upstream address */
	up->upstream_addr = new_upstream_addr;

	old_rpf.source_nexthop.interface = up->rpf.source_nexthop.interface;

	rpf_result = pim_rpf_update(pim, up, &old_rpf, __func__);
	if (rpf_result == PIM_RPF_FAILURE)
		pim_mroute_del(up->channel_oil, __func__);

	/* update kernel multicast forwarding cache (MFC) */
	if (up->rpf.source_nexthop.interface && up->channel_oil)
		pim_upstream_mroute_iif_update(up->channel_oil, __func__);

	if (rpf_result == PIM_RPF_CHANGED ||
	    (rpf_result == PIM_RPF_FAILURE &&
	     old_rpf.source_nexthop.interface))
		pim_zebra_upstream_rpf_changed(pim, up, &old_rpf);

}

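/* Add a new RP configuration (static or learned) for a group range or
 * prefix-list. Returns PIM_SUCCESS or a PIM_RP_* / PIM_GROUP_* error code.
 */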
int pim_rp_new(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	pim_addr nht_p;
	struct route_node *rn = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;

	if (pim_addr_is_any(rp_addr))
		return PIM_RP_BAD_ADDRESS;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	rp_info->rp.rpf_addr = rp_addr;
	prefix_copy(&rp_info->group, &group);
	rp_info->rp_src = rp_src_flag;

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_addr, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (!pim_addr_cmp(rp_info->rp.rpf_addr,
					  tmp_rp_info->rp.rpf_addr)) {
				if (tmp_rp_info->plist)
					pim_rp_del_config(pim, rp_addr, NULL,
							  tmp_rp_info->plist);
				else
					pim_rp_del_config(
						pim, rp_addr,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!pim_get_all_mcast_group(&group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist &&
			    (!pim_addr_cmp(rp_info->rp.rpf_addr,
					   tmp_rp_info->rp.rpf_addr))) {
				pim_rp_del_config(pim, rp_addr, NULL,
						  tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_ANY
		 */
		if (prefix_same(&rp_all->group, &rp_info->group) &&
		    pim_rpf_addr_is_inaddr_any(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			rp_all->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p = rp_all->rp.rpf_addr;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"%s: NHT Register rp_all addr %pPA grp %pFX ",
					__func__, &nht_p, &rp_all->group);

			frr_each (rb_pim_upstream, &pim->upstream_head, up) {
				/* Find (*, G) upstream whose RP is not
				 * configured yet
				 */
				if (pim_addr_is_any(up->upstream_addr) &&
				    pim_addr_is_any(up->sg.src)) {
					struct prefix grp;
					struct rp_info *trp_info;

					pim_addr_to_prefix(&grp, up->sg.grp);
					trp_info = pim_rp_find_match_group(
						pim, &grp);
					if (trp_info == rp_all) {
						pim_upstream_update(pim, up);
						upstream_updated = true;
					}
				}
			}
			if (upstream_updated)
				pim_zebra_update_all_interfaces(pim);

			pim_rp_check_interfaces(pim, rp_all);
			if (rp_all->i_am_rp && PIM_DEBUG_PIM_NHT_RP)
				zlog_debug("new RP %pPA for %pFX is ourselves",
					   &rp_all->rp.rpf_addr, &rp_all->group);
			pim_rp_refresh_group_to_rp_mapping(pim);
			pim_find_or_track_nexthop(pim, nht_p, NULL, rp_all,
						  NULL);

			if (!pim_ecmp_nexthop_lookup(pim,
						     &rp_all->rp.source_nexthop,
						     nht_p, &rp_all->group, 1))
				return PIM_RP_NO_PATH;
			return PIM_SUCCESS;
		}

		/*
		 * Return if the group is already configured for this RP
		 */
		tmp_rp_info = pim_rp_find_exact(pim, rp_addr, &rp_info->group);
		if (tmp_rp_info) {
			if ((tmp_rp_info->rp_src != rp_src_flag)
			    && (rp_src_flag == RP_SRC_STATIC))
				tmp_rp_info->rp_src = rp_src_flag;
			XFREE(MTYPE_PIM_RP, rp_info);
			return result;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for
				 * 224.0.0.0/4 that is fine, ignore that one.
				 * For all others
				 * though we must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					if ((rp_src_flag == RP_SRC_STATIC)
					    && (tmp_rp_info->rp_src
						== RP_SRC_STATIC)) {
						XFREE(MTYPE_PIM_RP, rp_info);
						return PIM_GROUP_OVERLAP;
					}

					result = pim_rp_change(
						pim, rp_addr,
						tmp_rp_info->group,
						rp_src_flag);
					XFREE(MTYPE_PIM_RP, rp_info);
					return result;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);

	if (!rp_info->plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		rn->info = rp_info;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("Allocated: %p for rp_info: %p(%pFX) Lock: %d", rn,
			   rp_info, &rp_info->group,
			   rn ? route_node_get_lock_count(rn) : 0);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	pim_rp_check_interfaces(pim, rp_info);
	if (rp_info->i_am_rp && PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("new RP %pPA for %pFX is ourselves",
			   &rp_info->rp.rpf_addr, &rp_info->group);
	pim_rp_refresh_group_to_rp_mapping(pim);

	/* Register addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);
	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1))
		return PIM_RP_NO_PATH;

	return PIM_SUCCESS;
}

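/* Remove a statically configured RP; parse the optional group range string
 * and delegate to pim_rp_del().
 */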
void pim_rp_del_config(struct pim_instance *pim, pim_addr rp_addr,
		       const char *group_range, const char *plist)
{
	struct prefix group;
	int result;

	if (group_range == NULL)
		result = pim_get_all_mcast_group(&group);
	else
		result = str2prefix(group_range, &group);

	if (!result) {
		if (PIM_DEBUG_PIM_TRACE)
			zlog_debug(
				"%s: String to prefix failed for %pPAs group",
				__func__, &rp_addr);
		return;
	}

	pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
}

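/* Delete an RP for the given group range or prefix-list. If a static RP is
 * removed and a BSR-learned RP exists for the same group, switch to it;
 * otherwise clear the affected upstream state.
 */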
int pim_rp_del(struct pim_instance *pim, pim_addr rp_addr, struct prefix group,
	       const char *plist, enum rp_source rp_src_flag)
{
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	pim_addr nht_p;
	struct route_node *rn;
	bool was_plist = false;
	struct rp_info *trp_info;
	struct pim_upstream *up;
	struct bsgrp_node *bsgrp = NULL;
	struct bsm_rpinfo *bsrp = NULL;
	bool upstream_updated = false;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		was_plist = true;
	}

	if (PIM_DEBUG_PIM_TRACE)
		zlog_debug("%s: Delete RP %pPA for the group %pFX", __func__,
			   &rp_addr, &group);

	/* While static RP is getting deleted, we need to check if dynamic RP
	 * present for the same group in BSM RP table, then install the dynamic
	 * RP for the group node into the main rp table
	 */
	if (rp_src_flag == RP_SRC_STATIC) {
		bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);

		if (bsgrp) {
			bsrp = bsm_rpinfos_first(bsgrp->bsrp_list);
			if (bsrp) {
				if (PIM_DEBUG_PIM_TRACE)
					zlog_debug(
						"%s: BSM RP %pPA found for the group %pFX",
						__func__, &bsrp->rp_address,
						&group);
				return pim_rp_change(pim, bsrp->rp_address,
						     group, RP_SRC_BSR);
			}
		} else {
			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s: BSM RP not found for the group %pFX",
					__func__, &group);
		}
	}

	/* Deregister addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: Deregister RP addr %pPA with Zebra ", __func__,
			   &nht_p);
	pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);

	if (!pim_get_all_mcast_group(&g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	if (rp_all == rp_info) {
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			/* Find the upstream (*, G) whose upstream address is
			 * same as the deleted RP
			 */
			pim_addr rpf_addr;

			rpf_addr = rp_info->rp.rpf_addr;
			if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
			    pim_addr_is_any(up->sg.src)) {
				struct prefix grp;

				pim_addr_to_prefix(&grp, up->sg.grp);
				trp_info = pim_rp_find_match_group(pim, &grp);
				if (trp_info == rp_all) {
					pim_upstream_rpf_clear(pim, up);
					up->upstream_addr = PIMADDR_ANY;
				}
			}
		}
		rp_all->rp.rpf_addr = PIMADDR_ANY;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				flog_err(
					EC_LIB_DEVELOPMENT,
					"Expected rn->info to be equal to rp_info");

			if (PIM_DEBUG_PIM_TRACE)
				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%pFX) Lock: %d",
					__func__, rn, rp_info, &rp_info->group,
					route_node_get_lock_count(rn));

			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		/* Find the upstream (*, G) whose upstream address is same as
		 * the deleted RP
		 */
		pim_addr rpf_addr;

		rpf_addr = rp_info->rp.rpf_addr;
		if (!pim_addr_cmp(up->upstream_addr, rpf_addr) &&
		    pim_addr_is_any(up->sg.src)) {
			struct prefix grp;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			/* RP not found for the group grp */
			if (pim_rpf_addr_is_inaddr_any(&trp_info->rp)) {
				pim_upstream_rpf_clear(pim, up);
				pim_rp_set_upstream_addr(
					pim, &up->upstream_addr, up->sg.src,
					up->sg.grp);
			}

			/* RP found for the group grp */
			else {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}

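/* Replace the RP address for an existing group range, re-registering NHT
 * tracking and re-evaluating the affected (*,G) upstream entries.
 */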
int pim_rp_change(struct pim_instance *pim, pim_addr new_rp_addr,
		  struct prefix group, enum rp_source rp_src_flag)
{
	pim_addr nht_p;
	struct route_node *rn;
	int result = 0;
	struct rp_info *rp_info = NULL;
	struct pim_upstream *up;
	bool upstream_updated = false;
	pim_addr old_rp_addr;

	rn = route_node_lookup(pim->rp_table, &group);
	if (!rn) {
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	rp_info = rn->info;

	if (!rp_info) {
		route_unlock_node(rn);
		result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
		return result;
	}

	old_rp_addr = rp_info->rp.rpf_addr;
	if (!pim_addr_cmp(new_rp_addr, old_rp_addr)) {
		if (rp_info->rp_src != rp_src_flag) {
			rp_info->rp_src = rp_src_flag;
			route_unlock_node(rn);
			return PIM_SUCCESS;
		}
	}

	/* Deregister old RP addr with Zebra NHT */

	if (!pim_addr_is_any(old_rp_addr)) {
		nht_p = rp_info->rp.rpf_addr;
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Deregister RP addr %pPA with Zebra ",
				   __func__, &nht_p);
		pim_delete_tracked_nexthop(pim, nht_p, NULL, rp_info);
	}

	pim_rp_nexthop_del(rp_info);
	listnode_delete(pim->rp_list, rp_info);

	/* Update the new RP address */
	rp_info->rp.rpf_addr = new_rp_addr;
	rp_info->rp_src = rp_src_flag;
	rp_info->i_am_rp = 0;

	listnode_add_sort(pim->rp_list, rp_info);

	frr_each (rb_pim_upstream, &pim->upstream_head, up) {
		if (pim_addr_is_any(up->sg.src)) {
			struct prefix grp;
			struct rp_info *trp_info;

			pim_addr_to_prefix(&grp, up->sg.grp);
			trp_info = pim_rp_find_match_group(pim, &grp);

			if (trp_info == rp_info) {
				pim_upstream_update(pim, up);
				upstream_updated = true;
			}
		}
	}

	if (upstream_updated)
		pim_zebra_update_all_interfaces(pim);

	/* Register new RP addr with Zebra NHT */
	nht_p = rp_info->rp.rpf_addr;
	if (PIM_DEBUG_PIM_NHT_RP)
		zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra ",
			   __func__, &nht_p, &rp_info->group);

	pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
	if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, nht_p,
				     &rp_info->group, 1)) {
		route_unlock_node(rn);
		return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);

	route_unlock_node(rn);

	pim_rp_refresh_group_to_rp_mapping(pim);

	return result;
}

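/* Register every configured RP address for nexthop tracking and resolve its
 * nexthop; clear the cached nexthop when no path is found.
 */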
void pim_rp_setup(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	pim_addr nht_p;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		nht_p = rp_info->rp.rpf_addr;

		pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					     nht_p, &rp_info->group, 1)) {
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug(
					"Unable to lookup nexthop for rp specified");
			pim_rp_nexthop_del(rp_info);
		}
	}
}

/*
 * Checks to see if we should elect ourself the actual RP when new if
 * addresses are added against an interface.
 */
void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	struct pim_instance *pim = pim_ifp->pim;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		/* if i_am_rp is already set nothing to be done (adding new
		 * addresses
		 * is not going to make a difference). */
		if (rp_info->i_am_rp) {
			continue;
		}

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			i_am_rp_changed = true;
			rp_info->i_am_rp = 1;
			if (PIM_DEBUG_PIM_NHT_RP)
				zlog_debug("%s: %pPA: i am rp", __func__,
					   &rp_info->rp.rpf_addr);
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}

/* Un-optimized re-evaluation of "i_am_rp". This is used when interface
 * addresses are removed. Removing addresses is an uncommon event in an active
 * network so I have made no attempt to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	int old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;
			if (PIM_DEBUG_PIM_NHT_RP) {
				if (rp_info->i_am_rp)
					zlog_debug("%s: %pPA: i am rp",
						   __func__,
						   &rp_info->rp.rpf_addr);
				else
					zlog_debug(
						"%s: %pPA: i am no longer rp",
						__func__,
						&rp_info->rp.rpf_addr);
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
		pim_upstream_reeval_use_rpt(pim);
	}
}

/*
 * I_am_RP(G) is true if the group-to-RP mapping indicates that
 * this router is the RP for the group.
 *
 * Since we only have static RP, all groups are part of this RP
 */
int pim_rp_i_am_rp(struct pim_instance *pim, pim_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	pim_addr_to_prefix(&g, group);
	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info)
		return rp_info->i_am_rp;
	return 0;
}

/*
|
|
|
|
* RP(G)
|
|
|
|
*
|
|
|
|
* Return the RP that the Group belongs too.
|
|
|
|
*/
|
2022-01-05 19:38:17 +01:00
|
|
|
struct pim_rpf *pim_rp_g(struct pim_instance *pim, pim_addr group)
|
2015-10-02 19:34:11 +02:00
|
|
|
{
|
2016-08-05 19:08:06 +02:00
|
|
|
struct prefix g;
|
|
|
|
struct rp_info *rp_info;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-08-05 19:08:06 +02:00
|
|
|
memset(&g, 0, sizeof(g));
|
2022-02-21 12:27:35 +01:00
|
|
|
pim_addr_to_prefix(&g, group);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-20 19:43:58 +02:00
|
|
|
rp_info = pim_rp_find_match_group(pim, &g);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-08-05 19:08:06 +02:00
|
|
|
if (rp_info) {
|
2022-04-27 10:20:38 +02:00
|
|
|
pim_addr nht_p;
|
2019-04-02 15:40:41 +02:00
|
|
|
|
2023-02-25 09:33:13 +01:00
|
|
|
if (pim_addr_is_any(rp_info->rp.rpf_addr)) {
|
|
|
|
if (PIM_DEBUG_PIM_NHT_RP)
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Skipping NHT Register since RP is not configured for the group %pPA",
|
|
|
|
__func__, &group);
|
|
|
|
return &rp_info->rp;
|
|
|
|
}
|
|
|
|
|
2017-04-05 22:14:12 +02:00
|
|
|
/* Register addr with Zebra NHT */
|
2022-02-21 12:27:35 +01:00
|
|
|
nht_p = rp_info->rp.rpf_addr;
|
2020-10-18 13:33:54 +02:00
|
|
|
if (PIM_DEBUG_PIM_NHT_RP)
|
2017-04-22 00:08:03 +02:00
|
|
|
zlog_debug(
|
2022-04-27 10:20:38 +02:00
|
|
|
"%s: NHT Register RP addr %pPA grp %pFX with Zebra",
|
2020-10-18 13:33:54 +02:00
|
|
|
__func__, &nht_p, &rp_info->group);
|
2022-05-23 14:18:28 +02:00
|
|
|
pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, NULL);
|
2019-04-02 15:40:41 +02:00
|
|
|
pim_rpf_set_refresh_time(pim);
|
|
|
|
(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
|
2022-05-23 14:18:28 +02:00
|
|
|
nht_p, &rp_info->group, 1);
|
2016-08-05 19:08:06 +02:00
|
|
|
return (&rp_info->rp);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-08-05 19:08:06 +02:00
|
|
|
// No matching RP for this group; about to go down
|
|
|
|
return NULL;
|
2015-10-02 19:34:11 +02:00
|
|
|
}
|
|
|
|
|
2015-09-30 14:41:18 +02:00
|
|
|
/*
|
|
|
|
* Set the upstream IP address we want to talk to based upon
|
|
|
|
* the rp configured and the source address
|
|
|
|
*
|
|
|
|
* If we don't have an RP configured and the source address is *
|
2019-02-22 12:05:29 +01:00
|
|
|
* then set the upstream addr as PIMADDR_ANY and return failure.
|
2015-09-30 14:41:18 +02:00
|
|
|
*
|
|
|
|
*/
|
2022-01-05 19:38:17 +01:00
|
|
|
int pim_rp_set_upstream_addr(struct pim_instance *pim, pim_addr *up,
|
|
|
|
pim_addr source, pim_addr group)
|
2015-09-30 14:41:18 +02:00
|
|
|
{
|
2016-08-05 19:08:06 +02:00
|
|
|
struct rp_info *rp_info;
|
2024-06-26 16:13:50 +02:00
|
|
|
struct prefix g = {};
|
2016-08-05 19:08:06 +02:00
|
|
|
|
2024-06-26 16:13:50 +02:00
|
|
|
if (!pim_addr_is_any(source)) {
|
|
|
|
*up = source;
|
|
|
|
return 1;
|
|
|
|
}
|
2022-02-17 19:54:47 +01:00
|
|
|
|
|
|
|
pim_addr_to_prefix(&g, group);
|
2017-05-20 19:43:58 +02:00
|
|
|
rp_info = pim_rp_find_match_group(pim, &g);
|
2016-08-05 19:08:06 +02:00
|
|
|
|
2024-06-26 16:13:50 +02:00
|
|
|
if (!rp_info || pim_rpf_addr_is_inaddr_any(&rp_info->rp)) {
|
2017-09-14 16:28:04 +02:00
|
|
|
if (PIM_DEBUG_PIM_NHT_RP)
|
2015-09-30 14:41:18 +02:00
|
|
|
zlog_debug("%s: Received a (*,G) with no RP configured",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__);
|
2022-02-17 19:54:47 +01:00
|
|
|
*up = PIMADDR_ANY;
|
2015-09-30 14:41:18 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-06-26 16:13:50 +02:00
|
|
|
*up = rp_info->rp.rpf_addr;
|
2015-09-30 14:41:18 +02:00
|
|
|
return 1;
|
|
|
|
}
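/*
 * Illustrative behaviour (the addresses below are hypothetical, chosen only
 * for the example): assuming a static RP of 192.0.2.1 covering 224.0.0.0/4,
 * pim_rp_set_upstream_addr() resolves as follows:
 *   - (S,G) join with S = 10.0.0.10       -> *up = 10.0.0.10,   returns 1
 *   - (*,G) join with G = 239.1.1.1       -> *up = 192.0.2.1,   returns 1
 *   - (*,G) join with no RP covering G    -> *up = PIMADDR_ANY, returns 0
 */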
|
2016-08-05 15:07:46 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
int pim_rp_config_write(struct pim_instance *pim, struct vty *vty)
|
2016-08-05 15:07:46 +02:00
|
|
|
{
|
2016-08-05 19:08:06 +02:00
|
|
|
struct listnode *node;
|
|
|
|
struct rp_info *rp_info;
|
|
|
|
int count = 0;
|
2022-03-07 08:34:19 +01:00
|
|
|
pim_addr rp_addr;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-20 19:43:58 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
|
2022-02-25 14:17:08 +01:00
|
|
|
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
|
2016-08-05 19:08:06 +02:00
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2024-09-17 04:32:59 +02:00
|
|
|
if (rp_info->rp_src != RP_SRC_NONE &&
|
|
|
|
rp_info->rp_src != RP_SRC_STATIC)
|
2019-05-02 18:23:48 +02:00
|
|
|
continue;
|
|
|
|
|
2022-04-27 10:20:38 +02:00
|
|
|
rp_addr = rp_info->rp.rpf_addr;
|
2016-09-13 21:41:33 +02:00
|
|
|
if (rp_info->plist)
|
2024-06-12 18:26:48 +02:00
|
|
|
vty_out(vty, " rp %pPA prefix-list %s\n", &rp_addr,
|
|
|
|
rp_info->plist);
|
2016-09-13 21:41:33 +02:00
|
|
|
else
|
2024-06-12 18:26:48 +02:00
|
|
|
vty_out(vty, " rp %pPA %pFX\n", &rp_addr,
|
|
|
|
&rp_info->group);
|
2016-09-13 21:41:33 +02:00
|
|
|
count++;
|
2016-08-05 15:07:46 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-08-05 19:08:06 +02:00
|
|
|
return count;
|
2016-08-05 15:07:46 +02:00
|
|
|
}
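/*
 * Example of the lines emitted by the loop above when the configuration is
 * written (addresses and the prefix-list name are made up for illustration;
 * the leading space is kept because the lines sit under a parent
 * configuration node):
 *
 *    rp 192.0.2.1 224.0.0.0/4
 *    rp 192.0.2.2 prefix-list GROUP-RANGES
 *
 * Dynamically learned RPs (BSR, AutoRP, embedded-RP) are skipped, so only
 * static entries end up in the written configuration.
 */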
|
|
|
|
|
2022-03-02 12:30:51 +01:00
|
|
|
void pim_rp_show_information(struct pim_instance *pim, struct prefix *range,
|
2022-02-07 11:13:16 +01:00
|
|
|
struct vty *vty, json_object *json)
|
2016-08-23 22:22:14 +02:00
|
|
|
{
|
|
|
|
struct rp_info *rp_info;
|
2016-09-14 17:12:13 +02:00
|
|
|
struct rp_info *prev_rp_info = NULL;
|
2016-08-23 22:22:14 +02:00
|
|
|
struct listnode *node;
|
2022-08-16 17:21:50 +02:00
|
|
|
struct ttable *tt = NULL;
|
|
|
|
char *table = NULL;
|
2019-05-02 10:35:35 +02:00
|
|
|
char source[16]; /* long enough for "Embedded-RP" */
|
2022-08-16 17:21:50 +02:00
|
|
|
char grp[INET6_ADDRSTRLEN];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-09-14 17:12:13 +02:00
|
|
|
json_object *json_rp_rows = NULL;
|
|
|
|
json_object *json_row = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-08-16 17:21:50 +02:00
|
|
|
if (!json) {
|
|
|
|
/* Prepare table. */
|
|
|
|
tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
|
|
|
|
ttable_add_row(
|
|
|
|
tt,
|
|
|
|
"RP address|group/prefix-list|OIF|I am RP|Source|Group-Type");
|
|
|
|
tt->style.cell.rpad = 2;
|
|
|
|
tt->style.corner = '+';
|
|
|
|
ttable_restyle(tt);
|
|
|
|
}
|
|
|
|
|
2017-05-20 19:43:58 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
|
2022-03-11 11:32:26 +01:00
|
|
|
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
|
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-03-02 12:30:51 +01:00
|
|
|
#if PIM_IPV == 4
|
|
|
|
pim_addr group = rp_info->group.u.prefix4;
|
|
|
|
#else
|
|
|
|
pim_addr group = rp_info->group.u.prefix6;
|
|
|
|
#endif
|
|
|
|
const char *group_type =
|
|
|
|
pim_is_grp_ssm(pim, group) ? "SSM" : "ASM";
|
|
|
|
|
2022-03-17 08:31:05 +01:00
|
|
|
if (range && !prefix_match(&rp_info->group, range))
|
2022-03-02 12:30:51 +01:00
|
|
|
continue;
|
|
|
|
|
2022-03-11 11:32:26 +01:00
|
|
|
if (rp_info->rp_src == RP_SRC_STATIC)
|
|
|
|
strlcpy(source, "Static", sizeof(source));
|
|
|
|
else if (rp_info->rp_src == RP_SRC_BSR)
|
|
|
|
strlcpy(source, "BSR", sizeof(source));
|
2024-09-17 04:32:59 +02:00
|
|
|
else if (rp_info->rp_src == RP_SRC_AUTORP)
|
|
|
|
strlcpy(source, "AutoRP", sizeof(source));
|
2024-10-02 14:22:48 +02:00
|
|
|
#if PIM_IPV == 6
|
|
|
|
else if (rp_info->rp_src == RP_SRC_EMBEDDED_RP)
|
|
|
|
strlcpy(source, "Embedded-RP", sizeof(source));
|
|
|
|
#endif /* PIM_IPV == 6 */
|
2022-03-11 11:32:26 +01:00
|
|
|
else
|
|
|
|
strlcpy(source, "None", sizeof(source));
|
2022-02-07 11:13:16 +01:00
|
|
|
if (json) {
|
2022-03-11 11:32:26 +01:00
|
|
|
/*
|
|
|
|
* If we have moved on to a new RP then add the
|
|
|
|
* entry for the previous RP
|
|
|
|
*/
|
|
|
|
if (prev_rp_info &&
|
2022-04-27 10:20:38 +02:00
|
|
|
(pim_addr_cmp(prev_rp_info->rp.rpf_addr,
|
|
|
|
rp_info->rp.rpf_addr))) {
|
2022-03-10 14:08:25 +01:00
|
|
|
json_object_object_addf(
|
2022-04-27 10:20:38 +02:00
|
|
|
json, json_rp_rows, "%pPA",
|
2022-03-10 14:08:25 +01:00
|
|
|
&prev_rp_info->rp.rpf_addr);
|
2022-03-11 11:32:26 +01:00
|
|
|
json_rp_rows = NULL;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-03-11 11:32:26 +01:00
|
|
|
if (!json_rp_rows)
|
|
|
|
json_rp_rows = json_object_new_array();
|
|
|
|
|
|
|
|
json_row = json_object_new_object();
|
2022-04-27 10:20:38 +02:00
|
|
|
json_object_string_addf(json_row, "rpAddress", "%pPA",
|
2022-03-10 14:08:25 +01:00
|
|
|
&rp_info->rp.rpf_addr);
|
2022-03-11 11:32:26 +01:00
|
|
|
if (rp_info->rp.source_nexthop.interface)
|
|
|
|
json_object_string_add(
|
|
|
|
json_row, "outboundInterface",
|
|
|
|
rp_info->rp.source_nexthop
|
|
|
|
.interface->name);
|
|
|
|
else
|
|
|
|
json_object_string_add(json_row,
|
|
|
|
"outboundInterface",
|
|
|
|
"Unknown");
|
|
|
|
if (rp_info->i_am_rp)
|
|
|
|
json_object_boolean_true_add(json_row, "iAmRP");
|
|
|
|
else
|
|
|
|
json_object_boolean_false_add(json_row,
|
|
|
|
"iAmRP");
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-03-11 11:32:26 +01:00
|
|
|
if (rp_info->plist)
|
|
|
|
json_object_string_add(json_row, "prefixList",
|
|
|
|
rp_info->plist);
|
|
|
|
else
|
|
|
|
json_object_string_addf(json_row, "group",
|
|
|
|
"%pFX",
|
|
|
|
&rp_info->group);
|
|
|
|
json_object_string_add(json_row, "source", source);
|
2022-03-02 12:30:51 +01:00
|
|
|
json_object_string_add(json_row, "groupType",
|
|
|
|
group_type);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-03-11 11:32:26 +01:00
|
|
|
json_object_array_add(json_rp_rows, json_row);
|
|
|
|
} else {
|
2022-08-16 17:21:50 +02:00
|
|
|
prefix2str(&rp_info->group, grp, sizeof(grp));
|
|
|
|
ttable_add_row(tt, "%pPA|%s|%s|%s|%s|%s",
|
|
|
|
&rp_info->rp.rpf_addr,
|
|
|
|
rp_info->plist
|
|
|
|
? rp_info->plist
|
|
|
|
: grp,
|
|
|
|
rp_info->rp.source_nexthop.interface
|
|
|
|
? rp_info->rp.source_nexthop
|
|
|
|
.interface->name
|
|
|
|
: "Unknown",
|
|
|
|
rp_info->i_am_rp
|
|
|
|
? "yes"
|
|
|
|
: "no",
|
|
|
|
source, group_type);
|
2016-08-24 21:32:57 +02:00
|
|
|
}
|
2022-03-11 11:32:26 +01:00
|
|
|
prev_rp_info = rp_info;
|
2016-08-23 22:22:14 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-08-16 17:21:50 +02:00
|
|
|
/* Dump the generated table. */
|
|
|
|
if (!json) {
|
|
|
|
table = ttable_dump(tt, "\n");
|
|
|
|
vty_out(vty, "%s\n", table);
|
2024-08-30 15:05:11 +02:00
|
|
|
XFREE(MTYPE_TMP_TTABLE, table);
|
2022-08-16 17:21:50 +02:00
|
|
|
ttable_del(tt);
|
|
|
|
} else {
|
2016-09-14 17:12:13 +02:00
|
|
|
if (prev_rp_info && json_rp_rows)
|
2022-04-27 10:20:38 +02:00
|
|
|
json_object_object_addf(json, json_rp_rows, "%pPA",
|
2022-03-10 14:08:25 +01:00
|
|
|
&prev_rp_info->rp.rpf_addr);
|
2016-09-14 17:12:13 +02:00
|
|
|
}
|
2016-08-23 22:22:14 +02:00
|
|
|
}
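/*
 * Sample of the non-JSON table assembled above (values are hypothetical; the
 * header matches the ttable_add_row() call):
 *
 *   RP address  group/prefix-list  OIF   I am RP  Source  Group-Type
 *   192.0.2.1   224.0.0.0/4        eth0  yes      Static  ASM
 *   192.0.2.2   232.1.0.0/16       eth1  no       BSR     SSM
 *
 * With JSON output the same rows are grouped into one array per RP address.
 */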
|
2017-04-05 22:14:12 +02:00
|
|
|
|
2019-04-01 18:31:28 +02:00
|
|
|
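/*
 * When a PIM neighbor comes up, walk all RPs and, for every tracked RP
 * nexthop whose gateway is still unspecified, check whether that nexthop
 * leaves through the interface the neighbor was learned on; if so, use the
 * neighbor's source address as the gateway so the RPF can resolve right away.
 */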
void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
|
2017-04-05 22:14:12 +02:00
|
|
|
{
|
|
|
|
struct listnode *node = NULL;
|
|
|
|
struct rp_info *rp_info = NULL;
|
|
|
|
struct nexthop *nh_node = NULL;
|
2022-04-27 10:20:38 +02:00
|
|
|
pim_addr nht_p;
|
2017-04-05 22:14:12 +02:00
|
|
|
struct pim_nexthop_cache pnc;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-20 19:43:58 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
|
2022-02-25 14:17:08 +01:00
|
|
|
if (pim_rpf_addr_is_inaddr_any(&rp_info->rp))
|
2017-04-05 22:14:12 +02:00
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-03-10 14:08:25 +01:00
|
|
|
nht_p = rp_info->rp.rpf_addr;
|
2017-04-05 22:14:12 +02:00
|
|
|
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
|
2022-05-23 14:18:28 +02:00
|
|
|
if (!pim_find_or_track_nexthop(pim, nht_p, NULL, rp_info, &pnc))
|
2017-05-19 04:53:50 +02:00
|
|
|
continue;
|
|
|
|
|
|
|
|
for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
|
2022-03-10 14:08:25 +01:00
|
|
|
#if PIM_IPV == 4
|
|
|
|
if (!pim_addr_is_any(nh_node->gate.ipv4))
|
|
|
|
continue;
|
|
|
|
#else
|
|
|
|
if (!pim_addr_is_any(nh_node->gate.ipv6))
|
2017-05-19 04:53:50 +02:00
|
|
|
continue;
|
2022-03-10 14:08:25 +01:00
|
|
|
#endif
|
2017-05-19 04:53:50 +02:00
|
|
|
|
|
|
|
struct interface *ifp1 = if_lookup_by_index(
|
2021-05-12 20:31:45 +02:00
|
|
|
nh_node->ifindex, pim->vrf->vrf_id);
|
2019-04-01 18:31:28 +02:00
|
|
|
|
|
|
|
if (nbr->interface != ifp1)
|
2017-05-19 04:53:50 +02:00
|
|
|
continue;
|
|
|
|
|
2022-01-21 16:47:18 +01:00
|
|
|
#if PIM_IPV == 4
|
2017-05-19 04:53:50 +02:00
|
|
|
nh_node->gate.ipv4 = nbr->source_addr;
|
2022-01-14 17:47:14 +01:00
|
|
|
#else
|
|
|
|
nh_node->gate.ipv6 = nbr->source_addr;
|
|
|
|
#endif
|
2022-03-10 14:08:25 +01:00
|
|
|
if (PIM_DEBUG_PIM_NHT_RP)
|
2017-05-19 04:53:50 +02:00
|
|
|
zlog_debug(
|
2022-04-27 10:20:38 +02:00
|
|
|
"%s: addr %pPA new nexthop addr %pPAs interface %s",
|
2022-03-10 14:08:25 +01:00
|
|
|
__func__, &nht_p, &nbr->source_addr,
|
2022-01-14 17:47:14 +01:00
|
|
|
ifp1->name);
|
2017-04-05 22:14:12 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2024-10-02 14:22:48 +02:00
|
|
|
|
|
|
|
#if PIM_IPV == 6
|
|
|
|
DEFINE_MTYPE_STATIC(PIMD, PIM_EMBEDDED_RP_GROUP_LIST, "PIM embedded RP group list");
|
|
|
|
DEFINE_MTYPE_STATIC(PIMD, PIM_EMBEDDED_RP_ENTRY, "PIM embedded RP configuration");
|
|
|
|
|
|
|
|
void pim_embedded_rp_enable(struct pim_instance *pim, bool enable)
|
|
|
|
{
|
|
|
|
struct route_node *rn;
|
|
|
|
|
|
|
|
pim->embedded_rp.enable = enable;
|
|
|
|
if (enable)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Remove all learned embedded RPs and reallocate data structure. */
|
|
|
|
for (rn = route_top(pim->embedded_rp.table); rn; rn = route_next(rn)) {
|
|
|
|
pim_embedded_rp_free(pim, rn->info);
|
|
|
|
rn->info = NULL;
|
|
|
|
}
|
|
|
|
route_table_finish(pim->embedded_rp.table);
|
|
|
|
|
|
|
|
pim->embedded_rp.table = route_table_init();
|
|
|
|
}
|
|
|
|
|
|
|
|
void pim_embedded_rp_set_group_list(struct pim_instance *pim, const char *group_list)
|
|
|
|
{
|
|
|
|
if (pim->embedded_rp.group_list)
|
|
|
|
XFREE(MTYPE_PIM_EMBEDDED_RP_GROUP_LIST, pim->embedded_rp.group_list);
|
|
|
|
|
|
|
|
if (group_list == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pim->embedded_rp.group_list = XSTRDUP(MTYPE_PIM_EMBEDDED_RP_GROUP_LIST, group_list);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pim_embedded_rp_set_maximum_rps(struct pim_instance *pim, uint32_t maximum)
|
|
|
|
{
|
|
|
|
pim->embedded_rp.maximum_rps = maximum;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool pim_embedded_rp_filter_match(const struct pim_instance *pim, const pim_addr *group)
|
|
|
|
{
|
|
|
|
struct prefix_list *list;
|
|
|
|
struct prefix group_prefix = {
|
|
|
|
.family = PIM_AF,
|
|
|
|
.prefixlen = PIM_MAX_BITLEN,
|
|
|
|
.u.prefix6 = *group,
|
|
|
|
};
|
|
|
|
|
|
|
|
list = prefix_list_lookup(PIM_AFI, pim->embedded_rp.group_list);
|
|
|
|
if (list == NULL)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (prefix_list_apply_ext(list, NULL, &group_prefix, true) == PREFIX_DENY) {
|
|
|
|
if (PIM_DEBUG_PIM_TRACE)
|
|
|
|
zlog_debug("filtering embedded-rp group %pPA", group);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
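/*
 * Sketch of a possible group-list (the prefix-list name and entries are
 * assumptions, not taken from any real configuration): the list consulted
 * above is an ordinary IPv6 prefix-list, so an operator could exclude part
 * of the embedded-RP range with something like
 *
 *    ipv6 prefix-list EMBEDDED-GROUPS seq 5 deny ff7e:130::/32
 *    ipv6 prefix-list EMBEDDED-GROUPS seq 10 permit any
 *
 * A PREFIX_DENY match here makes the group be ignored for embedded-RP
 * learning.
 */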
|
|
|
|
|
|
|
|
bool pim_embedded_rp_is_embedded(const pim_addr *group)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Embedded RP basic format:
|
|
|
|
* - First byte: 0xFF
|
|
|
|
* - Third nibble: 0x7 (binary 0111)
|
|
|
|
* - Fourth nibble: Scope
|
|
|
|
* - Fifth nibble: Reserved (zero)
|
|
|
|
* - Sixth nibble: RIID (RP interface ID)
|
|
|
|
* - Fourth byte: Prefix length (1..64)
|
|
|
|
* - Fifth byte and on: RP address prefix
|
|
|
|
* - Last four bytes: Multicast group ID
|
|
|
|
*/
|
|
|
|
if (group->s6_addr[0] != 0xFF)
|
|
|
|
return false;
|
|
|
|
/* Embedded RP flags must all be set. */
|
|
|
|
if ((group->s6_addr[1] & 0xF0) != 0x70)
|
|
|
|
return false;
|
|
|
|
/* Reserved nibble */
|
|
|
|
if ((group->s6_addr[2] & 0xF0) != 0x00)
|
|
|
|
return false;
|
|
|
|
/* RP Interface ID must not be zero */
|
|
|
|
if ((group->s6_addr[2] & 0x0F) == 0x00)
|
|
|
|
return false;
|
|
|
|
/* Prefix length must be between 1 and 64. */
|
|
|
|
if (group->s6_addr[3] == 0 || group->s6_addr[3] > 64)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
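/*
 * Worked example (illustrative group address following the RFC 3956 layout):
 * ff7e:140:2001:db8::1234 passes every check above:
 *   s6_addr[0] = 0xff  multicast prefix
 *   s6_addr[1] = 0x7e  flags nibble 0x7, scope 0xe
 *   s6_addr[2] = 0x01  reserved nibble 0, RIID 1
 *   s6_addr[3] = 0x40  RP prefix length 64
 * while an SSM group such as ff3e::1234 fails the flags check (0x3 != 0x7).
 */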
|
|
|
|
|
|
|
|
bool pim_embedded_rp_extract(const pim_addr *group, pim_addr *rp)
|
|
|
|
{
|
|
|
|
struct prefix prefix;
|
|
|
|
|
|
|
|
if (!pim_embedded_rp_is_embedded(group))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Copy only as many bytes as the embedded prefix length requires. */
|
|
|
|
prefix = (struct prefix){
|
|
|
|
.family = PIM_AF,
|
|
|
|
.prefixlen = group->s6_addr[3],
|
|
|
|
};
|
|
|
|
memcpy(&prefix.u.prefix6, &group->s6_addr[4],
|
|
|
|
(prefix.prefixlen % 8) == 0 ? (prefix.prefixlen / 8) : (prefix.prefixlen / 8) + 1);
|
|
|
|
/* Zero unused address bits. */
|
|
|
|
apply_mask(&prefix);
|
|
|
|
|
|
|
|
/* Return assembled RP address. */
|
|
|
|
*rp = prefix.u.prefix6;
|
|
|
|
rp->s6_addr[15] = group->s6_addr[2] & 0x0F;
|
|
|
|
return true;
|
|
|
|
}
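/*
 * Continuing the worked example (illustrative addresses): for the group
 * ff7e:140:2001:db8::1234 the code above copies the 64-bit prefix
 * 2001:db8::/64, zeroes the remaining bits, and writes the RIID (1) into the
 * lowest byte, yielding the embedded RP address 2001:db8::1.
 */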
|
|
|
|
|
|
|
|
void pim_embedded_rp_new(struct pim_instance *pim, const pim_addr *group, const pim_addr *rp)
|
|
|
|
{
|
|
|
|
struct route_node *rnode;
|
|
|
|
struct rp_info *rp_info;
|
|
|
|
struct prefix group_prefix = {
|
|
|
|
.family = PIM_AF,
|
|
|
|
.prefixlen = PIM_MAX_BITLEN,
|
|
|
|
.u.prefix6 = *group,
|
|
|
|
};
|
|
|
|
|
|
|
|
rnode = route_node_get(pim->embedded_rp.table, &group_prefix);
|
|
|
|
if (rnode->info != NULL) {
|
|
|
|
route_unlock_node(rnode);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pim->embedded_rp.rp_count >= pim->embedded_rp.maximum_rps) {
|
|
|
|
zlog_info("Embedded RP maximum (%u) has been reached. Disregarding new RP %pPA",
|
|
|
|
pim->embedded_rp.maximum_rps, rp);
|
|
|
|
route_unlock_node(rnode);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
pim->embedded_rp.rp_count++;
|
|
|
|
|
|
|
|
rnode->info = rp_info = XCALLOC(MTYPE_PIM_EMBEDDED_RP_ENTRY, sizeof(struct rp_info));
|
|
|
|
rp_info->rp.rpf_addr = *rp;
|
|
|
|
prefix_copy(&rp_info->group, &group_prefix);
|
|
|
|
rp_info->rp_src = RP_SRC_EMBEDDED_RP;
|
|
|
|
listnode_add_sort(pim->rp_list, rp_info);
|
|
|
|
if (PIM_DEBUG_TRACE)
|
|
|
|
zlog_debug("add embedded RP %pPA for group %pPA", rp, group);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PIM RP regular maintenance
|
|
|
|
*/
|
|
|
|
pim_zebra_update_all_interfaces(pim);
|
|
|
|
pim_rp_check_interfaces(pim, rp_info);
|
|
|
|
if (rp_info->i_am_rp && PIM_DEBUG_PIM_NHT_RP)
|
|
|
|
zlog_debug("new RP %pPA for %pFX is ourselves", &rp_info->rp.rpf_addr,
|
|
|
|
&rp_info->group);
|
|
|
|
|
|
|
|
pim_rp_refresh_group_to_rp_mapping(pim);
|
|
|
|
if (PIM_DEBUG_PIM_NHT_RP)
|
|
|
|
zlog_debug("%s: NHT Register RP addr %pPA grp %pFX with Zebra", __func__,
|
|
|
|
&rp_info->rp.rpf_addr, &rp_info->group);
|
|
|
|
|
|
|
|
pim_find_or_track_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info, NULL);
|
|
|
|
pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, rp_info->rp.rpf_addr,
|
|
|
|
&rp_info->group, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pim_embedded_rp_delete(struct pim_instance *pim, const pim_addr *group)
|
|
|
|
{
|
|
|
|
struct route_node *rnode;
|
|
|
|
struct prefix group_prefix = {
|
|
|
|
.family = PIM_AF,
|
|
|
|
.prefixlen = PIM_MAX_BITLEN,
|
|
|
|
.u.prefix6 = *group,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Avoid NULL accesses during shutdown */
|
|
|
|
if (pim->embedded_rp.table == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
rnode = route_node_lookup(pim->embedded_rp.table, &group_prefix);
|
|
|
|
if (rnode == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pim_embedded_rp_free(pim, rnode->info);
|
|
|
|
rnode->info = NULL;
|
|
|
|
|
|
|
|
/* Unlock twice to remove the node */
|
|
|
|
route_unlock_node(rnode);
|
|
|
|
route_unlock_node(rnode);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pim_embedded_rp_free(struct pim_instance *pim, struct rp_info *rp_info)
|
|
|
|
{
|
|
|
|
if (pim->embedded_rp.rp_count > 0)
|
|
|
|
pim->embedded_rp.rp_count--;
|
|
|
|
|
|
|
|
if (PIM_DEBUG_TRACE)
|
|
|
|
zlog_debug("delete embedded RP %pPA", &rp_info->rp.rpf_addr);
|
|
|
|
|
|
|
|
pim_delete_tracked_nexthop(pim, rp_info->rp.rpf_addr, NULL, rp_info);
|
|
|
|
listnode_delete(pim->rp_list, rp_info);
|
|
|
|
XFREE(MTYPE_PIM_EMBEDDED_RP_ENTRY, rp_info);
|
|
|
|
}
|
|
|
|
#endif /* PIM_IPV == 6 */
|