/*
 * PIM for Quagga
 * Copyright (C) 2015 Cumulus Networks, Inc.
 * Donald Sharp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <zebra.h>

#include "lib/json.h"
#include "log.h"
#include "network.h"
#include "if.h"
#include "linklist.h"
#include "prefix.h"
#include "memory.h"
#include "vty.h"
#include "vrf.h"
#include "plist.h"
#include "nexthop.h"
#include "table.h"

#include "pimd.h"
#include "pim_vty.h"
#include "pim_str.h"
#include "pim_iface.h"
#include "pim_rp.h"
#include "pim_rpf.h"
#include "pim_sock.h"
#include "pim_memory.h"
#include "pim_msdp.h"
#include "pim_nht.h"

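/*
 * This file implements pimd's static RP handling: the per-instance RP
 * list and table, group-to-RP matching, "ip pim rp" configuration, and
 * the nexthop tracking (NHT) registration used to resolve each RP's RPF.
 */
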
/* Cleanup pim->rpf_hash: free the data hanging off of each hash node */
void pim_rp_list_hash_clean(void *data)
{
	struct pim_nexthop_cache *pnc = (struct pim_nexthop_cache *)data;

	list_delete_and_null(&pnc->rp_list);

	hash_clean(pnc->upstream_hash, NULL);
	hash_free(pnc->upstream_hash);
	pnc->upstream_hash = NULL;

	XFREE(MTYPE_PIM_NEXTHOP_CACHE, pnc);
}

static void pim_rp_info_free(struct rp_info *rp_info)
{
	XFREE(MTYPE_PIM_RP, rp_info);
}

int pim_rp_list_cmp(void *v1, void *v2)
{
	struct rp_info *rp1 = (struct rp_info *)v1;
	struct rp_info *rp2 = (struct rp_info *)v2;

	/*
	 * Sort by RP IP address
	 */
	if (rp1->rp.rpf_addr.u.prefix4.s_addr
	    < rp2->rp.rpf_addr.u.prefix4.s_addr)
		return -1;

	if (rp1->rp.rpf_addr.u.prefix4.s_addr
	    > rp2->rp.rpf_addr.u.prefix4.s_addr)
		return 1;

	/*
	 * Sort by group IP address
	 */
	if (rp1->group.u.prefix4.s_addr < rp2->group.u.prefix4.s_addr)
		return -1;

	if (rp1->group.u.prefix4.s_addr > rp2->group.u.prefix4.s_addr)
		return 1;

	return 0;
}

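/*
 * Initialize the per-instance RP state: the sorted rp_list, the rp_table
 * used for longest-prefix group matches, and a catch-all 224.0.0.0/4
 * entry whose RP stays INADDR_NONE until one is configured.
 */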
void pim_rp_init(struct pim_instance *pim)
{
	struct rp_info *rp_info;
	struct route_node *rn;

	pim->rp_list = list_new();
	if (!pim->rp_list) {
		zlog_err("Unable to alloc rp_list");
		return;
	}
	pim->rp_list->del = (void (*)(void *))pim_rp_info_free;
	pim->rp_list->cmp = pim_rp_list_cmp;

	pim->rp_table = route_table_init();
	if (!pim->rp_table) {
		zlog_err("Unable to alloc rp_table");
		list_delete_and_null(&pim->rp_list);
		return;
	}

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (!str2prefix("224.0.0.0/4", &rp_info->group)) {
		zlog_err("Unable to convert 224.0.0.0/4 to prefix");
		list_delete_and_null(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}
	rp_info->group.family = AF_INET;
	rp_info->rp.rpf_addr.family = AF_INET;
	rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
	rp_info->rp.rpf_addr.u.prefix4.s_addr = INADDR_NONE;

	listnode_add(pim->rp_list, rp_info);

	rn = route_node_get(pim->rp_table, &rp_info->group);
	if (!rn) {
		zlog_err("Failure to get route node for pim->rp_table");
		list_delete_and_null(&pim->rp_list);
		route_table_finish(pim->rp_table);
		XFREE(MTYPE_PIM_RP, rp_info);
		return;
	}

	rn->info = rp_info;
	if (PIM_DEBUG_TRACE)
		zlog_debug(
			"Allocated: %p for rp_info: %p(224.0.0.0/4) Lock: %d",
			rn, rp_info, rn->lock);
}

void pim_rp_free(struct pim_instance *pim)
{
	if (pim->rp_list)
		list_delete_and_null(&pim->rp_list);
}

/*
 * Given an RP's prefix-list, return the RP's rp_info for that prefix-list
 */
static struct rp_info *pim_rp_find_prefix_list(struct pim_instance *pim,
					       struct in_addr rp,
					       const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp.s_addr == rp_info->rp.rpf_addr.u.prefix4.s_addr
		    && rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return rp_info;
		}
	}

	return NULL;
}

/*
 * Return true if plist is used by any rp_info
 */
static int pim_rp_prefix_list_used(struct pim_instance *pim, const char *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist && strcmp(rp_info->plist, plist) == 0) {
			return 1;
		}
	}

	return 0;
}

/*
 * Given an RP's address, return the RP's rp_info that is an exact match for
 * 'group'
 */
static struct rp_info *pim_rp_find_exact(struct pim_instance *pim,
					 struct in_addr rp,
					 struct prefix *group)
{
	struct listnode *node;
	struct rp_info *rp_info;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp.s_addr == rp_info->rp.rpf_addr.u.prefix4.s_addr
		    && prefix_same(&rp_info->group, group))
			return rp_info;
	}

	return NULL;
}

/*
 * Given a group, return the rp_info for that group
 */
static struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
					       struct prefix *group)
{
	struct listnode *node;
	struct rp_info *best = NULL;
	struct rp_info *rp_info;
	struct prefix_list *plist;
	struct prefix *p, *bp;
	struct route_node *rn;

	bp = NULL;
	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist) {
			plist = prefix_list_lookup(AFI_IP, rp_info->plist);

			if (prefix_list_apply_which_prefix(plist, &p, group)
			    == PREFIX_DENY)
				continue;

			if (!best) {
				best = rp_info;
				bp = p;
				continue;
			}

			if (bp && bp->prefixlen < p->prefixlen) {
				best = rp_info;
				bp = p;
			}
		}
	}

	rn = route_node_match(pim->rp_table, group);
	if (!rn) {
		zlog_err(
			"%s: BUG We should have found default group information\n",
			__PRETTY_FUNCTION__);
		return best;
	}

	rp_info = rn->info;
	if (PIM_DEBUG_TRACE) {
		char buf[PREFIX_STRLEN];

		route_unlock_node(rn);
		zlog_debug("Lookedup: %p for rp_info: %p(%s) Lock: %d", rn,
			   rp_info,
			   prefix2str(&rp_info->group, buf, sizeof(buf)),
			   rn->lock);
	}

	if (!best)
		return rp_info;

	if (rp_info->group.prefixlen < best->group.prefixlen)
		best = rp_info;

	return best;
}

/*
 * When the user makes "ip pim rp" configuration changes or if they change the
 * prefix-list(s) used by these statements we must tickle the upstream state
 * for each group to make them re-lookup who their RP should be.
 *
 * This is a placeholder function for now.
 */
static void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
	pim_msdp_i_am_rp_changed(pim);
}

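/*
 * Called when a prefix-list changes; if any configured RP references the
 * changed list, refresh the group-to-RP mapping.
 */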
void pim_rp_prefix_list_update(struct pim_instance *pim,
			       struct prefix_list *plist)
{
	struct listnode *node;
	struct rp_info *rp_info;
	int refresh_needed = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->plist
		    && strcmp(rp_info->plist, prefix_list_name(plist)) == 0) {
			refresh_needed = 1;
			break;
		}
	}

	if (refresh_needed)
		pim_rp_refresh_group_to_rp_mapping(pim);
}

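/*
 * Return 1 if the given pim interface owns the RP address, either as its
 * primary address or as one of its secondary addresses.
 */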
static int pim_rp_check_interface_addrs(struct rp_info *rp_info,
					struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct pim_secondary_addr *sec_addr;

	if (pim_ifp->primary_address.s_addr
	    == rp_info->rp.rpf_addr.u.prefix4.s_addr)
		return 1;

	if (!pim_ifp->sec_addr_list) {
		return 0;
	}

	for (ALL_LIST_ELEMENTS_RO(pim_ifp->sec_addr_list, node, sec_addr)) {
		if (prefix_same(&sec_addr->addr, &rp_info->rp.rpf_addr)) {
			return 1;
		}
	}

	return 0;
}

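/*
 * Re-compute rp_info->i_am_rp by scanning every interface in the VRF for
 * an address that matches the RP address.
 */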
static void pim_rp_check_interfaces(struct pim_instance *pim,
				    struct rp_info *rp_info)
{
	struct interface *ifp;

	rp_info->i_am_rp = 0;
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			rp_info->i_am_rp = 1;
		}
	}
}

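/*
 * Handle "ip pim rp" configuration: parse the RP and group (or prefix-list),
 * reject overlapping configuration, insert the new rp_info into the list and
 * table, then register the RP address with Zebra NHT and resolve its nexthop.
 */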
int pim_rp_new(struct pim_instance *pim, const char *rp,
	       const char *group_range, const char *plist)
{
	int result = 0;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	struct prefix group_all;
	struct listnode *node, *nnode;
	struct rp_info *tmp_rp_info;
	char buffer[BUFSIZ];
	struct prefix nht_p;
	struct pim_nexthop_cache pnc;
	struct route_node *rn;

	rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));

	if (group_range == NULL)
		result = str2prefix("224.0.0.0/4", &rp_info->group);
	else
		result = str2prefix(group_range, &rp_info->group);

	if (!result) {
		XFREE(MTYPE_PIM_RP, rp_info);
		return PIM_GROUP_BAD_ADDRESS;
	}

	rp_info->rp.rpf_addr.family = AF_INET;
	rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
	result = inet_pton(rp_info->rp.rpf_addr.family, rp,
			   &rp_info->rp.rpf_addr.u.prefix4);

	if (result <= 0) {
		XFREE(MTYPE_PIM_RP, rp_info);
		return PIM_RP_BAD_ADDRESS;
	}

	if (plist) {
		/*
		 * Return if the prefix-list is already configured for this RP
		 */
		if (pim_rp_find_prefix_list(pim, rp_info->rp.rpf_addr.u.prefix4,
					    plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if the prefix-list is already configured for an RP
		 */
		if (pim_rp_prefix_list_used(pim, plist)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_RP_PFXLIST_IN_USE;
		}

		/*
		 * Free any existing rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (rp_info->rp.rpf_addr.u.prefix4.s_addr
			    == tmp_rp_info->rp.rpf_addr.u.prefix4.s_addr) {
				if (tmp_rp_info->plist)
					pim_rp_del(pim, rp, NULL,
						   tmp_rp_info->plist);
				else
					pim_rp_del(
						pim, rp,
						prefix2str(&tmp_rp_info->group,
							   buffer, BUFSIZ),
						NULL);
			}
		}

		rp_info->plist = XSTRDUP(MTYPE_PIM_FILTER_NAME, plist);
	} else {

		if (!str2prefix("224.0.0.0/4", &group_all)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}
		rp_all = pim_rp_find_match_group(pim, &group_all);

		/*
		 * Barf if group is a non-multicast subnet
		 */
		if (!prefix_match(&rp_all->group, &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_GROUP_BAD_ADDRESS;
		}

		/*
		 * Remove any prefix-list rp_info entries for this RP
		 */
		for (ALL_LIST_ELEMENTS(pim->rp_list, node, nnode,
				       tmp_rp_info)) {
			if (tmp_rp_info->plist
			    && rp_info->rp.rpf_addr.u.prefix4.s_addr
				       == tmp_rp_info->rp.rpf_addr.u.prefix4
						  .s_addr) {
				pim_rp_del(pim, rp, NULL, tmp_rp_info->plist);
			}
		}

		/*
		 * Take over the 224.0.0.0/4 group if the rp is INADDR_NONE
		 */
		if (prefix_same(&rp_all->group, &rp_info->group)
		    && pim_rpf_addr_is_inaddr_none(&rp_all->rp)) {
			rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
			XFREE(MTYPE_PIM_RP, rp_info);

			/* Register addr with Zebra NHT */
			nht_p.family = AF_INET;
			nht_p.prefixlen = IPV4_MAX_BITLEN;
			nht_p.u.prefix4 =
				rp_all->rp.rpf_addr.u.prefix4; // RP address
			if (PIM_DEBUG_PIM_NHT_RP) {
				char buf[PREFIX2STR_BUFFER];
				char buf1[PREFIX2STR_BUFFER];
				prefix2str(&nht_p, buf, sizeof(buf));
				prefix2str(&rp_all->group, buf1, sizeof(buf1));
				zlog_debug(
					"%s: NHT Register rp_all addr %s grp %s ",
					__PRETTY_FUNCTION__, buf, buf1);
			}
			memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
			if (pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
						      &pnc)) {
				if (!pim_ecmp_nexthop_search(
					    pim, &pnc,
					    &rp_all->rp.source_nexthop, &nht_p,
					    &rp_all->group, 1))
					return PIM_RP_NO_PATH;
			} else {
				if (!pim_ecmp_nexthop_lookup(
					    pim, &rp_all->rp.source_nexthop,
					    &nht_p, &rp_all->group, 1))
					return PIM_RP_NO_PATH;
			}
			pim_rp_check_interfaces(pim, rp_all);
			pim_rp_refresh_group_to_rp_mapping(pim);
			return PIM_SUCCESS;
		}
		/*
		 * Return if the group is already configured for this RP
		 */
		if (pim_rp_find_exact(pim, rp_info->rp.rpf_addr.u.prefix4,
				      &rp_info->group)) {
			XFREE(MTYPE_PIM_RP, rp_info);
			return PIM_SUCCESS;
		}

		/*
		 * Barf if this group is already covered by some other RP
		 */
		tmp_rp_info = pim_rp_find_match_group(pim, &rp_info->group);

		if (tmp_rp_info) {
			if (tmp_rp_info->plist) {
				XFREE(MTYPE_PIM_RP, rp_info);
				return PIM_GROUP_PFXLIST_OVERLAP;
			} else {
				/*
				 * If the only RP that covers this group is an
				 * RP configured for 224.0.0.0/4 that is fine,
				 * ignore that one.  For all others though we
				 * must return PIM_GROUP_OVERLAP
				 */
				if (prefix_same(&rp_info->group,
						&tmp_rp_info->group)) {
					XFREE(MTYPE_PIM_RP, rp_info);
					return PIM_GROUP_OVERLAP;
				}
			}
		}
	}

	listnode_add_sort(pim->rp_list, rp_info);
	rn = route_node_get(pim->rp_table, &rp_info->group);
	rn->info = rp_info;

	if (PIM_DEBUG_TRACE) {
		char buf[PREFIX_STRLEN];

		zlog_debug("Allocated: %p for rp_info: %p(%s) Lock: %d", rn,
			   rp_info,
			   prefix2str(&rp_info->group, buf, sizeof(buf)),
			   rn->lock);
	}

	/* Register addr with Zebra NHT */
	nht_p.family = AF_INET;
	nht_p.prefixlen = IPV4_MAX_BITLEN;
	nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
	if (PIM_DEBUG_PIM_NHT_RP) {
		char buf[PREFIX2STR_BUFFER];
		char buf1[PREFIX2STR_BUFFER];
		prefix2str(&nht_p, buf, sizeof(buf));
		prefix2str(&rp_info->group, buf1, sizeof(buf1));
		zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
			   __PRETTY_FUNCTION__, buf, buf1);
	}

	memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
	if (pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, &pnc)) {
		if (!pim_ecmp_nexthop_search(pim, &pnc,
					     &rp_info->rp.source_nexthop,
					     &nht_p, &rp_info->group, 1))
			return PIM_RP_NO_PATH;
	} else {
		if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
					     &nht_p, &rp_info->group, 1))
			return PIM_RP_NO_PATH;
	}

	pim_rp_check_interfaces(pim, rp_info);
	pim_rp_refresh_group_to_rp_mapping(pim);
	return PIM_SUCCESS;
}

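/*
 * Handle "no ip pim rp": deregister the RP address from NHT and remove the
 * rp_info; the catch-all 224.0.0.0/4 entry is reset to INADDR_NONE rather
 * than deleted.
 */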
int pim_rp_del(struct pim_instance *pim, const char *rp,
	       const char *group_range, const char *plist)
{
	struct prefix group;
	struct in_addr rp_addr;
	struct prefix g_all;
	struct rp_info *rp_info;
	struct rp_info *rp_all;
	int result;
	struct prefix nht_p;
	struct route_node *rn;
	bool was_plist = false;

	if (group_range == NULL)
		result = str2prefix("224.0.0.0/4", &group);
	else
		result = str2prefix(group_range, &group);

	if (!result)
		return PIM_GROUP_BAD_ADDRESS;

	result = inet_pton(AF_INET, rp, &rp_addr);
	if (result <= 0)
		return PIM_RP_BAD_ADDRESS;

	if (plist)
		rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
	else
		rp_info = pim_rp_find_exact(pim, rp_addr, &group);

	if (!rp_info)
		return PIM_RP_NOT_FOUND;

	if (rp_info->plist) {
		XFREE(MTYPE_PIM_FILTER_NAME, rp_info->plist);
		rp_info->plist = NULL;
		was_plist = true;
	}

	/* Deregister addr with Zebra NHT */
	nht_p.family = AF_INET;
	nht_p.prefixlen = IPV4_MAX_BITLEN;
	nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
	if (PIM_DEBUG_PIM_NHT_RP) {
		char buf[PREFIX2STR_BUFFER];
		prefix2str(&nht_p, buf, sizeof(buf));
		zlog_debug("%s: Deregister RP addr %s with Zebra ",
			   __PRETTY_FUNCTION__, buf);
	}
	pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);

	if (!str2prefix("224.0.0.0/4", &g_all))
		return PIM_RP_BAD_ADDRESS;

	rp_all = pim_rp_find_match_group(pim, &g_all);

	if (rp_all == rp_info) {
		rp_all->rp.rpf_addr.family = AF_INET;
		rp_all->rp.rpf_addr.u.prefix4.s_addr = INADDR_NONE;
		rp_all->i_am_rp = 0;
		return PIM_SUCCESS;
	}

	listnode_delete(pim->rp_list, rp_info);

	if (!was_plist) {
		rn = route_node_get(pim->rp_table, &rp_info->group);
		if (rn) {
			if (rn->info != rp_info)
				zlog_err("WTF matey");

			if (PIM_DEBUG_TRACE) {
				char buf[PREFIX_STRLEN];

				zlog_debug(
					"%s:Found for Freeing: %p for rp_info: %p(%s) Lock: %d",
					__PRETTY_FUNCTION__, rn, rp_info,
					prefix2str(&rp_info->group, buf,
						   sizeof(buf)),
					rn->lock);
			}
			rn->info = NULL;
			route_unlock_node(rn);
			route_unlock_node(rn);
		}
	}

	pim_rp_refresh_group_to_rp_mapping(pim);

	XFREE(MTYPE_PIM_RP, rp_info);
	return PIM_SUCCESS;
}

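/*
 * Walk all configured RPs and (re)resolve their nexthops via the nexthop
 * cache, falling back to a direct lookup when no cache entry exists.
 */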
void pim_rp_setup(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	struct prefix nht_p;
	struct pim_nexthop_cache pnc;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->rp.rpf_addr.u.prefix4.s_addr == INADDR_NONE)
			continue;

		nht_p.family = AF_INET;
		nht_p.prefixlen = IPV4_MAX_BITLEN;
		nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		if (pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, &pnc))
			pim_ecmp_nexthop_search(pim, &pnc,
						&rp_info->rp.source_nexthop,
						&nht_p, &rp_info->group, 1);
		else {
			if (PIM_DEBUG_PIM_NHT_RP) {
				char buf[PREFIX2STR_BUFFER];
				prefix2str(&nht_p, buf, sizeof(buf));
				zlog_debug(
					"%s: NHT Local Nexthop not found for RP %s ",
					__PRETTY_FUNCTION__, buf);
			}
			if (!pim_ecmp_nexthop_lookup(
				    pim, &rp_info->rp.source_nexthop, &nht_p,
				    &rp_info->group, 1))
				if (PIM_DEBUG_PIM_NHT_RP)
					zlog_debug(
						"Unable to lookup nexthop for rp specified");
		}
	}
}

/*
 * Checks to see if we should elect ourselves the actual RP when new
 * addresses are added against an interface.
 */
void pim_rp_check_on_if_add(struct pim_interface *pim_ifp)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	struct pim_instance *pim = pim_ifp->pim;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_none(&rp_info->rp))
			continue;

		/* if i_am_rp is already set nothing to be done (adding new
		 * addresses is not going to make a difference). */
		if (rp_info->i_am_rp) {
			continue;
		}

		if (pim_rp_check_interface_addrs(rp_info, pim_ifp)) {
			i_am_rp_changed = true;
			rp_info->i_am_rp = 1;
			if (PIM_DEBUG_PIM_NHT_RP) {
				char rp[PREFIX_STRLEN];
				pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
					      rp, sizeof(rp));
				zlog_debug("%s: %s: i am rp", __func__, rp);
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
	}
}

/* Un-optimized re-evaluation of "i_am_rp". This is used when interface
 * addresses are removed. Removing addresses is an uncommon event in an
 * active network, so I have made no attempt to optimize it. */
void pim_i_am_rp_re_evaluate(struct pim_instance *pim)
{
	struct listnode *node;
	struct rp_info *rp_info;
	bool i_am_rp_changed = false;
	int old_i_am_rp;

	if (pim->rp_list == NULL)
		return;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_none(&rp_info->rp))
			continue;

		old_i_am_rp = rp_info->i_am_rp;
		pim_rp_check_interfaces(pim, rp_info);

		if (old_i_am_rp != rp_info->i_am_rp) {
			i_am_rp_changed = true;
			if (PIM_DEBUG_PIM_NHT_RP) {
				char rp[PREFIX_STRLEN];
				pim_addr_dump("<rp?>", &rp_info->rp.rpf_addr,
					      rp, sizeof(rp));
				if (rp_info->i_am_rp) {
					zlog_debug("%s: %s: i am rp", __func__,
						   rp);
				} else {
					zlog_debug("%s: %s: i am no longer rp",
						   __func__, rp);
				}
			}
		}
	}

	if (i_am_rp_changed) {
		pim_msdp_i_am_rp_changed(pim);
	}
}

/*
 * I_am_RP(G) is true if the group-to-RP mapping indicates that
 * this router is the RP for the group.
 *
 * Since we only have static RP, all groups are part of this RP
 */
int pim_rp_i_am_rp(struct pim_instance *pim, struct in_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	g.family = AF_INET;
	g.prefixlen = 32;
	g.u.prefix4 = group;

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info)
		return rp_info->i_am_rp;

	return 0;
}

/*
 * RP(G)
 *
 * Return the RP that the Group belongs to.
 */
struct pim_rpf *pim_rp_g(struct pim_instance *pim, struct in_addr group)
{
	struct prefix g;
	struct rp_info *rp_info;

	memset(&g, 0, sizeof(g));
	g.family = AF_INET;
	g.prefixlen = 32;
	g.u.prefix4 = group;

	rp_info = pim_rp_find_match_group(pim, &g);

	if (rp_info) {
		struct prefix nht_p;
		struct pim_nexthop_cache pnc;
		/* Register addr with Zebra NHT */
		nht_p.family = AF_INET;
		nht_p.prefixlen = IPV4_MAX_BITLEN;
		nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
		if (PIM_DEBUG_PIM_NHT_RP) {
			char buf[PREFIX2STR_BUFFER];
			char buf1[PREFIX2STR_BUFFER];
			prefix2str(&nht_p, buf, sizeof(buf));
			prefix2str(&rp_info->group, buf1, sizeof(buf1));
			zlog_debug(
				"%s: NHT Register RP addr %s grp %s with Zebra",
				__PRETTY_FUNCTION__, buf, buf1);
		}
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		if (pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, &pnc))
			pim_ecmp_nexthop_search(pim, &pnc,
						&rp_info->rp.source_nexthop,
						&nht_p, &rp_info->group, 1);
		else {
			if (PIM_DEBUG_PIM_NHT_RP) {
				char buf[PREFIX2STR_BUFFER];
				char buf1[PREFIX2STR_BUFFER];
				prefix2str(&nht_p, buf, sizeof(buf));
				prefix2str(&g, buf1, sizeof(buf1));
				zlog_debug(
					"%s: Nexthop cache not found for RP %s grp %s register with Zebra",
					__PRETTY_FUNCTION__, buf, buf1);
			}
			pim_rpf_set_refresh_time(pim);
			pim_ecmp_nexthop_lookup(pim,
						&rp_info->rp.source_nexthop,
						&nht_p, &rp_info->group, 1);
		}
		return (&rp_info->rp);
	}

	// About to Go Down
	return NULL;
}

/*
 * Set the upstream IP address we want to talk to based upon
 * the rp configured and the source address
 *
 * If we don't have an RP configured and the source address is *
 * then return failure.
 */
int pim_rp_set_upstream_addr(struct pim_instance *pim, struct in_addr *up,
			     struct in_addr source, struct in_addr group)
{
	struct rp_info *rp_info;
	struct prefix g;

	memset(&g, 0, sizeof(g));
	g.family = AF_INET;
	g.prefixlen = 32;
	g.u.prefix4 = group;

	rp_info = pim_rp_find_match_group(pim, &g);

	if ((pim_rpf_addr_is_inaddr_none(&rp_info->rp))
	    && (source.s_addr == INADDR_ANY)) {
		if (PIM_DEBUG_PIM_NHT_RP)
			zlog_debug("%s: Received a (*,G) with no RP configured",
				   __PRETTY_FUNCTION__);
		return 0;
	}

	*up = (source.s_addr == INADDR_ANY) ? rp_info->rp.rpf_addr.u.prefix4
					    : source;

	return 1;
}

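/*
 * Write the "ip pim rp ..." lines for the running configuration and return
 * the number of lines emitted.
 */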
int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
			const char *spaces)
{
	struct listnode *node;
	struct rp_info *rp_info;
	char rp_buffer[32];
	char group_buffer[32];
	int count = 0;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (pim_rpf_addr_is_inaddr_none(&rp_info->rp))
			continue;

		if (rp_info->plist)
			vty_out(vty, "%sip pim rp %s prefix-list %s\n", spaces,
				inet_ntop(AF_INET,
					  &rp_info->rp.rpf_addr.u.prefix4,
					  rp_buffer, 32),
				rp_info->plist);
		else
			vty_out(vty, "%sip pim rp %s %s\n", spaces,
				inet_ntop(AF_INET,
					  &rp_info->rp.rpf_addr.u.prefix4,
					  rp_buffer, 32),
				prefix2str(&rp_info->group, group_buffer, 32));
		count++;
	}

	return count;
}

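/*
 * Return 1 if dest_addr is an address this router should accept as RP for
 * 'group': either we are the RP and dest_addr is the RP address, or the
 * address belongs to one of our own interfaces.
 */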
int pim_rp_check_is_my_ip_address(struct pim_instance *pim,
				  struct in_addr group,
				  struct in_addr dest_addr)
{
	struct rp_info *rp_info;
	struct prefix g;

	memset(&g, 0, sizeof(g));
	g.family = AF_INET;
	g.prefixlen = 32;
	g.u.prefix4 = group;

	rp_info = pim_rp_find_match_group(pim, &g);
	/*
	 * See if we can short-cut some?
	 * This might not make sense if we ever leave a static RP
	 * type of configuration.
	 * Note - Premature optimization might bite our patooeys' here.
	 */
	if (I_am_RP(pim, group)) {
		if (dest_addr.s_addr == rp_info->rp.rpf_addr.u.prefix4.s_addr)
			return 1;
	}

	if (if_lookup_exact_address(&dest_addr, AF_INET, pim->vrf_id))
		return 1;

	return 0;
}

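/*
 * Display the configured RPs (plain text or JSON), including the outgoing
 * interface toward each RP and whether this router is the RP.
 */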
void pim_rp_show_information(struct pim_instance *pim, struct vty *vty,
			     uint8_t uj)
{
	struct rp_info *rp_info;
	struct rp_info *prev_rp_info = NULL;
	struct listnode *node;

	json_object *json = NULL;
	json_object *json_rp_rows = NULL;
	json_object *json_row = NULL;

	if (uj)
		json = json_object_new_object();
	else
		vty_out(vty,
			"RP address      group/prefix-list  OIF        I am RP\n");

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (!pim_rpf_addr_is_inaddr_none(&rp_info->rp)) {
			char buf[48];

			if (uj) {
				/*
				 * If we have moved on to a new RP then add the
				 * entry for the previous RP
				 */
				if (prev_rp_info
				    && prev_rp_info->rp.rpf_addr.u.prefix4
						       .s_addr
					       != rp_info->rp.rpf_addr.u.prefix4
							  .s_addr) {
					json_object_object_add(
						json,
						inet_ntoa(prev_rp_info->rp
								  .rpf_addr.u
								  .prefix4),
						json_rp_rows);
					json_rp_rows = NULL;
				}

				if (!json_rp_rows)
					json_rp_rows = json_object_new_array();

				json_row = json_object_new_object();
				if (rp_info->rp.source_nexthop.interface)
					json_object_string_add(
						json_row, "outboundInterface",
						rp_info->rp.source_nexthop
							.interface->name);

				if (rp_info->i_am_rp)
					json_object_boolean_true_add(json_row,
								     "iAmRP");

				if (rp_info->plist)
					json_object_string_add(json_row,
							       "prefixList",
							       rp_info->plist);
				else
					json_object_string_add(
						json_row, "group",
						prefix2str(&rp_info->group, buf,
							   48));

				json_object_array_add(json_rp_rows, json_row);
			} else {
				vty_out(vty, "%-15s ",
					inet_ntoa(rp_info->rp.rpf_addr.u
							  .prefix4));

				if (rp_info->plist)
					vty_out(vty, "%-18s ", rp_info->plist);
				else
					vty_out(vty, "%-18s ",
						prefix2str(&rp_info->group, buf,
							   48));

				if (rp_info->rp.source_nexthop.interface)
					vty_out(vty, "%-10s ",
						rp_info->rp.source_nexthop
							.interface->name);
				else
					vty_out(vty, "%-10s ", "(Unknown)");

				if (rp_info->i_am_rp)
					vty_out(vty, "yes\n");
				else
					vty_out(vty, "no\n");
			}

			prev_rp_info = rp_info;
		}
	}

	if (uj) {
		if (prev_rp_info && json_rp_rows)
			json_object_object_add(
				json,
				inet_ntoa(prev_rp_info->rp.rpf_addr.u.prefix4),
				json_rp_rows);

		vty_out(vty, "%s\n", json_object_to_json_string_ext(
					     json, JSON_C_TO_STRING_PRETTY));
		json_object_free(json);
	}
}

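/*
 * For RP nexthops that have no gateway address (connected routes), fill in
 * the gateway from the PIM neighbor learned on that interface.
 */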
void pim_resolve_rp_nh(struct pim_instance *pim)
{
	struct listnode *node = NULL;
	struct rp_info *rp_info = NULL;
	struct nexthop *nh_node = NULL;
	struct prefix nht_p;
	struct pim_nexthop_cache pnc;
	struct pim_neighbor *nbr = NULL;

	for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
		if (rp_info->rp.rpf_addr.u.prefix4.s_addr == INADDR_NONE)
			continue;

		nht_p.family = AF_INET;
		nht_p.prefixlen = IPV4_MAX_BITLEN;
		nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
		memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
		if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
					       &pnc))
			continue;

		for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
			if (nh_node->gate.ipv4.s_addr != 0)
				continue;

			struct interface *ifp1 = if_lookup_by_index(
				nh_node->ifindex, pim->vrf_id);
			nbr = pim_neighbor_find_if(ifp1);
			if (!nbr)
				continue;

			nh_node->gate.ipv4 = nbr->source_addr;
			if (PIM_DEBUG_PIM_NHT_RP) {
				char str[PREFIX_STRLEN];
				char str1[INET_ADDRSTRLEN];
				pim_inet4_dump("<nht_nbr?>", nbr->source_addr,
					       str1, sizeof(str1));
				pim_addr_dump("<nht_addr?>", &nht_p, str,
					      sizeof(str));
				zlog_debug(
					"%s: addr %s new nexthop addr %s interface %s",
					__PRETTY_FUNCTION__, str, str1,
					ifp1->name);
			}
		}
	}
}