// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PIM for Quagga
 * Copyright (C) 2008 Everton da Silva Marques
 */

#include <zebra.h>

#include "lib/json.h"
#include "command.h"
#include "if.h"
#include "prefix.h"
#include "zclient.h"
#include "plist.h"
#include "hash.h"
#include "nexthop.h"
#include "vrf.h"
#include "ferr.h"

#include "pimd.h"
#include "pim_mroute.h"
#include "pim_cmd.h"
#include "pim_iface.h"
#include "pim_vty.h"
#include "pim_mroute.h"
#include "pim_str.h"
#include "pim_igmp.h"
#include "pim_igmpv3.h"
#include "pim_sock.h"
#include "pim_time.h"
#include "pim_util.h"
#include "pim_oil.h"
#include "pim_neighbor.h"
#include "pim_pim.h"
#include "pim_ifchannel.h"
#include "pim_hello.h"
#include "pim_msg.h"
#include "pim_upstream.h"
#include "pim_rpf.h"
#include "pim_macro.h"
#include "pim_ssmpingd.h"
#include "pim_zebra.h"
#include "pim_static.h"
#include "pim_rp.h"
#include "pim_zlookup.h"
#include "pim_msdp.h"
#include "pim_ssm.h"
#include "pim_nht.h"
#include "pim_bfd.h"
#include "pim_vxlan.h"
#include "pim_mlag.h"
#include "bfd.h"
#include "pim_bsm.h"
#include "lib/northbound_cli.h"
#include "pim_errors.h"
#include "pim_nb.h"
#include "pim_addr.h"
#include "pim_cmd_common.h"

#include "pimd/pim_cmd_clippy.c"

static struct cmd_node debug_node = {
	.name = "debug",
	.node = DEBUG_NODE,
	.prompt = "",
	.config_write = pim_debug_config_write,
};

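/*
 * Helper for the PIM assert show output: formats one line per interface
 * channel with interface, primary address, (S,G), assert state, winner,
 * uptime and assert timer.
 */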
static void pim_show_assert_helper(struct vty *vty,
				   struct pim_interface *pim_ifp,
				   struct pim_ifchannel *ch, time_t now)
{
	char winner_str[INET_ADDRSTRLEN];
	struct in_addr ifaddr;
	char uptime[10];
	char timer[10];
	char buf[PREFIX_STRLEN];

	ifaddr = pim_ifp->primary_address;

	pim_inet4_dump("<assrt_win?>", ch->ifassert_winner, winner_str,
		       sizeof(winner_str));

	pim_time_uptime(uptime, sizeof(uptime), now - ch->ifassert_creation);
	pim_time_timer_to_mmss(timer, sizeof(timer), ch->t_ifassert_timer);

	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-6s %-15s %-8s %-5s\n",
		ch->interface->name,
		inet_ntop(AF_INET, &ifaddr, buf, sizeof(buf)), &ch->sg.src,
		&ch->sg.grp, pim_ifchannel_ifassert_name(ch->ifassert_state),
		winner_str, uptime, timer);
}

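/*
 * Walk every PIM-enabled interface and print the assert state of each
 * interface channel, skipping channels with no assert information.
 */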
static void pim_show_assert(struct pim_instance *pim, struct vty *vty)
{
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;
	struct interface *ifp;
	time_t now;

	now = pim_time_monotonic_sec();

	vty_out(vty,
		"Interface Address Source Group State Winner Uptime Timer\n");

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if (!pim_ifp)
			continue;

		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
			if (ch->ifassert_state == PIM_IFASSERT_NOINFO)
				continue;

			pim_show_assert_helper(vty, pim_ifp, ch, now);
		} /* scan interface channels */
	}
}

static void pim_show_assert_internal_helper(struct vty *vty,
					    struct pim_interface *pim_ifp,
					    struct pim_ifchannel *ch)
{
	struct in_addr ifaddr;
	char buf[PREFIX_STRLEN];

	ifaddr = pim_ifp->primary_address;

	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-3s %-3s %-3s %-4s\n",
		ch->interface->name,
		inet_ntop(AF_INET, &ifaddr, buf, sizeof(buf)), &ch->sg.src,
		&ch->sg.grp,
		PIM_IF_FLAG_TEST_COULD_ASSERT(ch->flags) ? "yes" : "no",
		pim_macro_ch_could_assert_eval(ch) ? "yes" : "no",
		PIM_IF_FLAG_TEST_ASSERT_TRACKING_DESIRED(ch->flags) ? "yes"
								    : "no",
		pim_macro_assert_tracking_desired_eval(ch) ? "yes" : "no");
}

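/*
 * Dump the internal assert predicates (CouldAssert, AssertTrackingDesired
 * and their evaluated values) for every interface channel.
 */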
static void pim_show_assert_internal(struct pim_instance *pim, struct vty *vty)
{
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;
	struct interface *ifp;

	vty_out(vty,
		"CA: CouldAssert\n"
		"ECA: Evaluate CouldAssert\n"
		"ATD: AssertTrackingDesired\n"
		"eATD: Evaluate AssertTrackingDesired\n\n");

	vty_out(vty,
		"Interface Address Source Group CA eCA ATD eATD\n");
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if (!pim_ifp)
			continue;

		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
			pim_show_assert_internal_helper(vty, pim_ifp, ch);
		} /* scan interface channels */
	}
}

static void pim_show_assert_metric_helper(struct vty *vty,
					  struct pim_interface *pim_ifp,
					  struct pim_ifchannel *ch)
{
	char addr_str[INET_ADDRSTRLEN];
	struct pim_assert_metric am;
	struct in_addr ifaddr;
	char buf[PREFIX_STRLEN];

	ifaddr = pim_ifp->primary_address;

	am = pim_macro_spt_assert_metric(&ch->upstream->rpf,
					 pim_ifp->primary_address);

	pim_inet4_dump("<addr?>", am.ip_address, addr_str, sizeof(addr_str));

	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-3s %4u %6u %-15s\n",
		ch->interface->name,
		inet_ntop(AF_INET, &ifaddr, buf, sizeof(buf)), &ch->sg.src,
		&ch->sg.grp, am.rpt_bit_flag ? "yes" : "no",
		am.metric_preference, am.route_metric, addr_str);
}

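/*
 * Print this router's own assert metric (computed from the upstream RPF via
 * pim_macro_spt_assert_metric()) for every interface channel.
 */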
static void pim_show_assert_metric(struct pim_instance *pim, struct vty *vty)
{
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;
	struct interface *ifp;

	vty_out(vty,
		"Interface Address Source Group RPT Pref Metric Address \n");

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if (!pim_ifp)
			continue;

		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
			pim_show_assert_metric_helper(vty, pim_ifp, ch);
		} /* scan interface channels */
	}
}

static void pim_show_assert_winner_metric_helper(struct vty *vty,
						 struct pim_interface *pim_ifp,
						 struct pim_ifchannel *ch)
{
	char addr_str[INET_ADDRSTRLEN];
	struct pim_assert_metric *am;
	struct in_addr ifaddr;
	char pref_str[16];
	char metr_str[16];
	char buf[PREFIX_STRLEN];

	ifaddr = pim_ifp->primary_address;

	am = &ch->ifassert_winner_metric;

	pim_inet4_dump("<addr?>", am->ip_address, addr_str, sizeof(addr_str));

	if (am->metric_preference == PIM_ASSERT_METRIC_PREFERENCE_MAX)
		snprintf(pref_str, sizeof(pref_str), "INFI");
	else
		snprintf(pref_str, sizeof(pref_str), "%4u",
			 am->metric_preference);

	if (am->route_metric == PIM_ASSERT_ROUTE_METRIC_MAX)
		snprintf(metr_str, sizeof(metr_str), "INFI");
	else
		snprintf(metr_str, sizeof(metr_str), "%6u", am->route_metric);

	vty_out(vty, "%-16s %-15s %-15pPAs %-15pPAs %-3s %-4s %-6s %-15s\n",
		ch->interface->name,
		inet_ntop(AF_INET, &ifaddr, buf, sizeof(buf)), &ch->sg.src,
		&ch->sg.grp, am->rpt_bit_flag ? "yes" : "no", pref_str,
		metr_str, addr_str);
}

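/*
 * Print the current assert winner's metric for every interface channel;
 * infinite preference/metric values are rendered as "INFI".
 */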
static void pim_show_assert_winner_metric(struct pim_instance *pim,
					  struct vty *vty)
{
	struct pim_interface *pim_ifp;
	struct pim_ifchannel *ch;
	struct interface *ifp;

	vty_out(vty,
		"Interface Address Source Group RPT Pref Metric Address \n");

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;
		if (!pim_ifp)
			continue;

		RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
			pim_show_assert_winner_metric_helper(vty, pim_ifp, ch);
		} /* scan interface channels */
	}
}

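/*
 * Summary view of IGMP interfaces: one line (or one JSON object) per IGMP
 * socket showing state, address, version, querier role, query timer and
 * uptime.
 */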
static void igmp_show_interfaces(struct pim_instance *pim, struct vty *vty,
				 bool uj)
{
	struct interface *ifp;
	time_t now;
	char buf[PREFIX_STRLEN];
	json_object *json = NULL;
	json_object *json_row = NULL;

	now = pim_time_monotonic_sec();

	if (uj)
		json = json_object_new_object();
	else
		vty_out(vty,
			"Interface State Address V Querier QuerierIp Query Timer Uptime\n");

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp;
		struct listnode *sock_node;
		struct gm_sock *igmp;

		pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
					  igmp)) {
			char uptime[10];
			char query_hhmmss[10];

			pim_time_uptime(uptime, sizeof(uptime),
					now - igmp->sock_creation);
			pim_time_timer_to_hhmmss(query_hhmmss,
						 sizeof(query_hhmmss),
						 igmp->t_igmp_query_timer);

			if (uj) {
				json_row = json_object_new_object();
				json_object_pim_ifp_add(json_row, ifp);
				json_object_string_add(json_row, "upTime",
						       uptime);
				json_object_int_add(json_row, "version",
						    pim_ifp->igmp_version);

				if (igmp->t_igmp_query_timer) {
					json_object_boolean_true_add(json_row,
								     "querier");
					json_object_string_add(json_row,
							       "queryTimer",
							       query_hhmmss);
				}
				json_object_string_addf(json_row, "querierIp",
							"%pI4",
							&igmp->querier_addr);

				json_object_object_add(json, ifp->name,
						       json_row);

				if (igmp->mtrace_only) {
					json_object_boolean_true_add(
						json_row, "mtraceOnly");
				}
			} else {
				vty_out(vty,
					"%-16s %5s %15s %d %7s %17pI4 %11s %8s\n",
					ifp->name,
					if_is_up(ifp)
						? (igmp->mtrace_only ? "mtrc"
								     : "up")
						: "down",
					inet_ntop(AF_INET, &igmp->ifaddr, buf,
						  sizeof(buf)),
					pim_ifp->igmp_version,
					igmp->t_igmp_query_timer ? "local"
								 : "other",
					&igmp->querier_addr, query_hhmmss,
					uptime);
			}
		}
	}

	if (uj)
		vty_json(vty, json);
}

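/*
 * Detailed IGMP view for a single interface (or "detail" for all): querier
 * information plus the derived protocol timers (GMI, LMQT, OHPI, OQPI, QRI,
 * startup query interval), in plain text or JSON.
 */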
static void igmp_show_interfaces_single(struct pim_instance *pim,
					struct vty *vty, const char *ifname,
					bool uj)
{
	struct gm_sock *igmp;
	struct interface *ifp;
	struct listnode *sock_node;
	struct pim_interface *pim_ifp;
	char uptime[10];
	char query_hhmmss[10];
	char other_hhmmss[10];
	int found_ifname = 0;
	int sqi;
	long gmi_msec; /* Group Membership Interval */
	long lmqt_msec;
	long ohpi_msec;
	long oqpi_msec; /* Other Querier Present Interval */
	long qri_msec;
	time_t now;
	int lmqc;

	json_object *json = NULL;
	json_object *json_row = NULL;

	if (uj)
		json = json_object_new_object();

	now = pim_time_monotonic_sec();

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (strcmp(ifname, "detail") && strcmp(ifname, ifp->name))
			continue;

		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
					  igmp)) {
			found_ifname = 1;
			pim_time_uptime(uptime, sizeof(uptime),
					now - igmp->sock_creation);
			pim_time_timer_to_hhmmss(query_hhmmss,
						 sizeof(query_hhmmss),
						 igmp->t_igmp_query_timer);
			pim_time_timer_to_hhmmss(other_hhmmss,
						 sizeof(other_hhmmss),
						 igmp->t_other_querier_timer);

			gmi_msec = PIM_IGMP_GMI_MSEC(
				igmp->querier_robustness_variable,
				igmp->querier_query_interval,
				pim_ifp->gm_query_max_response_time_dsec);

			sqi = PIM_IGMP_SQI(pim_ifp->gm_default_query_interval);

			oqpi_msec = PIM_IGMP_OQPI_MSEC(
				igmp->querier_robustness_variable,
				igmp->querier_query_interval,
				pim_ifp->gm_query_max_response_time_dsec);

			lmqt_msec = PIM_IGMP_LMQT_MSEC(
				pim_ifp->gm_specific_query_max_response_time_dsec,
				pim_ifp->gm_last_member_query_count);

			ohpi_msec =
				PIM_IGMP_OHPI_DSEC(
					igmp->querier_robustness_variable,
					igmp->querier_query_interval,
					pim_ifp->gm_query_max_response_time_dsec) *
				100;

			qri_msec =
				pim_ifp->gm_query_max_response_time_dsec * 100;
			lmqc = pim_ifp->gm_last_member_query_count;

			if (uj) {
				json_row = json_object_new_object();
				json_object_pim_ifp_add(json_row, ifp);
				json_object_string_add(json_row, "upTime",
						       uptime);
				json_object_string_add(json_row, "querier",
						       igmp->t_igmp_query_timer
							       ? "local"
							       : "other");
				json_object_string_addf(json_row, "querierIp",
							"%pI4",
							&igmp->querier_addr);
				json_object_int_add(json_row, "queryStartCount",
						    igmp->startup_query_count);
				json_object_string_add(json_row,
						       "queryQueryTimer",
						       query_hhmmss);
				json_object_string_add(json_row,
						       "queryOtherTimer",
						       other_hhmmss);
				json_object_int_add(json_row, "version",
						    pim_ifp->igmp_version);
				json_object_int_add(
					json_row,
					"timerGroupMembershipIntervalMsec",
					gmi_msec);
				json_object_int_add(json_row,
						    "lastMemberQueryCount",
						    lmqc);
				json_object_int_add(json_row,
						    "timerLastMemberQueryMsec",
						    lmqt_msec);
				json_object_int_add(
					json_row,
					"timerOlderHostPresentIntervalMsec",
					ohpi_msec);
				json_object_int_add(
					json_row,
					"timerOtherQuerierPresentIntervalMsec",
					oqpi_msec);
				json_object_int_add(
					json_row, "timerQueryInterval",
					igmp->querier_query_interval);
				json_object_int_add(
					json_row,
					"timerQueryResponseIntervalMsec",
					qri_msec);
				json_object_int_add(
					json_row, "timerRobustnessVariable",
					igmp->querier_robustness_variable);
				json_object_int_add(json_row,
						    "timerStartupQueryInterval",
						    sqi);

				json_object_object_add(json, ifp->name,
						       json_row);

				if (igmp->mtrace_only) {
					json_object_boolean_true_add(
						json_row, "mtraceOnly");
				}
			} else {
				vty_out(vty, "Interface : %s\n", ifp->name);
				vty_out(vty, "State : %s\n",
					if_is_up(ifp) ? (igmp->mtrace_only ?
								 "mtrace"
								 : "up")
						      : "down");
				vty_out(vty, "Address : %pI4\n",
					&pim_ifp->primary_address);
				vty_out(vty, "Uptime : %s\n", uptime);
				vty_out(vty, "Version : %d\n",
					pim_ifp->igmp_version);
				vty_out(vty, "\n");
				vty_out(vty, "\n");

				vty_out(vty, "Querier\n");
				vty_out(vty, "-------\n");
				vty_out(vty, "Querier : %s\n",
					igmp->t_igmp_query_timer ? "local"
								 : "other");
				vty_out(vty, "QuerierIp : %pI4",
					&igmp->querier_addr);
				if (pim_ifp->primary_address.s_addr
				    == igmp->querier_addr.s_addr)
					vty_out(vty, " (this router)\n");
				else
					vty_out(vty, "\n");

				vty_out(vty, "Start Count : %d\n",
					igmp->startup_query_count);
				vty_out(vty, "Query Timer : %s\n",
					query_hhmmss);
				vty_out(vty, "Other Timer : %s\n",
					other_hhmmss);
				vty_out(vty, "\n");
				vty_out(vty, "\n");

				vty_out(vty, "Timers\n");
				vty_out(vty, "------\n");
				vty_out(vty,
					"Group Membership Interval : %lis\n",
					gmi_msec / 1000);
				vty_out(vty,
					"Last Member Query Count : %d\n",
					lmqc);
				vty_out(vty,
					"Last Member Query Time : %lis\n",
					lmqt_msec / 1000);
				vty_out(vty,
					"Older Host Present Interval : %lis\n",
					ohpi_msec / 1000);
				vty_out(vty,
					"Other Querier Present Interval : %lis\n",
					oqpi_msec / 1000);
				vty_out(vty,
					"Query Interval : %ds\n",
					igmp->querier_query_interval);
				vty_out(vty,
					"Query Response Interval : %lis\n",
					qri_msec / 1000);
				vty_out(vty,
					"Robustness Variable : %d\n",
					igmp->querier_robustness_variable);
				vty_out(vty,
					"Startup Query Interval : %ds\n",
					sqi);
				vty_out(vty, "\n");
				vty_out(vty, "\n");

				pim_print_ifp_flags(vty, ifp);
			}
		}
	}

	if (uj)
		vty_json(vty, json);
	else if (!found_ifname)
		vty_out(vty, "%% No such interface\n");
}

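/*
 * List the multicast groups this router has itself joined on each interface
 * (pim_ifp->gm_join_list), filtered by join_type; entries of type
 * GM_JOIN_BOTH always match.
 */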
static void igmp_show_interface_join(struct pim_instance *pim, struct vty *vty,
				     bool uj, enum gm_join_type join_type)
{
	struct interface *ifp;
	time_t now;
	json_object *json = NULL;
	json_object *json_iface = NULL;
	json_object *json_grp = NULL;
	json_object *json_grp_arr = NULL;

	now = pim_time_monotonic_sec();

	if (uj) {
		json = json_object_new_object();
		json_object_string_add(json, "vrf",
				       vrf_id_to_name(pim->vrf->vrf_id));
	} else {
		vty_out(vty,
			"Interface Address Source Group Socket Uptime \n");
	}

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp;
		struct listnode *join_node;
		struct gm_join *ij;
		struct in_addr pri_addr;
		char pri_addr_str[INET_ADDRSTRLEN];

		pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (!pim_ifp->gm_join_list)
			continue;

		pri_addr = pim_find_primary_addr(ifp);
		pim_inet4_dump("<pri?>", pri_addr, pri_addr_str,
			       sizeof(pri_addr_str));

		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_join_list, join_node,
					  ij)) {
			char group_str[INET_ADDRSTRLEN];
			char source_str[INET_ADDRSTRLEN];
			char uptime[10];

			if (ij->join_type != join_type &&
			    ij->join_type != GM_JOIN_BOTH)
				continue;

			pim_time_uptime(uptime, sizeof(uptime),
					now - ij->sock_creation);
			pim_inet4_dump("<grp?>", ij->group_addr, group_str,
				       sizeof(group_str));
			pim_inet4_dump("<src?>", ij->source_addr, source_str,
				       sizeof(source_str));

			if (uj) {
				json_object_object_get_ex(json, ifp->name,
							  &json_iface);

				if (!json_iface) {
					json_iface = json_object_new_object();
					json_object_string_add(
						json_iface, "name", ifp->name);
					json_object_object_add(json, ifp->name,
							       json_iface);
					json_grp_arr = json_object_new_array();
					json_object_object_add(json_iface,
							       "groups",
							       json_grp_arr);
				}

				json_grp = json_object_new_object();
				json_object_string_add(json_grp, "source",
						       source_str);
				json_object_string_add(json_grp, "group",
						       group_str);
				json_object_string_add(json_grp, "primaryAddr",
						       pri_addr_str);
				json_object_int_add(json_grp, "sockFd",
						    ij->sock_fd);
				json_object_string_add(json_grp, "upTime",
						       uptime);
				json_object_array_add(json_grp_arr, json_grp);
			} else {
				vty_out(vty,
					"%-16s %-15s %-15s %-15s %6d %8s\n",
					ifp->name, pri_addr_str, source_str,
					group_str, ij->sock_fd, uptime);
			}
		} /* for (pim_ifp->gm_join_list) */

	} /* for (iflist) */

	if (uj)
		vty_json(vty, json);
}

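/*
 * List statically configured group memberships
 * (pim_ifp->static_group_list) per interface, in plain text or JSON.
 */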
static void igmp_show_interface_static_group(struct pim_instance *pim,
					     struct vty *vty, bool uj)
{
	struct interface *ifp;
	json_object *json = NULL;
	json_object *json_iface = NULL;
	json_object *json_grp = NULL;
	json_object *json_grp_arr = NULL;

	if (uj) {
		json = json_object_new_object();
		json_object_string_add(json, "vrf",
				       vrf_id_to_name(pim->vrf->vrf_id));
	} else {
		vty_out(vty,
			"Interface Address Source Group\n");
	}

	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp;
		struct listnode *node;
		struct static_group *stgrp;
		struct in_addr pri_addr;
		char pri_addr_str[INET_ADDRSTRLEN];

		pim_ifp = ifp->info;

		if (!pim_ifp)
			continue;

		if (!pim_ifp->static_group_list)
			continue;

		pri_addr = pim_find_primary_addr(ifp);
		pim_inet4_dump("<pri?>", pri_addr, pri_addr_str,
			       sizeof(pri_addr_str));

		for (ALL_LIST_ELEMENTS_RO(pim_ifp->static_group_list, node,
					  stgrp)) {
			char group_str[INET_ADDRSTRLEN];
			char source_str[INET_ADDRSTRLEN];

			pim_inet4_dump("<grp?>", stgrp->group_addr, group_str,
				       sizeof(group_str));
			pim_inet4_dump("<src?>", stgrp->source_addr, source_str,
				       sizeof(source_str));

			if (uj) {
				json_object_object_get_ex(json, ifp->name,
							  &json_iface);

				if (!json_iface) {
					json_iface = json_object_new_object();
					json_object_string_add(json_iface,
							       "name",
							       ifp->name);
					json_object_object_add(json, ifp->name,
							       json_iface);
					json_grp_arr = json_object_new_array();
					json_object_object_add(json_iface,
							       "groups",
							       json_grp_arr);
				}

				json_grp = json_object_new_object();
				json_object_string_add(json_grp, "source",
						       source_str);
				json_object_string_add(json_grp, "group",
						       group_str);
				json_object_string_add(json_grp, "primaryAddr",
						       pri_addr_str);
				json_object_array_add(json_grp_arr, json_grp);
			} else {
				vty_out(vty, "%-16s %-15s %-15s %-15s\n",
					ifp->name, pri_addr_str, source_str,
					group_str);
			}
		} /* for (pim_ifp->static_group_list) */

	} /* for (iflist) */

	if (uj)
		vty_json(vty, json);
}

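/*
 * Aggregate IGMP statistics across all PIM interfaces (or a single named
 * interface) and print them as text or JSON; per-interface and per-socket
 * counters are summed into one igmp_stats structure.
 */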
static void igmp_show_statistics(struct pim_instance *pim, struct vty *vty,
|
2018-09-04 19:39:04 +02:00
|
|
|
const char *ifname, bool uj)
|
2018-05-04 13:25:38 +02:00
|
|
|
{
|
|
|
|
struct interface *ifp;
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
struct igmp_stats igmp_stats;
|
2022-03-14 22:06:16 +01:00
|
|
|
bool found_ifname = false;
|
|
|
|
json_object *json = NULL;
|
2018-05-04 13:25:38 +02:00
|
|
|
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats_init(&igmp_stats);
|
2018-05-04 13:25:38 +02:00
|
|
|
|
2022-03-14 22:06:16 +01:00
|
|
|
if (uj)
|
|
|
|
json = json_object_new_object();
|
|
|
|
|
2018-05-04 13:25:38 +02:00
|
|
|
FOR_ALL_INTERFACES (pim->vrf, ifp) {
|
|
|
|
struct pim_interface *pim_ifp;
|
2022-03-08 17:34:34 +01:00
|
|
|
struct listnode *sock_node, *source_node, *group_node;
|
2021-12-03 19:23:23 +01:00
|
|
|
struct gm_sock *igmp;
|
2022-03-08 17:34:34 +01:00
|
|
|
struct gm_group *group;
|
|
|
|
struct gm_source *src;
|
2018-05-04 13:25:38 +02:00
|
|
|
|
|
|
|
pim_ifp = ifp->info;
|
|
|
|
|
|
|
|
if (!pim_ifp)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (ifname && strcmp(ifname, ifp->name))
|
|
|
|
continue;
|
|
|
|
|
2022-03-14 22:06:16 +01:00
|
|
|
found_ifname = true;
|
|
|
|
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.joins_failed += pim_ifp->igmp_ifstat_joins_failed;
|
|
|
|
igmp_stats.joins_sent += pim_ifp->igmp_ifstat_joins_sent;
|
|
|
|
igmp_stats.total_groups +=
|
2022-03-08 17:34:34 +01:00
|
|
|
pim_ifp->gm_group_list
|
|
|
|
? listcount(pim_ifp->gm_group_list)
|
|
|
|
: 0;
|
pimd: Add additional IGMP stats (peak number of groups)
```
exit1-debian-11# sh ip igmp statistics interface eth2
IGMP statistics
Interface : eth2
V1 query : 0
V2 query : 0
V3 query : 25
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 34
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 13
general queries sent : 2
group queries sent : 24
peak groups : 9
total groups : 4
total source groups : 1
exit1-debian-11# sh ip igmp statistics interface eth2 json
{
"eth2":{
"name":"eth2",
"queryV1":0,
"queryV2":0,
"queryV3":25,
"leaveV2":0,
"reportV1":0,
"reportV2":0,
"reportV3":34,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"peakGroups":9,
"totalGroups":4,
"totalSourceGroups":1,
"joinsFailed":0,
"joinsSent":13,
"generalQueriesSent":2,
"groupQueriesSent":24
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-14 16:33:11 +01:00
|
|
|
igmp_stats.peak_groups += pim_ifp->igmp_peak_group_count;
|
|
|
|
|
2022-03-08 17:34:34 +01:00
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, group_node,
|
|
|
|
group)) {
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(group->group_source_list,
|
|
|
|
source_node, src)) {
|
|
|
|
if (pim_addr_is_any(src->source_addr))
|
|
|
|
continue;
|
|
|
|
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.total_source_groups++;
|
2022-03-08 17:34:34 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-04 15:00:50 +01:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_socket_list, sock_node,
|
2018-05-04 13:25:38 +02:00
|
|
|
igmp)) {
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats_add(&igmp_stats, &igmp->igmp_stats);
|
2018-05-04 13:25:38 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-14 22:06:16 +01:00
|
|
|
if (!found_ifname) {
|
|
|
|
if (uj)
|
|
|
|
vty_json(vty, json);
|
|
|
|
else
|
|
|
|
vty_out(vty, "%% No such interface\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
json_object *json_row = json_object_new_object();
|
2018-05-04 13:25:38 +02:00
|
|
|
|
2022-03-14 22:06:16 +01:00
|
|
|
json_object_string_add(json_row, "name",
|
|
|
|
ifname ? ifname : "global");
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
json_object_int_add(json_row, "queryV1", igmp_stats.query_v1);
|
|
|
|
json_object_int_add(json_row, "queryV2", igmp_stats.query_v2);
|
|
|
|
json_object_int_add(json_row, "queryV3", igmp_stats.query_v3);
|
|
|
|
json_object_int_add(json_row, "leaveV2", igmp_stats.leave_v2);
|
|
|
|
json_object_int_add(json_row, "reportV1", igmp_stats.report_v1);
|
|
|
|
json_object_int_add(json_row, "reportV2", igmp_stats.report_v2);
|
|
|
|
json_object_int_add(json_row, "reportV3", igmp_stats.report_v3);
|
2018-05-04 13:25:38 +02:00
|
|
|
json_object_int_add(json_row, "mtraceResponse",
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.mtrace_rsp);
|
2018-05-04 13:25:38 +02:00
|
|
|
json_object_int_add(json_row, "mtraceRequest",
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.mtrace_req);
|
2018-05-04 13:25:38 +02:00
|
|
|
json_object_int_add(json_row, "unsupported",
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.unsupported);
|
pimd: Show total received messages IGMP stats
```
exit1-debian-11# do sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 3
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 18
mtrace response : 0
mtrace request : 0
unsupported : 0
total received messages : 21
joins failed : 0
joins sent : 16
general queries sent : 6
group queries sent : 3
total groups : 4
total source groups : 1
exit1-debian-11# do sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":4,
"leaveV2":0,
"reportV1":0,
"reportV2":0,
"reportV3":18,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalReceivedMessage":22,
"totalGroups":3,
"totalSourceGroups":1,
"joinsFailed":0,
"joinsSent":16,
"generalQueriesSent":6,
"groupQueriesSent":4
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-14 21:45:28 +01:00
|
|
|
json_object_int_add(json_row, "totalReceivedMessages",
|
|
|
|
igmp_stats.total_recv_messages);
|
pimd: Add additional IGMP stats (peak number of groups)
```
exit1-debian-11# sh ip igmp statistics interface eth2
IGMP statistics
Interface : eth2
V1 query : 0
V2 query : 0
V3 query : 25
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 34
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 13
general queries sent : 2
group queries sent : 24
peak groups : 9
total groups : 4
total source groups : 1
exit1-debian-11# sh ip igmp statistics interface eth2 json
{
"eth2":{
"name":"eth2",
"queryV1":0,
"queryV2":0,
"queryV3":25,
"leaveV2":0,
"reportV1":0,
"reportV2":0,
"reportV3":34,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"peakGroups":9,
"totalGroups":4,
"totalSourceGroups":1,
"joinsFailed":0,
"joinsSent":13,
"generalQueriesSent":2,
"groupQueriesSent":24
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-14 16:33:11 +01:00
|
|
|
json_object_int_add(json_row, "peakGroups",
|
|
|
|
igmp_stats.peak_groups);
|
2022-03-08 17:34:34 +01:00
|
|
|
json_object_int_add(json_row, "totalGroups",
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.total_groups);
|
2022-03-08 17:34:34 +01:00
|
|
|
json_object_int_add(json_row, "totalSourceGroups",
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
igmp_stats.total_source_groups);
|
|
|
|
json_object_int_add(json_row, "joinsFailed",
|
|
|
|
igmp_stats.joins_failed);
|
|
|
|
json_object_int_add(json_row, "joinsSent",
|
|
|
|
igmp_stats.joins_sent);
|
pimd: Add additional IGMP stats (generic/group specific queries sent)
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 6
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 14
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 16
general queries sent : 6
group queries sent : 4
total groups : 5
total source groups : 1
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":6,
"leaveV2":0,
"reportV1":0,
"reportV2":0,
"reportV3":18,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":5,
"totalSourceGroups":1,
"joinsFailed":0,
"joinsSent":16,
"generalQueriesSent":6,
"groupQueriesSent":4
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-14 08:26:22 +01:00
|
|
|
json_object_int_add(json_row, "generalQueriesSent",
|
|
|
|
igmp_stats.general_queries_sent);
|
|
|
|
json_object_int_add(json_row, "groupQueriesSent",
|
|
|
|
igmp_stats.group_queries_sent);
|
2018-05-04 13:25:38 +02:00
|
|
|
json_object_object_add(json, ifname ? ifname : "global",
|
|
|
|
json_row);
|
2022-01-31 20:20:41 +01:00
|
|
|
vty_json(vty, json);
|
2018-05-04 13:25:38 +02:00
|
|
|
} else {
|
pimd: Add IGMP join sent/failed statistics
```
exit1-debian-11# sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 0
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 16
mtrace response : 0
mtrace request : 0
unsupported : 0
joins failed : 0
joins sent : 11
total groups : 4
total source groups : 0
exit1-debian-11# sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":0,
"leaveV3":0,
"reportV1":0,
"reportV2":0,
"reportV3":16,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalGroups":4,
"totalSourceGroups":0,
"joinsFailed":0,
"joinsSent":11
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-10 17:10:43 +01:00
|
|
|
vty_out(vty, "IGMP statistics\n");
|
pimd: Show total received messages IGMP stats
```
exit1-debian-11# do sh ip igmp statistics
IGMP statistics
Interface : global
V1 query : 0
V2 query : 0
V3 query : 3
V2 leave : 0
V1 report : 0
V2 report : 0
V3 report : 18
mtrace response : 0
mtrace request : 0
unsupported : 0
total received messages : 21
joins failed : 0
joins sent : 16
general queries sent : 6
group queries sent : 3
total groups : 4
total source groups : 1
exit1-debian-11# do sh ip igmp statistics json
{
"global":{
"name":"global",
"queryV1":0,
"queryV2":0,
"queryV3":4,
"leaveV2":0,
"reportV1":0,
"reportV2":0,
"reportV3":18,
"mtraceResponse":0,
"mtraceRequest":0,
"unsupported":0,
"totalReceivedMessage":22,
"totalGroups":3,
"totalSourceGroups":1,
"joinsFailed":0,
"joinsSent":16,
"generalQueriesSent":6,
"groupQueriesSent":4
}
}
```
Signed-off-by: Donatas Abraitis <donatas@opensourcerouting.org>
2022-03-14 21:45:28 +01:00
		vty_out(vty, "Interface : %s\n", ifname ? ifname : "global");
		vty_out(vty, "V1 query : %u\n", igmp_stats.query_v1);
		vty_out(vty, "V2 query : %u\n", igmp_stats.query_v2);
		vty_out(vty, "V3 query : %u\n", igmp_stats.query_v3);
		vty_out(vty, "V2 leave : %u\n", igmp_stats.leave_v2);
		vty_out(vty, "V1 report : %u\n", igmp_stats.report_v1);
		vty_out(vty, "V2 report : %u\n", igmp_stats.report_v2);
		vty_out(vty, "V3 report : %u\n", igmp_stats.report_v3);
		vty_out(vty, "mtrace response : %u\n", igmp_stats.mtrace_rsp);
		vty_out(vty, "mtrace request : %u\n", igmp_stats.mtrace_req);
		vty_out(vty, "unsupported : %u\n", igmp_stats.unsupported);
		vty_out(vty, "total received messages : %u\n",
			igmp_stats.total_recv_messages);
		vty_out(vty, "joins failed : %u\n", igmp_stats.joins_failed);
		vty_out(vty, "joins sent : %u\n", igmp_stats.joins_sent);
		vty_out(vty, "general queries sent : %u\n",
			igmp_stats.general_queries_sent);
		vty_out(vty, "group queries sent : %u\n",
			igmp_stats.group_queries_sent);
		vty_out(vty, "peak groups : %u\n", igmp_stats.peak_groups);
		vty_out(vty, "total groups : %u\n", igmp_stats.total_groups);
		vty_out(vty, "total source groups : %u\n",
			igmp_stats.total_source_groups);
	}
}

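/* Helper: build one JSON object for an IGMP source (address, remaining
 * source timer, forwarding flag, uptime) and append it to the supplied
 * "sources" JSON array.
 */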
static void igmp_source_json_helper(struct gm_source *src,
				    json_object *json_sources, char *source_str,
				    char *mmss, char *uptime)
{
	json_object *json_source = NULL;

	json_source = json_object_new_object();
	if (!json_source)
		return;

	json_object_string_add(json_source, "source", source_str);
	json_object_string_add(json_source, "timer", mmss);
	json_object_boolean_add(json_source, "forwarded",
				IGMP_SOURCE_TEST_FORWARDING(src->source_flags));
	json_object_string_add(json_source, "uptime", uptime);
	json_object_array_add(json_sources, json_source);
}

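/* Print one IGMP group entry, either as a plain-text table row or as a
 * JSON object attached to the per-interface "groups" array; with
 * 'detail' set, the group's individual sources are printed as well.
 */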
static void igmp_group_print(struct interface *ifp, struct vty *vty, bool uj,
			     json_object *json, struct gm_group *grp,
			     time_t now, bool detail)
{
	json_object *json_iface = NULL;
	json_object *json_group = NULL;
	json_object *json_groups = NULL;
	char group_str[INET_ADDRSTRLEN];
	char hhmmss[PIM_TIME_STRLEN];
	char uptime[PIM_TIME_STRLEN];

	pim_inet4_dump("<group?>", grp->group_addr, group_str,
		       sizeof(group_str));
	pim_time_timer_to_hhmmss(hhmmss, sizeof(hhmmss), grp->t_group_timer);
	pim_time_uptime(uptime, sizeof(uptime), now - grp->group_creation);

	if (uj) {
		json_object_object_get_ex(json, ifp->name, &json_iface);
		if (!json_iface) {
			json_iface = json_object_new_object();
			if (!json_iface)
				return;
			json_object_pim_ifp_add(json_iface, ifp);
			json_object_object_add(json, ifp->name, json_iface);
			json_groups = json_object_new_array();
			if (!json_groups)
				return;
			json_object_object_add(json_iface, "groups",
					       json_groups);
		}

		json_object_object_get_ex(json_iface, "groups", &json_groups);
		if (json_groups) {
			json_group = json_object_new_object();
			if (!json_group)
				return;

			json_object_string_add(json_group, "group", group_str);
			if (grp->igmp_version == IGMP_DEFAULT_VERSION)
				json_object_string_add(
					json_group, "mode",
					grp->group_filtermode_isexcl
						? "EXCLUDE"
						: "INCLUDE");

			json_object_string_add(json_group, "timer", hhmmss);
			json_object_int_add(
				json_group, "sourcesCount",
				grp->group_source_list
					? listcount(grp->group_source_list)
					: 0);
			json_object_int_add(json_group, "version",
					    grp->igmp_version);
			json_object_string_add(json_group, "uptime", uptime);
			json_object_array_add(json_groups, json_group);

			if (detail) {
				struct listnode *srcnode;
				struct gm_source *src;
				json_object *json_sources = NULL;

				json_sources = json_object_new_array();
				if (!json_sources)
					return;

				json_object_object_add(json_group, "sources",
						       json_sources);

				for (ALL_LIST_ELEMENTS_RO(
					     grp->group_source_list, srcnode,
					     src)) {
					char source_str[INET_ADDRSTRLEN];
					char mmss[PIM_TIME_STRLEN];
					char src_uptime[PIM_TIME_STRLEN];

					pim_inet4_dump(
						"<source?>", src->source_addr,
						source_str, sizeof(source_str));
					pim_time_timer_to_mmss(
						mmss, sizeof(mmss),
						src->t_source_timer);
					pim_time_uptime(
						src_uptime, sizeof(src_uptime),
						now - src->source_creation);

					igmp_source_json_helper(
						src, json_sources, source_str,
						mmss, src_uptime);
				}
			}
		}
	} else {
		if (detail) {
			struct listnode *srcnode;
			struct gm_source *src;

			for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
						  srcnode, src)) {
				char source_str[INET_ADDRSTRLEN];

				pim_inet4_dump("<source?>", src->source_addr,
					       source_str, sizeof(source_str));

				vty_out(vty,
					"%-16s %-15s %4s %8s %-15s %d %8s\n",
					ifp->name, group_str,
					grp->igmp_version == 3
						? (grp->group_filtermode_isexcl
							   ? "EXCL"
							   : "INCL")
						: "----",
					hhmmss, source_str, grp->igmp_version,
					uptime);
			}
			return;
		}

		vty_out(vty, "%-16s %-15s %4s %8s %4d %d %8s\n", ifp->name,
			group_str,
			grp->igmp_version == 3
				? (grp->group_filtermode_isexcl ? "EXCL"
								: "INCL")
				: "----",
			hhmmss,
			grp->group_source_list
				? listcount(grp->group_source_list)
				: 0,
			grp->igmp_version, uptime);
	}
}

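/* Group listing restricted to a single interface: print the total group
 * count and watermark warn limit, then the groups learned on 'ifname'
 * (optionally filtered to one group address given in 'grp_str').
 */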
static void igmp_show_groups_interface_single(struct pim_instance *pim,
					      struct vty *vty, bool uj,
					      const char *ifname,
					      const char *grp_str, bool detail)
{
	struct interface *ifp;
	time_t now;
	json_object *json = NULL;
	struct pim_interface *pim_ifp = NULL;
	struct gm_group *grp;

	now = pim_time_monotonic_sec();

	if (uj) {
		json = json_object_new_object();
		if (!json)
			return;
		json_object_int_add(json, "totalGroups", pim->gm_group_count);
		json_object_int_add(json, "watermarkLimit",
				    pim->gm_watermark_limit);
	} else {
		vty_out(vty, "Total IGMP groups: %u\n", pim->gm_group_count);
		vty_out(vty, "Watermark warn limit(%s): %u\n",
			pim->gm_watermark_limit ? "Set" : "Not Set",
			pim->gm_watermark_limit);

		if (!detail)
			vty_out(vty,
				"Interface Group Mode Timer Srcs V Uptime\n");
		else
			vty_out(vty,
				"Interface Group Mode Timer Source V Uptime\n");
	}

	ifp = if_lookup_by_name(ifname, pim->vrf->vrf_id);
	if (!ifp) {
		if (uj)
			vty_json(vty, json);
		return;
	}

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		if (uj)
			vty_json(vty, json);
		return;
	}

	if (grp_str) {
		struct in_addr group_addr;
		struct gm_sock *igmp;

		if (inet_pton(AF_INET, grp_str, &group_addr) == 1) {
			igmp = pim_igmp_sock_lookup_ifaddr(
				pim_ifp->gm_socket_list,
				pim_ifp->primary_address);
			if (igmp) {
				grp = find_group_by_addr(igmp, group_addr);
				if (grp)
					igmp_group_print(ifp, vty, uj, json,
							 grp, now, detail);
			}
		}
	} else {
		struct listnode *grpnode;

		/* scan igmp groups */
		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode, grp))
			igmp_group_print(ifp, vty, uj, json, grp, now, detail);
	}

	if (uj) {
		if (detail)
			vty_json_no_pretty(vty, json);
		else
			vty_json(vty, json);
	}
}

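/* Group listing across all interfaces of the pim instance: print the
 * total group count and watermark warn limit, then every group learned
 * on every pim-enabled interface (optionally filtered to one group
 * address given in 'grp_str').
 */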
static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj,
			     const char *grp_str, bool detail)
{
	struct interface *ifp;
	time_t now;
	json_object *json = NULL;

	now = pim_time_monotonic_sec();

	if (uj) {
		json = json_object_new_object();
		if (!json)
			return;
		json_object_int_add(json, "totalGroups", pim->gm_group_count);
		json_object_int_add(json, "watermarkLimit",
				    pim->gm_watermark_limit);
	} else {
		vty_out(vty, "Total IGMP groups: %u\n", pim->gm_group_count);
		vty_out(vty, "Watermark warn limit(%s): %u\n",
			pim->gm_watermark_limit ? "Set" : "Not Set",
			pim->gm_watermark_limit);
		if (!detail)
			vty_out(vty,
				"Interface Group Mode Timer Srcs V Uptime\n");
		else
			vty_out(vty,
				"Interface Group Mode Timer Source V Uptime\n");
	}

	/* scan interfaces */
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;
		struct listnode *grpnode;
		struct gm_group *grp;

		if (!pim_ifp)
			continue;

		if (grp_str) {
			struct in_addr group_addr;
			struct gm_sock *igmp;

			if (inet_pton(AF_INET, grp_str, &group_addr) == 1) {
				igmp = pim_igmp_sock_lookup_ifaddr(
					pim_ifp->gm_socket_list,
					pim_ifp->primary_address);
				if (igmp) {
					grp = find_group_by_addr(igmp,
								 group_addr);
					if (grp)
						igmp_group_print(ifp, vty, uj,
								 json, grp, now,
								 detail);
				}
			}
		} else {
			/* scan igmp groups */
			for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list,
						  grpnode, grp))
				igmp_group_print(ifp, vty, uj, json, grp, now,
						 detail);
		}
	} /* scan interfaces */

	if (uj) {
		if (detail)
			vty_json_no_pretty(vty, json);
		else
			vty_json(vty, json);
	}
}

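/* Dump per-group retransmission state: the remaining group-specific
 * query retransmit timer, the retransmit counter and the number of
 * sources that still have retransmissions pending.
 */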
static void igmp_show_group_retransmission(struct pim_instance *pim,
					   struct vty *vty)
{
	struct interface *ifp;

	vty_out(vty,
		"Interface Group RetTimer Counter RetSrcs\n");

	/* scan interfaces */
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;
		struct listnode *grpnode;
		struct gm_group *grp;

		if (!pim_ifp)
			continue;

		/* scan igmp groups */
		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
					  grp)) {
			char group_str[INET_ADDRSTRLEN];
			char grp_retr_mmss[10];
			struct listnode *src_node;
			struct gm_source *src;
			int grp_retr_sources = 0;

			pim_inet4_dump("<group?>", grp->group_addr, group_str,
				       sizeof(group_str));
			pim_time_timer_to_mmss(
				grp_retr_mmss, sizeof(grp_retr_mmss),
				grp->t_group_query_retransmit_timer);

			/* count group sources with retransmission state */
			for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
						  src_node, src)) {
				if (src->source_query_retransmit_count > 0) {
					++grp_retr_sources;
				}
			}

			vty_out(vty, "%-16s %-15s %-8s %7d %7d\n", ifp->name,
				group_str, grp_retr_mmss,
				grp->group_specific_query_retransmit_count,
				grp_retr_sources);

		} /* scan igmp groups */
	} /* scan interfaces */
}

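/* Print one (group, source) entry, either as a plain-text table row or
 * as JSON nested under interface -> group -> "sources".
 */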
static void igmp_sources_print(struct interface *ifp, char *group_str,
			       struct gm_source *src, time_t now,
			       json_object *json, struct vty *vty, bool uj)
{
	json_object *json_iface = NULL;
	json_object *json_group = NULL;
	json_object *json_sources = NULL;
	char source_str[INET_ADDRSTRLEN];
	char mmss[PIM_TIME_STRLEN];
	char uptime[PIM_TIME_STRLEN];

	pim_inet4_dump("<source?>", src->source_addr, source_str,
		       sizeof(source_str));
	pim_time_timer_to_mmss(mmss, sizeof(mmss), src->t_source_timer);
	pim_time_uptime(uptime, sizeof(uptime), now - src->source_creation);

	if (uj) {
		json_object_object_get_ex(json, ifp->name, &json_iface);
		if (!json_iface) {
			json_iface = json_object_new_object();
			if (!json_iface)
				return;
			json_object_string_add(json_iface, "name", ifp->name);
			json_object_object_add(json, ifp->name, json_iface);
		}

		json_object_object_get_ex(json_iface, group_str, &json_group);
		if (!json_group) {
			json_group = json_object_new_object();
			if (!json_group)
				return;
			json_object_string_add(json_group, "group", group_str);
			json_object_object_add(json_iface, group_str,
					       json_group);
			json_sources = json_object_new_array();
			if (!json_sources)
				return;
			json_object_object_add(json_group, "sources",
					       json_sources);
		}

		json_object_object_get_ex(json_group, "sources", &json_sources);
		if (json_sources)
			igmp_source_json_helper(src, json_sources, source_str,
						mmss, uptime);
	} else {
		vty_out(vty, "%-16s %-15s %-15s %5s %3s %8s\n", ifp->name,
			group_str, source_str, mmss,
			IGMP_SOURCE_TEST_FORWARDING(src->source_flags) ? "Y"
									: "N",
			uptime);
	}
}

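/* Source listing restricted to a single interface, optionally filtered
 * to one group address given in 'grp_str'.
 */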
static void igmp_show_sources_interface_single(struct pim_instance *pim,
					       struct vty *vty, bool uj,
					       const char *ifname,
					       const char *grp_str)
{
	struct interface *ifp;
	time_t now;
	json_object *json = NULL;
	struct pim_interface *pim_ifp;
	struct gm_group *grp;

	now = pim_time_monotonic_sec();

	if (uj) {
		json = json_object_new_object();
		if (!json)
			return;
	} else {
		vty_out(vty,
			"Interface Group Source Timer Fwd Uptime \n");
	}

	ifp = if_lookup_by_name(ifname, pim->vrf->vrf_id);
	if (!ifp) {
		if (uj)
			vty_json(vty, json);
		return;
	}

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		if (uj)
			vty_json(vty, json);
		return;
	}

	if (grp_str) {
		struct in_addr group_addr;
		struct gm_sock *igmp;
		struct listnode *srcnode;
		struct gm_source *src;
		char group_str[INET_ADDRSTRLEN];
		int res;

		res = inet_pton(AF_INET, grp_str, &group_addr);
		if (res <= 0) {
			if (uj)
				vty_json(vty, json);
			return;
		}

		igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list,
						   pim_ifp->primary_address);
		if (!igmp) {
			if (uj)
				vty_json(vty, json);
			return;
		}

		grp = find_group_by_addr(igmp, group_addr);
		if (!grp) {
			if (uj)
				vty_json(vty, json);
			return;
		}
		pim_inet4_dump("<group?>", grp->group_addr, group_str,
			       sizeof(group_str));

		/* scan group sources */
		for (ALL_LIST_ELEMENTS_RO(grp->group_source_list, srcnode, src))
			igmp_sources_print(ifp, group_str, src, now, json, vty,
					   uj);
	} else {
		struct listnode *grpnode;

		/* scan igmp groups */
		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
					  grp)) {
			char group_str[INET_ADDRSTRLEN];
			struct listnode *srcnode;
			struct gm_source *src;

			pim_inet4_dump("<group?>", grp->group_addr, group_str,
				       sizeof(group_str));

			/* scan group sources */
			for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
						  srcnode, src))
				igmp_sources_print(ifp, group_str, src, now,
						   json, vty, uj);

		} /* scan igmp groups */
	}

	if (uj)
		vty_json(vty, json);
}

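/* Source listing across all pim-enabled interfaces of the instance. */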
static void igmp_show_sources(struct pim_instance *pim, struct vty *vty,
			      bool uj)
{
	struct interface *ifp;
	time_t now;
	json_object *json = NULL;

	now = pim_time_monotonic_sec();

	if (uj) {
		json = json_object_new_object();
		if (!json)
			return;
	} else {
		vty_out(vty,
			"Interface Group Source Timer Fwd Uptime\n");
	}

	/* scan interfaces */
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;
		struct listnode *grpnode;
		struct gm_group *grp;

		if (!pim_ifp)
			continue;

		/* scan igmp groups */
		for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
					  grp)) {
			char group_str[INET_ADDRSTRLEN];
			struct listnode *srcnode;
			struct gm_source *src;

			pim_inet4_dump("<group?>", grp->group_addr, group_str,
				       sizeof(group_str));

			/* scan group sources */
			for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
						  srcnode, src))
				igmp_sources_print(ifp, group_str, src, now,
						   json, vty, uj);
		} /* scan igmp groups */
	} /* scan interfaces */

	if (uj)
		vty_json(vty, json);
}

static void igmp_show_source_retransmission(struct pim_instance *pim,
|
|
|
|
struct vty *vty)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
|
|
|
struct interface *ifp;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty,
|
2021-08-24 15:25:48 +02:00
|
|
|
"Interface Group Source Counter\n");
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
/* scan interfaces */
|
2017-10-06 20:25:58 +02:00
|
|
|
FOR_ALL_INTERFACES (pim->vrf, ifp) {
|
2015-02-04 07:01:14 +01:00
|
|
|
struct pim_interface *pim_ifp = ifp->info;
|
2021-08-24 15:25:48 +02:00
|
|
|
struct listnode *grpnode;
|
2021-12-03 18:41:52 +01:00
|
|
|
struct gm_group *grp;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
if (!pim_ifp)
|
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-08-24 15:25:48 +02:00
|
|
|
/* scan igmp groups */
|
2022-01-04 15:00:50 +01:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim_ifp->gm_group_list, grpnode,
|
2021-12-14 17:33:24 +01:00
|
|
|
grp)) {
|
2021-08-24 15:25:48 +02:00
|
|
|
char group_str[INET_ADDRSTRLEN];
|
|
|
|
struct listnode *srcnode;
|
2021-12-03 18:33:53 +01:00
|
|
|
struct gm_source *src;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-08-27 10:45:16 +02:00
|
|
|
pim_inet4_dump("<group?>", grp->group_addr, group_str,
|
|
|
|
sizeof(group_str));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-08-24 15:25:48 +02:00
|
|
|
/* scan group sources */
|
2021-08-27 10:45:16 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(grp->group_source_list,
|
|
|
|
srcnode, src)) {
|
2021-08-24 15:25:48 +02:00
|
|
|
char source_str[INET_ADDRSTRLEN];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-08-27 10:45:16 +02:00
|
|
|
pim_inet4_dump("<source?>", src->source_addr,
|
|
|
|
source_str, sizeof(source_str));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-08-27 10:45:16 +02:00
|
|
|
vty_out(vty, "%-16s %-15s %-15s %7d\n",
|
2021-08-24 15:25:48 +02:00
|
|
|
ifp->name, group_str, source_str,
|
|
|
|
src->source_query_retransmit_count);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-08-24 15:25:48 +02:00
|
|
|
} /* scan group sources */
|
2022-01-04 15:00:50 +01:00
|
|
|
} /* scan igmp groups */
|
2021-08-24 15:25:48 +02:00
|
|
|
} /* scan interfaces */
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2017-05-23 14:57:11 +02:00
|
|
|
static void clear_igmp_interfaces(struct pim_instance *pim)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
|
|
|
struct interface *ifp;
|
|
|
|
|
2017-10-06 20:25:58 +02:00
|
|
|
FOR_ALL_INTERFACES (pim->vrf, ifp)
|
2015-02-04 07:01:14 +01:00
|
|
|
pim_if_addr_del_all_igmp(ifp);
|
|
|
|
|
2017-10-06 20:25:58 +02:00
|
|
|
FOR_ALL_INTERFACES (pim->vrf, ifp)
|
2015-02-04 07:01:14 +01:00
|
|
|
pim_if_addr_add_all(ifp);
|
|
|
|
}
|
|
|
|
|
2017-05-23 14:57:11 +02:00
|
|
|
static void clear_interfaces(struct pim_instance *pim)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
clear_igmp_interfaces(pim);
|
|
|
|
clear_pim_interfaces(pim);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2020-11-20 22:06:34 +01:00
|
|
|
#define PIM_GET_PIM_INTERFACE(pim_ifp, ifp) \
|
|
|
|
pim_ifp = ifp->info; \
|
|
|
|
if (!pim_ifp) { \
|
|
|
|
vty_out(vty, \
|
2017-09-22 23:15:33 +02:00
|
|
|
"%% Enable PIM and/or IGMP on this interface first\n"); \
|
2020-11-20 22:06:34 +01:00
|
|
|
return CMD_WARNING_CONFIG_FAILED; \
|
2017-09-22 23:15:33 +02:00
|
|
|
}
|
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/**
|
|
|
|
* Compatibility function to keep the legacy mesh group CLI behavior:
|
|
|
|
* Delete group when there are no more configurations in it.
|
|
|
|
*
|
|
|
|
* NOTE:
|
|
|
|
* Don't forget to call `nb_cli_apply_changes` after this.
|
|
|
|
*/
|
|
|
|
static void pim_cli_legacy_mesh_group_behavior(struct vty *vty,
|
|
|
|
const char *gname)
|
|
|
|
{
|
2024-07-22 18:19:50 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN + 26];
|
2021-04-20 19:54:09 +02:00
|
|
|
char xpath_member_value[XPATH_MAXLEN];
|
|
|
|
const struct lyd_node *member_dnode;
|
|
|
|
|
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
2024-06-12 18:26:48 +02:00
|
|
|
"%s/msdp-mesh-groups[name='%s']", VTY_CURR_XPATH, gname);
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Group must exists, otherwise just quit. */
|
|
|
|
if (!yang_dnode_exists(vty->candidate_config->dnode, xpath_value))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Group members check: */
|
|
|
|
strlcpy(xpath_member_value, xpath_value, sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, "/members", sizeof(xpath_member_value));
|
2024-06-12 18:26:48 +02:00
|
|
|
if (yang_dnode_exists(vty->candidate_config->dnode, xpath_member_value)) {
|
2021-04-20 19:54:09 +02:00
|
|
|
member_dnode = yang_dnode_get(vty->candidate_config->dnode,
|
|
|
|
xpath_member_value);
|
2021-09-28 04:38:41 +02:00
|
|
|
if (!member_dnode || !yang_is_last_list_dnode(member_dnode))
|
2021-04-20 19:54:09 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Source address check: */
|
|
|
|
strlcpy(xpath_member_value, xpath_value, sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, "/source", sizeof(xpath_member_value));
|
|
|
|
if (yang_dnode_exists(vty->candidate_config->dnode, xpath_member_value))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* No configurations found: delete it. */
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
|
|
|
|
}
|
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
DEFUN (clear_ip_interfaces,
|
|
|
|
clear_ip_interfaces_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"clear ip interfaces [vrf NAME]",
|
2015-02-04 07:01:14 +01:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
"Reset interfaces\n"
|
|
|
|
VRF_CMD_HELP_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
|
|
|
clear_interfaces(vrf->info);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (clear_ip_igmp_interfaces,
|
|
|
|
clear_ip_igmp_interfaces_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"clear ip igmp [vrf NAME] interfaces",
|
2015-02-04 07:01:14 +01:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
|
|
|
CLEAR_IP_IGMP_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"Reset IGMP interfaces\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
|
|
|
clear_igmp_interfaces(vrf->info);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-04-13 13:16:59 +02:00
|
|
|
DEFPY (clear_ip_pim_statistics,
|
2019-05-05 07:19:25 +02:00
|
|
|
clear_ip_pim_statistics_cmd,
|
2022-04-13 13:16:59 +02:00
|
|
|
"clear ip pim statistics [vrf NAME]$name",
|
2019-05-05 07:19:25 +02:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
|
|
|
CLEAR_IP_PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"Reset PIM statistics\n")
|
|
|
|
{
|
2022-04-13 13:16:59 +02:00
|
|
|
struct vrf *v = pim_cmd_lookup(vty, name);
|
2019-05-05 07:19:25 +02:00
|
|
|
|
2022-04-13 13:16:59 +02:00
|
|
|
if (!v)
|
2019-05-05 07:19:25 +02:00
|
|
|
return CMD_WARNING;
|
|
|
|
|
2022-04-13 13:16:59 +02:00
|
|
|
clear_pim_statistics(v->info);
|
|
|
|
|
2019-05-05 07:19:25 +02:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-04-13 15:14:02 +02:00
|
|
|
DEFPY (clear_ip_mroute,
|
2015-02-04 07:01:14 +01:00
|
|
|
clear_ip_mroute_cmd,
|
2022-04-13 15:14:02 +02:00
|
|
|
"clear ip mroute [vrf NAME]$name",
|
2015-02-04 07:01:14 +01:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
2022-07-07 15:06:06 +02:00
|
|
|
MROUTE_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2022-04-13 15:14:02 +02:00
|
|
|
struct vrf *v = pim_cmd_lookup(vty, name);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
2022-04-13 15:14:02 +02:00
|
|
|
if (!v)
|
2017-05-23 14:57:11 +02:00
|
|
|
return CMD_WARNING;
|
|
|
|
|
2022-04-13 15:14:02 +02:00
|
|
|
clear_mroute(v->info);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-07-19 08:44:47 +02:00
|
|
|
DEFPY (clear_ip_pim_interfaces,
|
2015-02-04 07:01:14 +01:00
|
|
|
clear_ip_pim_interfaces_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"clear ip pim [vrf NAME] interfaces",
|
2015-02-04 07:01:14 +01:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
|
|
|
CLEAR_IP_PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"Reset PIM interfaces\n")
|
|
|
|
{
|
2022-07-19 08:44:47 +02:00
|
|
|
struct vrf *v = pim_cmd_lookup(vty, vrf);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
2022-07-19 08:44:47 +02:00
|
|
|
if (!v)
|
2017-05-23 14:57:11 +02:00
|
|
|
return CMD_WARNING;
|
|
|
|
|
2022-07-19 08:44:47 +02:00
|
|
|
clear_pim_interfaces(v->info);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-07-18 15:08:19 +02:00
|
|
|
DEFPY (clear_ip_pim_interface_traffic,
|
2017-04-03 22:11:58 +02:00
|
|
|
clear_ip_pim_interface_traffic_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"clear ip pim [vrf NAME] interface traffic",
|
2022-07-18 15:08:19 +02:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
|
|
|
CLEAR_IP_PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2017-04-03 22:11:58 +02:00
|
|
|
"Reset PIM interfaces\n"
|
|
|
|
"Reset Protocol Packet counters\n")
|
|
|
|
{
|
2022-07-18 15:08:19 +02:00
|
|
|
return clear_pim_interface_traffic(vrf, vty);
|
2017-04-03 22:11:58 +02:00
|
|
|
}
|
|
|
|
|
2022-04-13 16:41:46 +02:00
|
|
|
DEFPY (clear_ip_pim_oil,
|
2015-02-04 07:01:14 +01:00
|
|
|
clear_ip_pim_oil_cmd,
|
2022-04-13 16:41:46 +02:00
|
|
|
"clear ip pim [vrf NAME]$name oil",
|
2015-02-04 07:01:14 +01:00
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
|
|
|
CLEAR_IP_PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"Rescan PIM OIL (output interface list)\n")
|
|
|
|
{
|
2022-04-13 16:41:46 +02:00
|
|
|
struct vrf *v = pim_cmd_lookup(vty, name);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
2022-04-13 16:41:46 +02:00
|
|
|
if (!v)
|
2017-05-23 14:57:11 +02:00
|
|
|
return CMD_WARNING;
|
|
|
|
|
2022-04-13 16:41:46 +02:00
|
|
|
pim_scan_oil(v->info);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-12-21 11:24:11 +01:00
|
|
|
DEFUN (clear_ip_pim_bsr_db,
|
|
|
|
clear_ip_pim_bsr_db_cmd,
|
|
|
|
"clear ip pim [vrf NAME] bsr-data",
|
|
|
|
CLEAR_STR
|
|
|
|
IP_STR
|
|
|
|
CLEAR_IP_PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"Reset pim bsr data\n")
|
|
|
|
{
|
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2020-12-21 11:24:11 +01:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2021-07-25 15:48:03 +02:00
|
|
|
pim_bsm_clear(vrf->info);
|
2020-12-21 11:24:11 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
DEFUN (show_ip_igmp_interface,
|
|
|
|
show_ip_igmp_interface_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip igmp [vrf NAME] interface [detail|WORD] [json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2016-09-15 19:16:36 +02:00
|
|
|
"IGMP interface information\n"
|
pimd: Added json to 'show ip igmp group'
Signed-off-by: Daniel Walton <dwalton@cumulusnetworks.com>
root@superm-redxp-05[quagga-pim]# vtysh -c 'show ip igmp group'
Interface Address Group Mode Timer Srcs V Uptime
br1 20.0.13.1 230.40.40.1 EXCL 00:03:24 1 3 00:01:28
root@superm-redxp-05[quagga-pim]#
root@superm-redxp-05[quagga-pim]#
root@superm-redxp-05[quagga-pim]# vtysh -c 'show ip igmp group json' |
python -m json.tool
{
"br1": {
"230.40.40.1": {
"group": "230.40.40.1",
"mode": "EXCLUDE",
"source": "20.0.13.1",
"sourcesCount": 1,
"timer": "00:03:23",
"uptime": "00:01:30",
"version": 3
},
"address": "20.0.13.1",
"flagBroadcast": true,
"flagMulticast": true,
"index": 96,
"lanDelayEnabled": true,
"name": "br1",
"state": "up"
}
}
root@superm-redxp-05[quagga-pim]#
2016-09-27 18:37:38 +02:00
|
|
|
"Detailed output\n"
|
2016-09-15 19:16:36 +02:00
|
|
|
"interface name\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
2017-01-26 15:10:54 +01:00
|
|
|
|
|
|
|
if (argv_find(argv, argc, "detail", &idx)
|
|
|
|
|| argv_find(argv, argc, "WORD", &idx))
|
2017-06-29 16:45:38 +02:00
|
|
|
igmp_show_interfaces_single(vrf->info, vty, argv[idx]->arg, uj);
|
2016-09-15 19:16:36 +02:00
|
|
|
else
|
2017-06-29 16:45:38 +02:00
|
|
|
igmp_show_interfaces(vrf->info, vty, uj);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2017-07-07 17:29:53 +02:00
|
|
|
DEFUN (show_ip_igmp_interface_vrf_all,
|
|
|
|
show_ip_igmp_interface_vrf_all_cmd,
|
|
|
|
"show ip igmp vrf all interface [detail|WORD] [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"IGMP interface information\n"
|
|
|
|
"Detailed output\n"
|
|
|
|
"interface name\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
|
|
|
int idx = 2;
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2017-07-07 17:29:53 +02:00
|
|
|
struct vrf *vrf;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{ ");
|
2017-09-15 17:47:35 +02:00
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
2017-07-07 17:29:53 +02:00
|
|
|
if (uj) {
|
|
|
|
if (!first)
|
|
|
|
vty_out(vty, ", ");
|
|
|
|
vty_out(vty, " \"%s\": ", vrf->name);
|
|
|
|
first = false;
|
|
|
|
} else
|
|
|
|
vty_out(vty, "VRF: %s\n", vrf->name);
|
|
|
|
if (argv_find(argv, argc, "detail", &idx)
|
|
|
|
|| argv_find(argv, argc, "WORD", &idx))
|
|
|
|
igmp_show_interfaces_single(vrf->info, vty,
|
|
|
|
argv[idx]->arg, uj);
|
|
|
|
else
|
|
|
|
igmp_show_interfaces(vrf->info, vty, uj);
|
|
|
|
}
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "}\n");
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
DEFUN (show_ip_igmp_join,
|
|
|
|
show_ip_igmp_join_cmd,
|
2021-02-09 08:29:50 +01:00
|
|
|
"show ip igmp [vrf NAME] join [json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2021-02-09 08:29:50 +01:00
|
|
|
"IGMP static join information\n"
|
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2021-02-09 08:29:50 +01:00
|
|
|
bool uj = use_json(argc, argv);
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2024-09-17 23:21:05 +02:00
|
|
|
igmp_show_interface_join(vrf->info, vty, uj, GM_JOIN_STATIC);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2024-06-26 19:41:45 +02:00
|
|
|
ALIAS (show_ip_igmp_join,
|
|
|
|
show_ip_igmp_join_group_cmd,
|
|
|
|
"show ip igmp [vrf NAME] join-group [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"IGMP static join information\n"
|
|
|
|
JSON_STR);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
2017-07-07 17:29:53 +02:00
|
|
|
DEFUN (show_ip_igmp_join_vrf_all,
|
|
|
|
show_ip_igmp_join_vrf_all_cmd,
|
2021-02-09 08:29:50 +01:00
|
|
|
"show ip igmp vrf all join [json]",
|
2017-07-07 17:29:53 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
2021-02-09 08:29:50 +01:00
|
|
|
"IGMP static join information\n"
|
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2017-07-07 17:29:53 +02:00
|
|
|
struct vrf *vrf;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{ ");
|
2017-09-15 17:47:35 +02:00
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
2017-07-07 17:29:53 +02:00
|
|
|
if (uj) {
|
|
|
|
if (!first)
|
|
|
|
vty_out(vty, ", ");
|
|
|
|
vty_out(vty, " \"%s\": ", vrf->name);
|
|
|
|
first = false;
|
|
|
|
} else
|
|
|
|
vty_out(vty, "VRF: %s\n", vrf->name);
|
2024-09-17 23:21:05 +02:00
|
|
|
igmp_show_interface_join(vrf->info, vty, uj, GM_JOIN_STATIC);
|
|
|
|
}
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "}\n");
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_igmp_proxy,
|
|
|
|
show_ip_igmp_proxy_cmd,
|
|
|
|
"show ip igmp [vrf NAME] proxy [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"IGMP proxy join information\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
|
|
|
int idx = 2;
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
|
|
|
igmp_show_interface_join(vrf->info, vty, uj, GM_JOIN_PROXY);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_igmp_proxy_vrf_all,
|
|
|
|
show_ip_igmp_proxy_vrf_all_cmd,
|
|
|
|
"show ip igmp vrf all proxy [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"IGMP proxy join information\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
struct vrf *vrf;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{ ");
|
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
|
|
|
if (uj) {
|
|
|
|
if (!first)
|
|
|
|
vty_out(vty, ", ");
|
|
|
|
vty_out(vty, " \"%s\": ", vrf->name);
|
|
|
|
first = false;
|
|
|
|
} else
|
|
|
|
vty_out(vty, "VRF: %s\n", vrf->name);
|
|
|
|
igmp_show_interface_join(vrf->info, vty, uj, GM_JOIN_PROXY);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "}\n");
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2024-06-26 19:41:45 +02:00
|
|
|
ALIAS (show_ip_igmp_join_vrf_all,
|
|
|
|
show_ip_igmp_join_group_vrf_all_cmd,
|
|
|
|
"show ip igmp vrf all join-group [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"IGMP static join information\n"
|
|
|
|
JSON_STR);
|
|
|
|
|
|
|
|
DEFUN (show_ip_igmp_static_group,
|
|
|
|
show_ip_igmp_static_group_cmd,
|
|
|
|
"show ip igmp [vrf NAME] static-group [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"Static group information\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
|
|
|
int idx = 2;
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
|
|
|
igmp_show_interface_static_group(vrf->info, vty, uj);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_igmp_static_group_vrf_all,
|
|
|
|
show_ip_igmp_static_group_vrf_all_cmd,
|
|
|
|
"show ip igmp vrf all static-group [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"Static group information\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
struct vrf *vrf;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{ ");
|
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
|
|
|
if (uj) {
|
|
|
|
if (!first)
|
|
|
|
vty_out(vty, ", ");
|
|
|
|
vty_out(vty, " \"%s\": ", vrf->name);
|
|
|
|
first = false;
|
|
|
|
} else
|
|
|
|
vty_out(vty, "VRF: %s\n", vrf->name);
|
|
|
|
igmp_show_interface_static_group(vrf->info, vty, uj);
|
|
|
|
}
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "}\n");
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2017-07-07 17:29:53 +02:00
|
|
|
|
2023-04-10 23:34:35 +02:00
|
|
|
DEFPY(show_ip_igmp_groups,
|
|
|
|
show_ip_igmp_groups_cmd,
|
|
|
|
"show ip igmp [vrf NAME$vrf_name] groups [INTERFACE$ifname [GROUP$grp_str]] [detail$detail] [json$json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
IGMP_GROUP_STR
|
|
|
|
"Interface name\n"
|
|
|
|
"Group address\n"
|
|
|
|
"Detailed Information\n"
|
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2023-04-10 23:34:35 +02:00
|
|
|
if (ifname)
|
|
|
|
igmp_show_groups_interface_single(vrf->info, vty, !!json,
|
|
|
|
ifname, grp_str, !!detail);
|
|
|
|
else
|
|
|
|
igmp_show_groups(vrf->info, vty, !!json, NULL, !!detail);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2023-04-10 23:34:35 +02:00
|
|
|
DEFPY(show_ip_igmp_groups_vrf_all,
|
|
|
|
show_ip_igmp_groups_vrf_all_cmd,
|
|
|
|
"show ip igmp vrf all groups [GROUP$grp_str] [detail$detail] [json$json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
IGMP_GROUP_STR
|
|
|
|
"Group address\n"
|
|
|
|
"Detailed Information\n"
|
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
2023-04-10 23:34:35 +02:00
|
|
|
bool uj = !!json;
|
2017-07-07 17:29:53 +02:00
|
|
|
struct vrf *vrf;
|
|
|
|
bool first = true;
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{ ");
|
2017-09-15 17:47:35 +02:00
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
2017-07-07 17:29:53 +02:00
|
|
|
if (uj) {
|
|
|
|
if (!first)
|
|
|
|
vty_out(vty, ", ");
|
|
|
|
vty_out(vty, " \"%s\": ", vrf->name);
|
|
|
|
first = false;
|
|
|
|
} else
|
|
|
|
vty_out(vty, "VRF: %s\n", vrf->name);
|
2023-04-10 23:34:35 +02:00
|
|
|
igmp_show_groups(vrf->info, vty, uj, grp_str, !!detail);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "}\n");
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
DEFUN (show_ip_igmp_groups_retransmissions,
|
|
|
|
show_ip_igmp_groups_retransmissions_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip igmp [vrf NAME] groups retransmissions",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
IGMP_GROUP_STR
|
|
|
|
"IGMP group retransmissions\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
igmp_show_group_retransmission(vrf->info, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2023-04-10 23:34:35 +02:00
|
|
|
DEFPY(show_ip_igmp_sources,
|
|
|
|
show_ip_igmp_sources_cmd,
|
|
|
|
"show ip igmp [vrf NAME$vrf_name] sources [INTERFACE$ifname [GROUP$grp_str]] [json$json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
IGMP_SOURCE_STR
|
|
|
|
"Interface name\n"
|
|
|
|
"Group address\n"
|
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2023-04-10 23:34:35 +02:00
|
|
|
if (ifname)
|
|
|
|
igmp_show_sources_interface_single(vrf->info, vty, !!json,
|
|
|
|
ifname, grp_str);
|
|
|
|
else
|
|
|
|
igmp_show_sources(vrf->info, vty, !!json);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_igmp_sources_retransmissions,
|
|
|
|
show_ip_igmp_sources_retransmissions_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip igmp [vrf NAME] sources retransmissions",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
IGMP_SOURCE_STR
|
|
|
|
"IGMP source retransmissions\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
igmp_show_source_retransmission(vrf->info, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2018-05-04 13:25:38 +02:00
|
|
|
DEFUN (show_ip_igmp_statistics,
|
|
|
|
show_ip_igmp_statistics_cmd,
|
|
|
|
"show ip igmp [vrf NAME] statistics [interface WORD] [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
IGMP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"IGMP statistics\n"
|
|
|
|
"interface\n"
|
|
|
|
"IGMP interface\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
|
|
|
int idx = 2;
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
2018-05-04 13:25:38 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
|
|
|
if (argv_find(argv, argc, "WORD", &idx))
|
|
|
|
igmp_show_statistics(vrf->info, vty, argv[idx]->arg, uj);
|
|
|
|
else
|
|
|
|
igmp_show_statistics(vrf->info, vty, NULL, uj);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2020-02-19 15:52:17 +01:00
|
|
|
DEFUN (show_ip_pim_mlag_summary,
|
|
|
|
show_ip_pim_mlag_summary_cmd,
|
|
|
|
"show ip pim mlag summary [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"MLAG\n"
|
|
|
|
"status and stats\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
char role_buf[MLAG_ROLE_STRSIZE];
|
|
|
|
char addr_buf[INET_ADDRSTRLEN];
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
json_object *json = NULL;
|
|
|
|
json_object *json_stat = NULL;
|
|
|
|
|
|
|
|
json = json_object_new_object();
|
2024-04-03 00:12:10 +02:00
|
|
|
json_object_boolean_add(json, "mlagConnUp",
|
|
|
|
CHECK_FLAG(router->mlag_flags,
|
|
|
|
PIM_MLAGF_LOCAL_CONN_UP));
|
|
|
|
json_object_boolean_add(json, "mlagPeerConnUp",
|
|
|
|
CHECK_FLAG(router->mlag_flags,
|
|
|
|
PIM_MLAGF_PEER_CONN_UP));
|
|
|
|
json_object_boolean_add(json, "mlagPeerZebraUp",
|
|
|
|
CHECK_FLAG(router->mlag_flags,
|
|
|
|
PIM_MLAGF_PEER_ZEBRA_UP));
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_string_add(json, "mlagRole",
|
2020-11-20 22:06:34 +01:00
|
|
|
mlag_role2str(router->mlag_role,
|
|
|
|
role_buf, sizeof(role_buf)));
|
2020-02-19 15:52:17 +01:00
|
|
|
inet_ntop(AF_INET, &router->local_vtep_ip,
|
2020-11-20 22:06:34 +01:00
|
|
|
addr_buf, INET_ADDRSTRLEN);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_string_add(json, "localVtepIp", addr_buf);
|
|
|
|
inet_ntop(AF_INET, &router->anycast_vtep_ip,
|
2020-11-20 22:06:34 +01:00
|
|
|
addr_buf, INET_ADDRSTRLEN);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_string_add(json, "anycastVtepIp", addr_buf);
|
|
|
|
json_object_string_add(json, "peerlinkRif",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->peerlink_rif);
|
2020-02-19 15:52:17 +01:00
|
|
|
|
|
|
|
json_stat = json_object_new_object();
|
|
|
|
json_object_int_add(json_stat, "mlagConnFlaps",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.mlagd_session_downs);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mlagPeerConnFlaps",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.peer_session_downs);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mlagPeerZebraFlaps",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.peer_zebra_downs);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mrouteAddRx",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mroute_add_rx);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mrouteAddTx",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mroute_add_tx);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mrouteDelRx",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mroute_del_rx);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mrouteDelTx",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mroute_del_tx);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "mlagStatusUpdates",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mlag_status_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "peerZebraStatusUpdates",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.peer_zebra_status_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "pimStatusUpdates",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.pim_status_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_int_add(json_stat, "vxlanUpdates",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.vxlan_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
json_object_object_add(json, "connStats", json_stat);
|
|
|
|
|
2022-01-31 20:20:41 +01:00
|
|
|
vty_json(vty, json);
|
2020-02-19 15:52:17 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
vty_out(vty, "MLAG daemon connection: %s\n",
|
|
|
|
(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)
|
2020-11-20 22:06:34 +01:00
|
|
|
? "up" : "down");
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, "MLAG peer state: %s\n",
|
|
|
|
(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)
|
2020-11-20 22:06:34 +01:00
|
|
|
? "up" : "down");
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, "Zebra peer state: %s\n",
|
|
|
|
(router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)
|
2020-11-20 22:06:34 +01:00
|
|
|
? "up" : "down");
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, "MLAG role: %s\n",
|
|
|
|
mlag_role2str(router->mlag_role, role_buf, sizeof(role_buf)));
|
|
|
|
inet_ntop(AF_INET, &router->local_vtep_ip,
|
2020-11-20 22:06:34 +01:00
|
|
|
addr_buf, INET_ADDRSTRLEN);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, "Local VTEP IP: %s\n", addr_buf);
|
|
|
|
inet_ntop(AF_INET, &router->anycast_vtep_ip,
|
2020-11-20 22:06:34 +01:00
|
|
|
addr_buf, INET_ADDRSTRLEN);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, "Anycast VTEP IP: %s\n", addr_buf);
|
|
|
|
vty_out(vty, "Peerlink: %s\n", router->peerlink_rif);
|
|
|
|
vty_out(vty, "Session flaps: mlagd: %d mlag-peer: %d zebra-peer: %d\n",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.mlagd_session_downs,
|
|
|
|
router->mlag_stats.peer_session_downs,
|
|
|
|
router->mlag_stats.peer_zebra_downs);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, "Message Statistics:\n");
|
|
|
|
vty_out(vty, " mroute adds: rx: %d, tx: %d\n",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mroute_add_rx,
|
|
|
|
router->mlag_stats.msg.mroute_add_tx);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, " mroute dels: rx: %d, tx: %d\n",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.mroute_del_rx,
|
|
|
|
router->mlag_stats.msg.mroute_del_tx);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, " peer zebra status updates: %d\n",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.peer_zebra_status_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, " PIM status updates: %d\n",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.pim_status_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
vty_out(vty, " VxLAN updates: %d\n",
|
2020-11-20 22:06:34 +01:00
|
|
|
router->mlag_stats.msg.vxlan_updates);
|
2020-02-19 15:52:17 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2015-02-04 07:01:14 +01:00
|
|
|
DEFUN (show_ip_pim_assert,
|
|
|
|
show_ip_pim_assert_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip pim [vrf NAME] assert",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"PIM interface assert\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
pim_show_assert(vrf->info, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_pim_assert_internal,
|
|
|
|
show_ip_pim_assert_internal_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip pim [vrf NAME] assert-internal",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"PIM interface internal assert state\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
pim_show_assert_internal(vrf->info, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_pim_assert_metric,
|
|
|
|
show_ip_pim_assert_metric_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip pim [vrf NAME] assert-metric",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"PIM interface assert metric\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
pim_show_assert_metric(vrf->info, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_pim_assert_winner_metric,
|
|
|
|
show_ip_pim_assert_winner_metric_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip pim [vrf NAME] assert-winner-metric",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"PIM interface assert winner metric\n")
|
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
pim_show_assert_winner_metric(vrf->info, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-02-28 11:28:14 +01:00
|
|
|
DEFPY (show_ip_pim_interface,
|
2015-02-04 07:01:14 +01:00
|
|
|
show_ip_pim_interface_cmd,
|
2022-02-28 11:28:14 +01:00
|
|
|
"show ip pim [mlag$mlag] [vrf NAME] interface [detail|WORD]$interface [json$json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2020-02-19 15:52:17 +01:00
|
|
|
"MLAG\n"
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2016-09-15 19:16:36 +02:00
|
|
|
"PIM interface information\n"
|
pimd: Added json to 'show ip igmp group'
Signed-off-by: Daniel Walton <dwalton@cumulusnetworks.com>
root@superm-redxp-05[quagga-pim]# vtysh -c 'show ip igmp group'
Interface Address Group Mode Timer Srcs V Uptime
br1 20.0.13.1 230.40.40.1 EXCL 00:03:24 1 3 00:01:28
root@superm-redxp-05[quagga-pim]#
root@superm-redxp-05[quagga-pim]#
root@superm-redxp-05[quagga-pim]# vtysh -c 'show ip igmp group json' |
python -m json.tool
{
"br1": {
"230.40.40.1": {
"group": "230.40.40.1",
"mode": "EXCLUDE",
"source": "20.0.13.1",
"sourcesCount": 1,
"timer": "00:03:23",
"uptime": "00:01:30",
"version": 3
},
"address": "20.0.13.1",
"flagBroadcast": true,
"flagMulticast": true,
"index": 96,
"lanDelayEnabled": true,
"name": "br1",
"state": "up"
}
}
root@superm-redxp-05[quagga-pim]#
2016-09-27 18:37:38 +02:00
|
|
|
"Detailed output\n"
|
2016-09-15 19:16:36 +02:00
|
|
|
"interface name\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2022-06-09 08:10:22 +02:00
|
|
|
return pim_show_interface_cmd_helper(vrf, vty, !!json, !!mlag,
|
|
|
|
interface);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2022-02-28 11:28:14 +01:00
|
|
|
DEFPY (show_ip_pim_interface_vrf_all,
|
2017-07-07 17:29:53 +02:00
|
|
|
show_ip_pim_interface_vrf_all_cmd,
|
2022-02-28 11:28:14 +01:00
|
|
|
"show ip pim [mlag$mlag] vrf all interface [detail|WORD]$interface [json$json]",
|
2017-07-07 17:29:53 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2020-02-19 15:52:17 +01:00
|
|
|
"MLAG\n"
|
2017-07-07 17:29:53 +02:00
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"PIM interface information\n"
|
|
|
|
"Detailed output\n"
|
|
|
|
"interface name\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
2022-06-09 08:56:17 +02:00
|
|
|
return pim_show_interface_vrf_all_cmd_helper(vty, !!json, !!mlag,
|
|
|
|
interface);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
|
2019-04-18 22:23:02 +02:00
|
|
|
DEFPY (show_ip_pim_join,
|
2015-02-04 07:01:14 +01:00
|
|
|
show_ip_pim_join_cmd,
|
2019-04-18 22:23:02 +02:00
|
|
|
"show ip pim [vrf NAME] join [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2017-01-25 15:13:46 +01:00
|
|
|
"PIM interface join information\n"
|
2019-04-18 22:23:02 +02:00
|
|
|
"The Source or Group\n"
|
|
|
|
"The Group\n"
|
2017-01-25 15:13:46 +01:00
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2022-06-10 07:43:52 +02:00
|
|
|
return pim_show_join_cmd_helper(vrf, vty, s_or_g, g, json);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2022-03-02 05:50:22 +01:00
|
|
|
DEFPY (show_ip_pim_join_vrf_all,
|
2017-07-07 17:29:53 +02:00
|
|
|
show_ip_pim_join_vrf_all_cmd,
|
2022-03-02 05:50:22 +01:00
|
|
|
"show ip pim vrf all join [json$json]",
|
2017-07-07 17:29:53 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"PIM interface join information\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
2022-06-10 07:43:52 +02:00
|
|
|
return pim_show_join_vrf_all_cmd_helper(vty, json);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
|
2019-11-15 20:39:12 +01:00
|
|
|
DEFPY (show_ip_pim_jp_agg,
|
|
|
|
show_ip_pim_jp_agg_cmd,
|
|
|
|
"show ip pim [vrf NAME] jp-agg",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"join prune aggregation list\n")
|
|
|
|
{
|
2022-06-09 09:15:11 +02:00
|
|
|
return pim_show_jp_agg_list_cmd_helper(vrf, vty);
|
2019-11-15 20:39:12 +01:00
|
|
|
}
|
|
|
|
|
2022-03-02 07:15:32 +01:00
|
|
|
DEFPY (show_ip_pim_local_membership,
|
2015-02-04 07:01:14 +01:00
|
|
|
show_ip_pim_local_membership_cmd,
|
2022-03-02 07:15:32 +01:00
|
|
|
"show ip pim [vrf NAME] local-membership [json$json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2017-01-25 15:13:46 +01:00
|
|
|
"PIM interface local-membership\n"
|
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2022-06-09 09:31:33 +02:00
|
|
|
return pim_show_membership_cmd_helper(vrf, vty, !!json);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2020-02-06 18:31:03 +01:00
|
|
|
static void pim_show_mlag_up_entry_detail(struct vrf *vrf,
|
2020-11-20 22:06:34 +01:00
|
|
|
struct vty *vty,
|
|
|
|
struct pim_upstream *up,
|
|
|
|
char *src_str, char *grp_str,
|
|
|
|
json_object *json)
|
2020-02-06 18:31:03 +01:00
|
|
|
{
|
|
|
|
if (json) {
|
|
|
|
json_object *json_row = NULL;
|
|
|
|
json_object *own_list = NULL;
|
|
|
|
json_object *json_group = NULL;
|
|
|
|
|
|
|
|
|
|
|
|
json_object_object_get_ex(json, grp_str, &json_group);
|
|
|
|
if (!json_group) {
|
|
|
|
json_group = json_object_new_object();
|
|
|
|
json_object_object_add(json, grp_str,
|
2020-11-20 22:06:34 +01:00
|
|
|
json_group);
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
json_row = json_object_new_object();
|
|
|
|
json_object_string_add(json_row, "source", src_str);
|
|
|
|
json_object_string_add(json_row, "group", grp_str);
|
|
|
|
|
|
|
|
own_list = json_object_new_array();
|
|
|
|
if (pim_up_mlag_is_local(up))
|
|
|
|
json_object_array_add(own_list,
|
2020-11-20 22:06:34 +01:00
|
|
|
json_object_new_string("local"));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
|
|
|
|
json_object_array_add(own_list,
|
2020-11-20 22:06:34 +01:00
|
|
|
json_object_new_string("peer"));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
|
|
|
|
json_object_array_add(
|
|
|
|
own_list, json_object_new_string("Interface"));
|
|
|
|
json_object_object_add(json_row, "owners", own_list);
|
|
|
|
|
|
|
|
json_object_int_add(json_row, "localCost",
|
2020-11-20 22:06:34 +01:00
|
|
|
pim_up_mlag_local_cost(up));
|
2020-02-06 18:31:03 +01:00
|
|
|
json_object_int_add(json_row, "peerCost",
|
2020-11-20 22:06:34 +01:00
|
|
|
pim_up_mlag_peer_cost(up));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags))
|
|
|
|
json_object_boolean_false_add(json_row, "df");
|
|
|
|
else
|
|
|
|
json_object_boolean_true_add(json_row, "df");
|
|
|
|
json_object_object_add(json_group, src_str, json_row);
|
|
|
|
} else {
|
|
|
|
char own_str[6];
|
|
|
|
|
|
|
|
own_str[0] = '\0';
|
|
|
|
if (pim_up_mlag_is_local(up))
|
2020-02-20 14:41:57 +01:00
|
|
|
strlcat(own_str, "L", sizeof(own_str));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
|
2020-02-20 14:41:57 +01:00
|
|
|
strlcat(own_str, "P", sizeof(own_str));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
|
2020-02-20 14:41:57 +01:00
|
|
|
strlcat(own_str, "I", sizeof(own_str));
|
2020-02-06 18:31:03 +01:00
|
|
|
/* XXX - fixup, print paragraph output */
|
|
|
|
vty_out(vty,
|
2020-11-20 22:06:34 +01:00
|
|
|
"%-15s %-15s %-6s %-11u %-10d %2s\n",
|
|
|
|
src_str, grp_str, own_str,
|
|
|
|
pim_up_mlag_local_cost(up),
|
|
|
|
pim_up_mlag_peer_cost(up),
|
|
|
|
PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)
|
|
|
|
? "n" : "y");
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pim_show_mlag_up_detail(struct vrf *vrf,
|
2020-11-20 22:06:34 +01:00
|
|
|
struct vty *vty, const char *src_or_group,
|
|
|
|
const char *group, bool uj)
|
2020-02-06 18:31:03 +01:00
|
|
|
{
|
2022-01-05 19:12:12 +01:00
|
|
|
char src_str[PIM_ADDRSTRLEN];
|
|
|
|
char grp_str[PIM_ADDRSTRLEN];
|
2020-02-06 18:31:03 +01:00
|
|
|
struct pim_upstream *up;
|
|
|
|
struct pim_instance *pim = vrf->info;
|
|
|
|
json_object *json = NULL;
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
json = json_object_new_object();
|
|
|
|
else
|
|
|
|
vty_out(vty,
|
|
|
|
"Source Group Owner Local-cost Peer-cost DF\n");
|
|
|
|
|
|
|
|
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
|
|
|
|
if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
|
|
|
|
&& !(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
|
|
|
|
&& !pim_up_mlag_is_local(up))
|
|
|
|
continue;
|
|
|
|
|
2022-01-05 19:12:12 +01:00
|
|
|
snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &up->sg.grp);
|
|
|
|
snprintfrr(src_str, sizeof(src_str), "%pPAs", &up->sg.src);
|
|
|
|
|
2020-02-06 18:31:03 +01:00
|
|
|
/* XXX: strcmps are clearly inefficient. we should do uint comps
|
|
|
|
* here instead.
|
|
|
|
*/
|
|
|
|
if (group) {
|
|
|
|
if (strcmp(src_str, src_or_group) ||
|
2020-02-20 14:41:57 +01:00
|
|
|
strcmp(grp_str, group))
|
2020-02-06 18:31:03 +01:00
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
if (strcmp(src_str, src_or_group) &&
|
2020-02-20 14:41:57 +01:00
|
|
|
strcmp(grp_str, src_or_group))
|
2020-02-06 18:31:03 +01:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
pim_show_mlag_up_entry_detail(vrf, vty, up,
|
2020-02-20 14:41:57 +01:00
|
|
|
src_str, grp_str, json);
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
|
2022-01-31 20:20:41 +01:00
|
|
|
if (uj)
|
|
|
|
vty_json(vty, json);
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void pim_show_mlag_up_vrf(struct vrf *vrf, struct vty *vty, bool uj)
|
|
|
|
{
|
|
|
|
json_object *json = NULL;
|
|
|
|
json_object *json_row;
|
|
|
|
struct pim_upstream *up;
|
|
|
|
struct pim_instance *pim = vrf->info;
|
|
|
|
json_object *json_group = NULL;
|
|
|
|
|
|
|
|
if (uj) {
|
|
|
|
json = json_object_new_object();
|
|
|
|
} else {
|
|
|
|
vty_out(vty,
|
|
|
|
"Source Group Owner Local-cost Peer-cost DF\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
|
|
|
|
if (!(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)
|
|
|
|
&& !(up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE)
|
|
|
|
&& !pim_up_mlag_is_local(up))
|
|
|
|
continue;
|
|
|
|
if (uj) {
|
2022-01-05 19:12:12 +01:00
|
|
|
char src_str[PIM_ADDRSTRLEN];
|
|
|
|
char grp_str[PIM_ADDRSTRLEN];
|
2020-02-06 18:31:03 +01:00
|
|
|
json_object *own_list = NULL;
|
|
|
|
|
2022-01-05 19:12:12 +01:00
|
|
|
snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
|
|
|
|
&up->sg.grp);
|
|
|
|
snprintfrr(src_str, sizeof(src_str), "%pPAs",
|
|
|
|
&up->sg.src);
|
|
|
|
|
2020-02-06 18:31:03 +01:00
|
|
|
json_object_object_get_ex(json, grp_str, &json_group);
|
|
|
|
if (!json_group) {
|
|
|
|
json_group = json_object_new_object();
|
|
|
|
json_object_object_add(json, grp_str,
|
2020-11-20 22:06:34 +01:00
|
|
|
json_group);
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
json_row = json_object_new_object();
|
|
|
|
json_object_string_add(json_row, "vrf", vrf->name);
|
|
|
|
json_object_string_add(json_row, "source", src_str);
|
|
|
|
json_object_string_add(json_row, "group", grp_str);
|
|
|
|
|
|
|
|
own_list = json_object_new_array();
|
|
|
|
if (pim_up_mlag_is_local(up)) {
|
|
|
|
|
|
|
|
json_object_array_add(own_list,
|
2020-11-20 22:06:34 +01:00
|
|
|
json_object_new_string(
|
|
|
|
"local"));
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER)) {
|
|
|
|
json_object_array_add(own_list,
|
2020-11-20 22:06:34 +01:00
|
|
|
json_object_new_string(
|
|
|
|
"peer"));
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
json_object_object_add(json_row, "owners", own_list);
|
|
|
|
|
|
|
|
json_object_int_add(json_row, "localCost",
|
2020-11-20 22:06:34 +01:00
|
|
|
pim_up_mlag_local_cost(up));
|
2020-02-06 18:31:03 +01:00
|
|
|
json_object_int_add(json_row, "peerCost",
|
2020-11-20 22:06:34 +01:00
|
|
|
pim_up_mlag_peer_cost(up));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags))
|
|
|
|
json_object_boolean_false_add(json_row, "df");
|
|
|
|
else
|
|
|
|
json_object_boolean_true_add(json_row, "df");
|
|
|
|
json_object_object_add(json_group, src_str, json_row);
|
|
|
|
} else {
|
|
|
|
char own_str[6];
|
|
|
|
|
|
|
|
own_str[0] = '\0';
|
|
|
|
if (pim_up_mlag_is_local(up))
|
2020-02-20 14:41:57 +01:00
|
|
|
strlcat(own_str, "L", sizeof(own_str));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_PEER))
|
2020-02-20 14:41:57 +01:00
|
|
|
strlcat(own_str, "P", sizeof(own_str));
|
2020-02-06 18:31:03 +01:00
|
|
|
if (up->flags & (PIM_UPSTREAM_FLAG_MASK_MLAG_INTERFACE))
|
2020-02-20 14:41:57 +01:00
|
|
|
strlcat(own_str, "I", sizeof(own_str));
|
2020-02-06 18:31:03 +01:00
|
|
|
vty_out(vty,
|
2022-01-05 19:12:12 +01:00
|
|
|
"%-15pPAs %-15pPAs %-6s %-11u %-10u %2s\n",
|
|
|
|
&up->sg.src, &up->sg.grp, own_str,
|
2020-02-06 18:31:03 +01:00
|
|
|
pim_up_mlag_local_cost(up),
|
|
|
|
pim_up_mlag_peer_cost(up),
|
|
|
|
PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)
|
2020-11-20 22:06:34 +01:00
|
|
|
? "n" : "y");
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
}
|
2022-01-31 20:20:41 +01:00
|
|
|
if (uj)
|
|
|
|
vty_json(vty, json);
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void pim_show_mlag_help_string(struct vty *vty, bool uj)
|
|
|
|
{
|
|
|
|
if (!uj) {
|
|
|
|
vty_out(vty, "Owner codes:\n");
|
|
|
|
vty_out(vty,
|
2020-03-27 12:35:23 +01:00
|
|
|
"L: EVPN-MLAG Entry, I:PIM-MLAG Entry, P: Peer Entry\n");
|
2020-02-06 18:31:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
DEFUN(show_ip_pim_mlag_up, show_ip_pim_mlag_up_cmd,
|
|
|
|
"show ip pim [vrf NAME] mlag upstream [A.B.C.D [A.B.C.D]] [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"MLAG\n"
|
|
|
|
"upstream\n"
|
|
|
|
"Unicast or Multicast address\n"
|
|
|
|
"Multicast address\n" JSON_STR)
|
|
|
|
{
|
|
|
|
const char *src_or_group = NULL;
|
|
|
|
const char *group = NULL;
|
|
|
|
int idx = 2;
|
|
|
|
bool uj = use_json(argc, argv);
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
2020-02-06 18:31:03 +01:00
|
|
|
|
|
|
|
if (!vrf || !vrf->info) {
|
|
|
|
vty_out(vty, "%s: VRF or Info missing\n", __func__);
|
|
|
|
return CMD_WARNING;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
argc--;
|
|
|
|
|
|
|
|
if (argv_find(argv, argc, "A.B.C.D", &idx)) {
|
|
|
|
src_or_group = argv[idx]->arg;
|
|
|
|
if (idx + 1 < argc)
|
|
|
|
group = argv[idx + 1]->arg;
|
|
|
|
}
|
|
|
|
|
|
|
|
pim_show_mlag_help_string(vty, uj);
|
|
|
|
|
2020-02-20 14:41:57 +01:00
|
|
|
if (src_or_group)
|
2020-02-06 18:31:03 +01:00
|
|
|
pim_show_mlag_up_detail(vrf, vty, src_or_group, group, uj);
|
|
|
|
else
|
|
|
|
pim_show_mlag_up_vrf(vrf, vty, uj);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
DEFUN(show_ip_pim_mlag_up_vrf_all, show_ip_pim_mlag_up_vrf_all_cmd,
|
|
|
|
"show ip pim vrf all mlag upstream [json]",
|
|
|
|
SHOW_STR IP_STR PIM_STR VRF_CMD_HELP_STR
|
|
|
|
"MLAG\n"
|
|
|
|
"upstream\n" JSON_STR)
|
|
|
|
{
|
|
|
|
struct vrf *vrf;
|
|
|
|
bool uj = use_json(argc, argv);
|
|
|
|
|
|
|
|
pim_show_mlag_help_string(vty, uj);
|
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
|
|
|
pim_show_mlag_up_vrf(vrf, vty, uj);
|
|
|
|
}
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2022-03-02 10:22:13 +01:00
|
|
|
DEFPY (show_ip_pim_neighbor,
|
2015-02-04 07:01:14 +01:00
|
|
|
show_ip_pim_neighbor_cmd,
|
2022-03-02 10:22:13 +01:00
|
|
|
"show ip pim [vrf NAME] neighbor [detail|WORD]$interface [json$json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2016-09-15 19:16:36 +02:00
|
|
|
"PIM neighbor information\n"
|
pimd: Added json to 'show ip igmp group'
Signed-off-by: Daniel Walton <dwalton@cumulusnetworks.com>
root@superm-redxp-05[quagga-pim]# vtysh -c 'show ip igmp group'
Interface Address Group Mode Timer Srcs V Uptime
br1 20.0.13.1 230.40.40.1 EXCL 00:03:24 1 3 00:01:28
root@superm-redxp-05[quagga-pim]#
root@superm-redxp-05[quagga-pim]#
root@superm-redxp-05[quagga-pim]# vtysh -c 'show ip igmp group json' |
python -m json.tool
{
"br1": {
"230.40.40.1": {
"group": "230.40.40.1",
"mode": "EXCLUDE",
"source": "20.0.13.1",
"sourcesCount": 1,
"timer": "00:03:23",
"uptime": "00:01:30",
"version": 3
},
"address": "20.0.13.1",
"flagBroadcast": true,
"flagMulticast": true,
"index": 96,
"lanDelayEnabled": true,
"name": "br1",
"state": "up"
}
}
root@superm-redxp-05[quagga-pim]#
2016-09-27 18:37:38 +02:00
|
|
|
"Detailed output\n"
|
2016-09-15 19:16:36 +02:00
|
|
|
"Name of interface or neighbor\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2022-06-09 10:34:06 +02:00
|
|
|
return pim_show_neighbors_cmd_helper(vrf, vty, json, interface);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2022-03-02 10:22:13 +01:00
|
|
|
DEFPY (show_ip_pim_neighbor_vrf_all,
|
2017-07-07 17:29:53 +02:00
|
|
|
show_ip_pim_neighbor_vrf_all_cmd,
|
2022-03-02 10:22:13 +01:00
|
|
|
"show ip pim vrf all neighbor [detail|WORD]$interface [json$json]",
|
2017-07-07 17:29:53 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"PIM neighbor information\n"
|
|
|
|
"Detailed output\n"
|
|
|
|
"Name of interface or neighbor\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
2022-06-09 11:36:08 +02:00
|
|
|
return pim_show_neighbors_vrf_all_cmd_helper(vty, json, interface);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
|
2022-02-09 05:32:14 +01:00
|
|
|
DEFPY (show_ip_pim_secondary,
|
2015-02-04 07:01:14 +01:00
|
|
|
show_ip_pim_secondary_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip pim [vrf NAME] secondary",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2015-02-04 07:01:14 +01:00
|
|
|
"PIM neighbor addresses\n")
|
|
|
|
{
|
2022-06-09 10:33:01 +02:00
|
|
|
return pim_show_secondary_helper(vrf, vty);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2022-02-15 11:19:39 +01:00
|
|
|
DEFPY (show_ip_pim_state,
|
2016-09-28 03:33:33 +02:00
|
|
|
show_ip_pim_state_cmd,
|
2022-02-15 11:19:39 +01:00
|
|
|
"show ip pim [vrf NAME] state [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
|
2016-09-28 03:33:33 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2016-09-28 03:33:33 +02:00
|
|
|
"PIM state information\n"
|
|
|
|
"Unicast or Multicast address\n"
|
|
|
|
"Multicast address\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2016-09-28 03:33:33 +02:00
|
|
|
{
|
2022-06-09 12:29:02 +02:00
|
|
|
return pim_show_state_helper(vrf, vty, s_or_g_str, g_str, !!json);
|
2016-09-28 03:33:33 +02:00
|
|
|
}
|
|
|
|
|
2022-02-15 11:19:39 +01:00
|
|
|
DEFPY (show_ip_pim_state_vrf_all,
|
2017-07-07 17:29:53 +02:00
|
|
|
show_ip_pim_state_vrf_all_cmd,
|
2022-02-15 11:19:39 +01:00
|
|
|
"show ip pim vrf all state [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
|
2017-07-07 17:29:53 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"PIM state information\n"
|
|
|
|
"Unicast or Multicast address\n"
|
|
|
|
"Multicast address\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
2022-06-09 12:29:02 +02:00
|
|
|
return pim_show_state_vrf_all_helper(vty, s_or_g_str, g_str, !!json);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
|
2019-04-18 22:09:03 +02:00
|
|
|
DEFPY (show_ip_pim_upstream,
|
2015-02-04 07:01:14 +01:00
|
|
|
show_ip_pim_upstream_cmd,
|
2019-04-18 22:09:03 +02:00
|
|
|
"show ip pim [vrf NAME] upstream [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
|
2015-02-04 07:01:14 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2016-09-15 19:16:36 +02:00
|
|
|
"PIM upstream information\n"
|
2019-04-18 22:09:03 +02:00
|
|
|
"The Source or Group\n"
|
|
|
|
"The Group\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2015-02-04 07:01:14 +01:00
|
|
|
{
|
2022-06-09 11:40:21 +02:00
|
|
|
return pim_show_upstream_helper(vrf, vty, s_or_g, g, !!json);
|
2015-02-04 07:01:14 +01:00
|
|
|
}
|
|
|
|
|
2022-02-11 06:15:31 +01:00
|
|
|
DEFPY (show_ip_pim_upstream_vrf_all,
|
2017-07-07 17:29:53 +02:00
|
|
|
show_ip_pim_upstream_vrf_all_cmd,
|
2022-02-11 06:15:31 +01:00
|
|
|
"show ip pim vrf all upstream [json$json]",
|
2017-07-07 17:29:53 +02:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"PIM upstream information\n"
|
|
|
|
JSON_STR)
|
|
|
|
{
|
2022-06-09 11:40:21 +02:00
|
|
|
return pim_show_upstream_vrf_all_helper(vty, !!json);
|
2017-07-07 17:29:53 +02:00
|
|
|
}
|
|
|
|
|
2022-02-25 10:47:35 +01:00
|
|
|
DEFPY (show_ip_pim_channel,
       show_ip_pim_channel_cmd,
       "show ip pim [vrf NAME] channel [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM downstream channel info\n"
       JSON_STR)
{
	return pim_show_channel_cmd_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_upstream_join_desired,
       show_ip_pim_upstream_join_desired_cmd,
       "show ip pim [vrf NAME] upstream-join-desired [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM upstream join-desired\n"
       JSON_STR)
{
	return pim_show_upstream_join_desired_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_upstream_rpf,
       show_ip_pim_upstream_rpf_cmd,
       "show ip pim [vrf NAME] upstream-rpf [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM upstream source rpf\n"
       JSON_STR)
{
	return pim_show_upstream_rpf_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_rp,
       show_ip_pim_rp_cmd,
       "show ip pim [vrf NAME] rp-info [A.B.C.D/M$group] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM RP information\n"
       "Multicast Group range\n"
       JSON_STR)
{
	return pim_show_rp_helper(vrf, vty, group_str, (struct prefix *)group,
				  !!json);
}

DEFPY (show_ip_pim_rp_vrf_all,
       show_ip_pim_rp_vrf_all_cmd,
       "show ip pim vrf all rp-info [A.B.C.D/M$group] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM RP information\n"
       "Multicast Group range\n"
       JSON_STR)
{
	return pim_show_rp_vrf_all_helper(vty, group_str,
					  (struct prefix *)group, !!json);
}

DEFPY (show_ip_pim_autorp,
       show_ip_pim_autorp_cmd,
       "show ip pim [vrf <NAME|all>] autorp [discovery|candidate|mapping-agent]$component [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "All VRF's\n"
       "PIM AutoRP information\n"
       "RP Discovery details\n"
       "Candidate RP details\n"
       "Mapping Agent details\n"
       JSON_STR)
{
	json_object *json_parent = NULL;
	struct vrf *v;

	if (json)
		json_parent = json_object_new_object();

	if (vrf && strmatch(vrf, "all")) {
		json_object *json_vrf = NULL;

		/* "vrf all": walk every VRF with a PIM instance and emit a
		 * per-VRF section (or per-VRF JSON object keyed by name).
		 */
		RB_FOREACH (v, vrf_name_head, &vrfs_by_name) {
			if (!v || !v->info)
				continue;

			if (json)
				json_vrf = json_object_new_object();
			else
				vty_out(vty, "VRF: %s\n", v->name);

			pim_autorp_show_autorp(vty, v->info, component, json_vrf);

			if (json)
				json_object_object_add(json_parent, v->name, json_vrf);
		}
	} else {
		v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
		if (!v || !v->info) {
			if (!json)
				vty_out(vty, "%% Unable to find pim instance\n");
			return CMD_WARNING;
		}
		pim_autorp_show_autorp(vty, v->info, component, json_parent);
	}

	if (json)
		vty_json(vty, json_parent);

	return CMD_SUCCESS;
}

DEFPY (show_ip_pim_rpf,
       show_ip_pim_rpf_cmd,
       "show ip pim [vrf NAME] rpf [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM cached source rpf information\n"
       JSON_STR)
{
	return pim_show_rpf_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_rpf_vrf_all,
       show_ip_pim_rpf_vrf_all_cmd,
       "show ip pim vrf all rpf [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM cached source rpf information\n"
       JSON_STR)
{
	return pim_show_rpf_vrf_all_helper(vty, !!json);
}

DEFPY (show_ip_pim_nexthop,
       show_ip_pim_nexthop_cmd,
       "show ip pim [vrf NAME] nexthop [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM cached nexthop rpf information\n"
       JSON_STR)
{
	return pim_show_nexthop_cmd_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_nexthop_lookup,
       show_ip_pim_nexthop_lookup_cmd,
       "show ip pim [vrf NAME] nexthop-lookup A.B.C.D$source [A.B.C.D$group]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM cached nexthop rpf lookup\n"
       "Source/RP address\n"
       "Multicast Group address\n")
{
	return pim_show_nexthop_lookup_cmd_helper(vrf, vty, source, group);
}

ALIAS_DEPRECATED (show_ip_pim_nexthop_lookup,
		  show_ip_rpf_source_cmd,
		  "show ip rpf A.B.C.D$source",
		  SHOW_STR
		  IP_STR
		  "Display RPF information for multicast source\n"
		  "Nexthop lookup for specific source address\n");

DEFPY (show_ip_pim_interface_traffic,
       show_ip_pim_interface_traffic_cmd,
       "show ip pim [vrf NAME] interface traffic [WORD$if_name] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM interface information\n"
       "Protocol Packet counters\n"
       "Interface name\n"
       JSON_STR)
{
	return pim_show_interface_traffic_helper(vrf, if_name, vty, !!json);
}

DEFPY (show_ip_pim_bsm_db,
       show_ip_pim_bsm_db_cmd,
       "show ip pim bsm-database [vrf NAME] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       "PIM cached bsm packets information\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	return pim_show_bsm_db_helper(vrf, vty, !!json);
}

DEFPY_HIDDEN (show_ip_pim_bsrp,
	      show_ip_pim_bsrp_cmd,
	      "show ip pim bsrp-info [vrf NAME] [json$json]",
	      SHOW_STR
	      IP_STR
	      PIM_STR
	      "PIM cached group-rp mappings information\n"
	      VRF_CMD_HELP_STR
	      JSON_STR)
{
	return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_bsr_rpinfo,
       show_ip_pim_bsr_rpinfo_cmd,
       "show ip pim bsr rp-info [vrf NAME] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       BSR_STR
       "PIM cached group-rp mappings information received from BSR\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json);
}

DEFPY (show_ip_pim_bsr_cand_bsr,
       show_ip_pim_bsr_cand_bsr_cmd,
       "show ip pim bsr candidate-bsr [vrf NAME$vrfname] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       BSR_STR
       "Current PIM router candidate BSR state\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json);

	if (!vrf || !vrf->info)
		return CMD_WARNING;

	return pim_show_bsr_cand_bsr(vrf, vty, !!json);
}

DEFPY (show_ip_pim_bsr_cand_rp,
       show_ip_pim_bsr_cand_rp_cmd,
       "show ip pim bsr candidate-rp [vrf NAME$vrfname] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       BSR_STR
       "Current PIM router candidate RP state\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json);

	if (!vrf || !vrf->info)
		return CMD_WARNING;

	return pim_show_bsr_cand_rp(vrf, vty, !!json);
}

DEFPY (show_ip_pim_bsr_rpdb,
       show_ip_pim_bsr_rpdb_cmd,
       "show ip pim bsr candidate-rp-database [vrf NAME$vrfname] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       BSR_STR
       "Candidate RPs database on this router (if it is the BSR)\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);

	if (!vrf || !vrf->info)
		return CMD_WARNING;

	struct pim_instance *pim = vrf->info;
	struct bsm_scope *scope = &pim->global_scope;

	return pim_crp_db_show(vty, scope, !!json);
}

DEFPY (show_ip_pim_bsr_groups,
       show_ip_pim_bsr_groups_cmd,
       "show ip pim bsr groups [vrf NAME$vrfname] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       "boot-strap router information\n"
       "Candidate RP groups\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);

	if (!vrf || !vrf->info)
		return CMD_WARNING;

	struct pim_instance *pim = vrf->info;
	struct bsm_scope *scope = &pim->global_scope;

	return pim_crp_groups_show(vty, scope, !!json);
}

DEFPY (show_ip_pim_statistics,
       show_ip_pim_statistics_cmd,
       "show ip pim [vrf NAME] statistics [interface WORD$word] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "PIM statistics\n"
       INTERFACE_STR
       "PIM interface\n"
       JSON_STR)
{
	return pim_show_statistics_helper(vrf, vty, word, !!json);
}

DEFPY (show_ip_multicast,
       show_ip_multicast_cmd,
       "show ip multicast [vrf NAME]",
       SHOW_STR
       IP_STR
       "Multicast global information\n"
       VRF_CMD_HELP_STR)
{
	return pim_show_multicast_helper(vrf, vty);
}

DEFPY (show_ip_multicast_vrf_all,
       show_ip_multicast_vrf_all_cmd,
       "show ip multicast vrf all",
       SHOW_STR
       IP_STR
       "Multicast global information\n"
       VRF_CMD_HELP_STR)
{
	return pim_show_multicast_vrf_all_helper(vty);
}

DEFPY (show_ip_multicast_count,
       show_ip_multicast_count_cmd,
       "show ip multicast count [vrf NAME] [json$json]",
       SHOW_STR
       IP_STR
       "Multicast global information\n"
       "Data packet count\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	return pim_show_multicast_count_helper(vrf, vty, !!json);
}

DEFPY (show_ip_multicast_count_vrf_all,
       show_ip_multicast_count_vrf_all_cmd,
       "show ip multicast count vrf all [json$json]",
       SHOW_STR
       IP_STR
       "Multicast global information\n"
       "Data packet count\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	return pim_show_multicast_count_vrf_all_helper(vty, !!json);
}

DEFPY (show_ip_mroute,
       show_ip_mroute_cmd,
       "show ip mroute [vrf NAME] [A.B.C.D$s_or_g [A.B.C.D$g]] [fill$fill] [json$json]",
       SHOW_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "The Source or Group\n"
       "The Group\n"
       "Fill in Assumed data\n"
       JSON_STR)
{
	return pim_show_mroute_helper(vrf, vty, s_or_g, g, !!fill, !!json);
}

DEFPY (show_ip_mroute_vrf_all,
       show_ip_mroute_vrf_all_cmd,
       "show ip mroute vrf all [fill$fill] [json$json]",
       SHOW_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "Fill in Assumed data\n"
       JSON_STR)
{
	return pim_show_mroute_vrf_all_helper(vty, !!fill, !!json);
}

DEFPY (clear_ip_mroute_count,
       clear_ip_mroute_count_cmd,
       "clear ip mroute [vrf NAME]$name count",
       CLEAR_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "Route and packet count data\n")
{
	return clear_ip_mroute_count_command(vty, name);
}

DEFPY_YANG(clear_ip_msdp_peer, clear_ip_msdp_peer_cmd,
	   "clear ip msdp peer A.B.C.D$peer [vrf WORD$vrfname]",
	   CLEAR_STR
	   IP_STR
	   MSDP_STR
	   "Restart MSDP peer\n"
	   "MSDP peer address\n"
	   VRF_CMD_HELP_STR)
{
	const struct pim_instance *pim;
	const struct listnode *node;
	const struct vrf *vrf;
	struct pim_msdp_peer *mp;

	if (vrfname)
		vrf = vrf_lookup_by_name(vrfname);
	else
		vrf = vrf_lookup_by_id(VRF_DEFAULT);

	if (vrf == NULL || vrf->info == NULL)
		return CMD_WARNING;

	pim = vrf->info;
	for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, mp)) {
		if (mp->peer.s_addr != peer.s_addr)
			continue;

		pim_msdp_peer_restart(mp);
		break;
	}

	return CMD_SUCCESS;
}

DEFPY (show_ip_mroute_count,
       show_ip_mroute_count_cmd,
       "show ip mroute [vrf NAME] count [json$json]",
       SHOW_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "Route and packet count data\n"
       JSON_STR)
{
	return pim_show_mroute_count_helper(vrf, vty, !!json);
}

DEFPY (show_ip_mroute_count_vrf_all,
       show_ip_mroute_count_vrf_all_cmd,
       "show ip mroute vrf all count [json$json]",
       SHOW_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "Route and packet count data\n"
       JSON_STR)
{
	return pim_show_mroute_count_vrf_all_helper(vty, !!json);
}

DEFPY (show_ip_mroute_summary,
       show_ip_mroute_summary_cmd,
       "show ip mroute [vrf NAME] summary [json$json]",
       SHOW_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "Summary of all mroutes\n"
       JSON_STR)
{
	return pim_show_mroute_summary_helper(vrf, vty, !!json);
}

DEFPY (show_ip_mroute_summary_vrf_all,
       show_ip_mroute_summary_vrf_all_cmd,
       "show ip mroute vrf all summary [json$json]",
       SHOW_STR
       IP_STR
       MROUTE_STR
       VRF_CMD_HELP_STR
       "Summary of all mroutes\n"
       JSON_STR)
{
	return pim_show_mroute_summary_vrf_all_helper(vty, !!json);
}

DEFUN (show_ip_rib,
       show_ip_rib_cmd,
       "show ip rib [vrf NAME] A.B.C.D",
       SHOW_STR
       IP_STR
       RIB_STR
       VRF_CMD_HELP_STR
       "Unicast address\n")
{
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);
	struct in_addr addr;
	const char *addr_str;
	struct pim_nexthop nexthop;
	int result;

	if (!vrf)
		return CMD_WARNING;

	memset(&nexthop, 0, sizeof(nexthop));
	argv_find(argv, argc, "A.B.C.D", &idx);
	addr_str = argv[idx]->arg;
	result = inet_pton(AF_INET, addr_str, &addr);
	if (result <= 0) {
		vty_out(vty, "Bad unicast address %s: errno=%d: %s\n", addr_str,
			errno, safe_strerror(errno));
		return CMD_WARNING;
	}

	/* Resolve the nexthop for this unicast address through PIM nexthop
	 * tracking before printing it.
	 */
	if (!pim_nht_lookup(vrf->info, &nexthop, addr, PIMADDR_ANY, false)) {
		vty_out(vty,
			"Failure querying RIB nexthop for unicast address %s\n",
			addr_str);
		return CMD_WARNING;
	}

	vty_out(vty,
		"Address         NextHop         Interface Metric Preference\n");

	vty_out(vty, "%-15s %-15pPAs %-9s %6d %10d\n", addr_str,
		&nexthop.mrib_nexthop_addr,
		nexthop.interface ? nexthop.interface->name : "<ifname?>",
		nexthop.mrib_route_metric, nexthop.mrib_metric_preference);

	return CMD_SUCCESS;
}

static void show_ssmpingd(struct pim_instance *pim, struct vty *vty)
{
	struct listnode *node;
	struct ssmpingd_sock *ss;
	time_t now;

	vty_out(vty,
		"Source          Socket Address         Port Uptime   Requests\n");

	if (!pim->ssmpingd_list)
		return;

	now = pim_time_monotonic_sec();

	for (ALL_LIST_ELEMENTS_RO(pim->ssmpingd_list, node, ss)) {
		char source_str[INET_ADDRSTRLEN];
		char ss_uptime[10];
		struct sockaddr_in bind_addr;
		socklen_t len = sizeof(bind_addr);
		char bind_addr_str[INET_ADDRSTRLEN];

		pim_inet4_dump("<src?>", ss->source_addr, source_str,
			       sizeof(source_str));

		if (pim_socket_getsockname(
			    ss->sock_fd, (struct sockaddr *)&bind_addr, &len)) {
			vty_out(vty,
				"%% Failure reading socket name for ssmpingd source %s on fd=%d\n",
				source_str, ss->sock_fd);
		}

		pim_inet4_dump("<addr?>", bind_addr.sin_addr, bind_addr_str,
			       sizeof(bind_addr_str));
		pim_time_uptime(ss_uptime, sizeof(ss_uptime),
				now - ss->creation);

		vty_out(vty, "%-15s %6d %-15s %5d %8s %8lld\n", source_str,
			ss->sock_fd, bind_addr_str, ntohs(bind_addr.sin_port),
			ss_uptime, (long long)ss->requests);
	}
}

DEFUN (show_ip_ssmpingd,
       show_ip_ssmpingd_cmd,
       "show ip ssmpingd [vrf NAME]",
       SHOW_STR
       IP_STR
       SHOW_SSMPINGD_STR
       VRF_CMD_HELP_STR)
{
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false);

	if (!vrf)
		return CMD_WARNING;

	show_ssmpingd(vrf->info, vty);
	return CMD_SUCCESS;
}

DEFPY_NOSH (router_pim,
	    router_pim_cmd,
	    "router pim [vrf NAME]",
	    "Enable a routing process\n"
	    "Start PIM configuration\n"
	    VRF_CMD_HELP_STR)
{
	char xpath[XPATH_MAXLEN];
	const char *vrf_name;

	if (vrf)
		vrf_name = vrf;
	else
		vrf_name = VRF_DEFAULT_NAME;

	snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim",
		 vrf_name, FRR_PIM_AF_XPATH_VAL);
	nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
	if (nb_cli_apply_changes_clear_pending(vty, NULL) != CMD_SUCCESS)
		return CMD_WARNING_CONFIG_FAILED;

	VTY_PUSH_XPATH(PIM_NODE, xpath);
	return CMD_SUCCESS;
}

DEFPY_YANG (no_router_pim,
	    no_router_pim_cmd,
	    "no router pim [vrf NAME]",
	    NO_STR
	    "Enable a routing process\n"
	    "Start PIM configuration\n"
	    VRF_CMD_HELP_STR)
{
	char xpath[XPATH_MAXLEN];
	const char *vrf_name;

	if (vrf)
		vrf_name = vrf;
	else
		vrf_name = VRF_DEFAULT_NAME;

	snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH, "frr-pim:pimd", "pim",
		 vrf_name, FRR_PIM_AF_XPATH_VAL);

	nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty, NULL);
}
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG (pim_spt_switchover_infinity,
|
2024-06-12 18:26:48 +02:00
|
|
|
pim_spt_switchover_infinity_cmd,
|
|
|
|
"spt-switchover infinity-and-beyond",
|
2017-04-05 18:08:53 +02:00
|
|
|
"SPT-Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n")
|
|
|
|
{
|
2022-01-19 10:35:35 +01:00
|
|
|
return pim_process_spt_switchover_infinity_cmd(vty);
|
2017-04-07 16:16:23 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_spt_switchover_infinity,
|
|
|
|
ip_pim_spt_switchover_infinity_cmd,
|
|
|
|
"ip pim spt-switchover infinity-and-beyond",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"SPT-Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_spt_switchover_infinity_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-04-05 18:08:53 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (pim_spt_switchover_infinity_plist,
|
|
|
|
pim_spt_switchover_infinity_plist_cmd,
|
|
|
|
"spt-switchover infinity-and-beyond prefix-list PREFIXLIST4_NAME$plist",
|
2017-04-07 16:16:23 +02:00
|
|
|
"SPT-Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n"
|
|
|
|
"Prefix-List to control which groups to switch\n"
|
|
|
|
"Prefix-List name\n")
|
|
|
|
{
|
2022-01-19 10:35:35 +01:00
|
|
|
return pim_process_spt_switchover_prefixlist_cmd(vty, plist);
|
2017-04-05 18:08:53 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_spt_switchover_infinity_plist,
|
|
|
|
ip_pim_spt_switchover_infinity_plist_cmd,
|
|
|
|
"ip pim spt-switchover infinity-and-beyond prefix-list PREFIXLIST4_NAME$plist",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"SPT-Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n"
|
|
|
|
"Prefix-List to control which groups to switch\n"
|
|
|
|
"Prefix-List name\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_spt_switchover_prefixlist_cmd(vty, plist);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-04-05 18:08:53 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (no_pim_spt_switchover_infinity,
|
|
|
|
no_pim_spt_switchover_infinity_cmd,
|
|
|
|
"no spt-switchover infinity-and-beyond",
|
2017-04-05 18:08:53 +02:00
|
|
|
NO_STR
|
|
|
|
"SPT_Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n")
|
|
|
|
{
|
2022-01-19 10:35:35 +01:00
|
|
|
return pim_process_no_spt_switchover_cmd(vty);
|
2017-04-07 16:16:23 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_spt_switchover_infinity,
|
|
|
|
no_ip_pim_spt_switchover_infinity_cmd,
|
|
|
|
"no ip pim spt-switchover infinity-and-beyond",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"SPT_Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_spt_switchover_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-04-05 18:08:53 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (no_pim_spt_switchover_infinity_plist,
|
|
|
|
no_pim_spt_switchover_infinity_plist_cmd,
|
|
|
|
"no spt-switchover infinity-and-beyond prefix-list PREFIXLIST4_NAME",
|
2017-04-07 16:16:23 +02:00
|
|
|
NO_STR
|
|
|
|
"SPT_Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n"
|
|
|
|
"Prefix-List to control which groups to switch\n"
|
|
|
|
"Prefix-List name\n")
|
|
|
|
{
|
2022-01-19 10:35:35 +01:00
|
|
|
return pim_process_no_spt_switchover_cmd(vty);
|
2017-04-05 18:08:53 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_spt_switchover_infinity_plist,
|
|
|
|
no_ip_pim_spt_switchover_infinity_plist_cmd,
|
|
|
|
"no ip pim spt-switchover infinity-and-beyond prefix-list PREFIXLIST4_NAME",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"SPT_Switchover\n"
|
|
|
|
"Never switch to SPT Tree\n"
|
|
|
|
"Prefix-List to control which groups to switch\n"
|
|
|
|
"Prefix-List name\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_spt_switchover_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-04-05 18:08:53 +02:00
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG (pim_register_accept_list,
|
2020-02-20 18:14:03 +01:00
|
|
|
pim_register_accept_list_cmd,
|
2024-06-12 18:26:48 +02:00
|
|
|
"[no] register-accept-list PREFIXLIST4_NAME$word",
|
2020-02-20 18:14:03 +01:00
|
|
|
NO_STR
|
|
|
|
"Only accept registers from a specific source prefix list\n"
|
|
|
|
"Prefix-List name\n")
|
|
|
|
{
|
2020-10-23 16:39:22 +02:00
|
|
|
char reg_alist_xpath[XPATH_MAXLEN];
|
|
|
|
|
|
|
|
snprintf(reg_alist_xpath, sizeof(reg_alist_xpath),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./register-accept-list");
|
2020-02-20 18:14:03 +01:00
|
|
|
|
|
|
|
if (no)
|
2020-10-23 16:39:22 +02:00
|
|
|
nb_cli_enqueue_change(vty, reg_alist_xpath,
|
2020-11-20 22:06:34 +01:00
|
|
|
NB_OP_DESTROY, NULL);
|
2020-10-23 16:39:22 +02:00
|
|
|
else
|
|
|
|
nb_cli_enqueue_change(vty, reg_alist_xpath,
|
2020-11-20 22:06:34 +01:00
|
|
|
NB_OP_MODIFY, word);
|
2020-10-23 16:39:22 +02:00
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
2020-02-20 18:14:03 +01:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_register_accept_list,
|
|
|
|
ip_pim_register_accept_list_cmd,
|
|
|
|
"[no] ip pim register-accept-list PREFIXLIST4_NAME$word",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Only accept registers from a specific source prefix list\n"
|
|
|
|
"Prefix-List name\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char reg_alist_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2020-02-20 18:14:03 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
snprintf(reg_alist_xpath, sizeof(reg_alist_xpath),
|
|
|
|
"./register-accept-list");
|
|
|
|
|
|
|
|
if (no)
|
|
|
|
nb_cli_enqueue_change(vty, reg_alist_xpath, NB_OP_DESTROY, NULL);
|
|
|
|
else
|
|
|
|
nb_cli_enqueue_change(vty, reg_alist_xpath, NB_OP_MODIFY, word);
|
|
|
|
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFPY (pim_joinprune_time,
|
|
|
|
pim_joinprune_time_cmd,
|
|
|
|
"join-prune-interval (1-65535)$jpi",
|
2016-12-06 21:48:42 +01:00
|
|
|
"Join Prune Send Interval\n"
|
|
|
|
"Seconds\n")
|
|
|
|
{
|
2022-01-19 09:06:41 +01:00
|
|
|
return pim_process_join_prune_cmd(vty, jpi_str);
|
2016-12-06 21:48:42 +01:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_joinprune_time,
|
|
|
|
ip_pim_joinprune_time_cmd,
|
|
|
|
"ip pim join-prune-interval (1-65535)$jpi",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Join Prune Send Interval\n"
|
|
|
|
"Seconds\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_join_prune_cmd(vty, jpi_str);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-12-06 21:48:42 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (no_pim_joinprune_time,
|
|
|
|
no_pim_joinprune_time_cmd,
|
|
|
|
"no join-prune-interval [(1-65535)]",
|
2016-12-06 21:48:42 +01:00
|
|
|
NO_STR
|
|
|
|
"Join Prune Send Interval\n"
|
2021-08-04 00:22:09 +02:00
|
|
|
IGNORED_IN_NO_STR)
|
2016-12-06 21:48:42 +01:00
|
|
|
{
|
2022-01-19 09:06:41 +01:00
|
|
|
return pim_process_no_join_prune_cmd(vty);
|
2016-12-06 21:48:42 +01:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_joinprune_time,
|
|
|
|
no_ip_pim_joinprune_time_cmd,
|
|
|
|
"no ip pim join-prune-interval [(1-65535)]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Join Prune Send Interval\n"
|
|
|
|
IGNORED_IN_NO_STR,
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2016-12-06 21:48:42 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_join_prune_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFPY (pim_register_suppress,
|
|
|
|
pim_register_suppress_cmd,
|
|
|
|
"register-suppress-time (1-65535)$rst",
|
2016-11-17 00:11:49 +01:00
|
|
|
"Register Suppress Timer\n"
|
|
|
|
"Seconds\n")
|
2016-08-18 15:07:14 +02:00
|
|
|
{
|
2022-01-19 14:50:02 +01:00
|
|
|
return pim_process_register_suppress_cmd(vty, rst_str);
|
2016-08-18 15:07:14 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_register_suppress,
|
|
|
|
ip_pim_register_suppress_cmd,
|
|
|
|
"ip pim register-suppress-time (1-65535)$rst",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Register Suppress Timer\n"
|
|
|
|
"Seconds\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_register_suppress_cmd(vty, rst_str);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-08-18 15:07:14 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (no_pim_register_suppress,
|
|
|
|
no_pim_register_suppress_cmd,
|
|
|
|
"no register-suppress-time [(1-65535)]",
|
2016-08-18 15:07:14 +02:00
|
|
|
NO_STR
|
2016-11-17 00:11:49 +01:00
|
|
|
"Register Suppress Timer\n"
|
2021-08-04 00:22:09 +02:00
|
|
|
IGNORED_IN_NO_STR)
|
2016-08-18 15:07:14 +02:00
|
|
|
{
|
2022-01-19 14:50:02 +01:00
|
|
|
return pim_process_no_register_suppress_cmd(vty);
|
2016-08-18 15:07:14 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_register_suppress,
|
|
|
|
no_ip_pim_register_suppress_cmd,
|
|
|
|
"no ip pim register-suppress-time [(1-65535)]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Register Suppress Timer\n"
|
|
|
|
IGNORED_IN_NO_STR,
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_register_suppress_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-08-18 15:07:14 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (pim_rp_keep_alive,
|
|
|
|
pim_rp_keep_alive_cmd,
|
|
|
|
"rp keep-alive-timer (1-65535)$kat",
|
2022-04-19 08:13:17 +02:00
|
|
|
"Rendezvous Point\n"
|
2017-08-02 14:15:45 +02:00
|
|
|
"Keep alive Timer\n"
|
|
|
|
"Seconds\n")
|
|
|
|
{
|
2022-01-19 14:36:05 +01:00
|
|
|
return pim_process_rp_kat_cmd(vty, kat_str);
|
2017-08-02 14:15:45 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_rp_keep_alive,
|
|
|
|
ip_pim_rp_keep_alive_cmd,
|
|
|
|
"ip pim rp keep-alive-timer (1-65535)$kat",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Rendezvous Point\n"
|
|
|
|
"Keep alive Timer\n"
|
|
|
|
"Seconds\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_rp_kat_cmd(vty, kat_str);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-08-02 14:15:45 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY (no_pim_rp_keep_alive,
|
|
|
|
no_pim_rp_keep_alive_cmd,
|
|
|
|
"no rp keep-alive-timer [(1-65535)]",
|
2017-08-02 14:15:45 +02:00
|
|
|
NO_STR
|
2022-04-19 08:13:17 +02:00
|
|
|
"Rendezvous Point\n"
|
2017-08-02 14:15:45 +02:00
|
|
|
"Keep alive Timer\n"
|
2021-08-04 00:22:09 +02:00
|
|
|
IGNORED_IN_NO_STR)
|
2017-08-02 14:15:45 +02:00
|
|
|
{
|
2022-01-19 14:36:05 +01:00
|
|
|
return pim_process_no_rp_kat_cmd(vty);
|
2017-08-02 14:15:45 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_rp_keep_alive,
|
|
|
|
no_ip_pim_rp_keep_alive_cmd,
|
|
|
|
"no ip pim rp keep-alive-timer [(1-65535)]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Rendezvous Point\n"
|
|
|
|
"Keep alive Timer\n"
|
|
|
|
IGNORED_IN_NO_STR,
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2017-08-02 14:15:45 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_rp_kat_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

DEFPY (pim_keep_alive,
       pim_keep_alive_cmd,
       "keep-alive-timer (1-65535)$kat",
       "Keep alive Timer\n"
       "Seconds\n")
{
	return pim_process_keepalivetimer_cmd(vty, kat_str);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_keep_alive,
|
|
|
|
ip_pim_keep_alive_cmd,
|
|
|
|
"ip pim keep-alive-timer (1-65535)$kat",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Keep alive Timer\n"
|
|
|
|
"Seconds\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
2016-08-18 16:00:02 +02:00
|
|
|
{
|
2024-06-12 18:26:48 +02:00
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2016-08-18 16:00:02 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_keepalivetimer_cmd(vty, kat_str);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

DEFPY (no_pim_keep_alive,
       no_pim_keep_alive_cmd,
       "no keep-alive-timer [(1-65535)]",
       NO_STR
       "Keep alive Timer\n"
       IGNORED_IN_NO_STR)
{
	return pim_process_no_keepalivetimer_cmd(vty);
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_keep_alive,
|
|
|
|
no_ip_pim_keep_alive_cmd,
|
|
|
|
"no ip pim keep-alive-timer [(1-65535)]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Keep alive Timer\n"
|
|
|
|
IGNORED_IN_NO_STR,
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_keepalivetimer_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

DEFPY (pim_packets,
       pim_packets_cmd,
       "packets (1-255)",
       "packets to process at one time per fd\n"
       "Number of packets\n")
{
	return pim_process_pim_packet_cmd(vty, packets_str);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_packets,
|
|
|
|
ip_pim_packets_cmd,
|
|
|
|
"ip pim packets (1-255)",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"packets to process at one time per fd\n"
|
|
|
|
"Number of packets\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_pim_packet_cmd(vty, packets_str);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
2016-11-18 18:50:46 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
return ret;
|
|
|
|
}

DEFPY (no_pim_packets,
       no_pim_packets_cmd,
       "no packets [(1-255)]",
       NO_STR
       "packets to process at one time per fd\n"
       IGNORED_IN_NO_STR)
{
	return pim_process_no_pim_packet_cmd(vty);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_packets,
|
|
|
|
no_ip_pim_packets_cmd,
|
|
|
|
"no ip pim packets [(1-255)]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"packets to process at one time per fd\n"
|
|
|
|
IGNORED_IN_NO_STR,
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_pim_packet_cmd(vty);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
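
/*
 * IGMP group watermark: when the number of IGMP groups in the VRF
 * reaches the configured limit, pimd logs a scale warning; joins beyond
 * the watermark are still accepted.  The "no" form resets the limit to
 * 0, which disables the warning.
 */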
DEFPY (ip_igmp_group_watermark,
       ip_igmp_group_watermark_cmd,
       "ip igmp watermark-warn (1-65535)$limit",
       IP_STR
       IGMP_STR
       "Configure group limit for watermark warning\n"
       "Group count to generate watermark warning\n")
{
	PIM_DECLVAR_CONTEXT_VRF(vrf, pim);

	pim->gm_watermark_limit = limit;

	return CMD_SUCCESS;
}

DEFPY (no_ip_igmp_group_watermark,
       no_ip_igmp_group_watermark_cmd,
       "no ip igmp watermark-warn [(1-65535)$limit]",
       NO_STR
       IP_STR
       IGMP_STR
       "Unconfigure group limit for watermark warning\n"
       IGNORED_IN_NO_STR)
{
	PIM_DECLVAR_CONTEXT_VRF(vrf, pim);

	pim->gm_watermark_limit = 0;

	return CMD_SUCCESS;
}
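
/*
 * "send-v6-secondary" controls whether the router's IPv6 addresses are
 * advertised as secondary addresses in PIM hello messages (an assumption
 * based on the option name; the handlers below only toggle the
 * ./send-v6-secondary leaf in the candidate configuration).
 */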
DEFPY_YANG (pim_v6_secondary,
	    pim_v6_secondary_cmd,
	    "send-v6-secondary",
	    "Send v6 secondary addresses\n")
{
	char send_v6_secondary_xpath[XPATH_MAXLEN];

	snprintf(send_v6_secondary_xpath, sizeof(send_v6_secondary_xpath),
		 "./send-v6-secondary");

	nb_cli_enqueue_change(vty, send_v6_secondary_xpath, NB_OP_MODIFY,
			      "true");

	return nb_cli_apply_changes(vty, NULL);
}
|
|
|
|
DEFPY_ATTR(ip_pim_v6_secondary,
|
|
|
|
ip_pim_v6_secondary_cmd,
|
|
|
|
"ip pim send-v6-secondary",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Send v6 secondary addresses\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char send_v6_secondary_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2020-10-23 15:45:33 +02:00
|
|
|
|
|
|
|
snprintf(send_v6_secondary_xpath, sizeof(send_v6_secondary_xpath),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./send-v6-secondary");
|
2020-10-23 15:45:33 +02:00
|
|
|
nb_cli_enqueue_change(vty, send_v6_secondary_xpath, NB_OP_MODIFY,
|
2020-11-20 22:06:34 +01:00
|
|
|
"true");
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2020-10-23 15:45:33 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2017-03-30 05:23:25 +02:00
|
|
|
}

DEFPY_YANG (no_pim_v6_secondary,
	    no_pim_v6_secondary_cmd,
	    "no send-v6-secondary",
	    NO_STR
	    "Send v6 secondary addresses\n")
{
	char send_v6_secondary_xpath[XPATH_MAXLEN];

	snprintf(send_v6_secondary_xpath, sizeof(send_v6_secondary_xpath),
		 "./send-v6-secondary");

	nb_cli_enqueue_change(vty, send_v6_secondary_xpath, NB_OP_MODIFY,
			      "false");

	return nb_cli_apply_changes(vty, NULL);
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_v6_secondary,
|
|
|
|
no_ip_pim_v6_secondary_cmd,
|
|
|
|
"no ip pim send-v6-secondary",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Send v6 secondary addresses\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char send_v6_secondary_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2020-10-23 15:45:33 +02:00
|
|
|
|
|
|
|
snprintf(send_v6_secondary_xpath, sizeof(send_v6_secondary_xpath),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./send-v6-secondary");
|
2020-10-23 15:45:33 +02:00
|
|
|
nb_cli_enqueue_change(vty, send_v6_secondary_xpath, NB_OP_MODIFY,
|
2020-11-20 22:06:34 +01:00
|
|
|
"false");
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2020-10-23 15:45:33 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2017-03-30 05:23:25 +02:00
|
|
|
}
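
/*
 * Static RP configuration.  When no group prefix is given, both the
 * positive and the "no" form default to the full IPv4 multicast range
 * 224.0.0.0/4.
 */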
DEFPY (pim_rp,
       pim_rp_cmd,
       "rp A.B.C.D$rp [A.B.C.D/M]$gp",
       "Rendezvous Point\n"
       "ip address of RP\n"
       "Group Address range to cover\n")
{
	const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";

	return pim_process_rp_cmd(vty, rp_str, group_str);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_rp,
|
|
|
|
ip_pim_rp_cmd,
|
|
|
|
"ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Rendezvous Point\n"
|
|
|
|
"ip address of RP\n"
|
|
|
|
"Group Address range to cover\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2016-09-13 21:41:33 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_rp_cmd(vty, rp_str, group_str);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

DEFPY (pim_rp_prefix_list,
       pim_rp_prefix_list_cmd,
       "rp A.B.C.D$rp prefix-list PREFIXLIST4_NAME$plist",
       "Rendezvous Point\n"
       "ip address of RP\n"
       "group prefix-list filter\n"
       "Name of a prefix-list\n")
{
	return pim_process_rp_plist_cmd(vty, rp_str, plist);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(ip_pim_rp_prefix_list,
|
|
|
|
ip_pim_rp_prefix_list_cmd,
|
|
|
|
"ip pim rp A.B.C.D$rp prefix-list PREFIXLIST4_NAME$plist",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Rendezvous Point\n"
|
|
|
|
"ip address of RP\n"
|
|
|
|
"group prefix-list filter\n"
|
|
|
|
"Name of a prefix-list\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_rp_plist_cmd(vty, rp_str, plist);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

DEFPY (no_pim_rp,
       no_pim_rp_cmd,
       "no rp A.B.C.D$rp [A.B.C.D/M]$gp",
       NO_STR
       "Rendezvous Point\n"
       "ip address of RP\n"
       "Group Address range to cover\n")
{
	const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";

	return pim_process_no_rp_cmd(vty, rp_str, group_str);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_rp,
|
|
|
|
no_ip_pim_rp_cmd,
|
|
|
|
"no ip pim rp A.B.C.D$rp [A.B.C.D/M]$gp",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Rendezvous Point\n"
|
|
|
|
"ip address of RP\n"
|
|
|
|
"Group Address range to cover\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
const char *group_str = (gp_str) ? gp_str : "224.0.0.0/4";
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_rp_cmd(vty, rp_str, group_str);
|
2016-09-13 21:41:33 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}

DEFPY (no_pim_rp_prefix_list,
       no_pim_rp_prefix_list_cmd,
       "no rp A.B.C.D$rp prefix-list PREFIXLIST4_NAME$plist",
       NO_STR
       "Rendezvous Point\n"
       "ip address of RP\n"
       "group prefix-list filter\n"
       "Name of a prefix-list\n")
{
	return pim_process_no_rp_plist_cmd(vty, rp_str, plist);
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_rp_prefix_list,
|
|
|
|
no_ip_pim_rp_prefix_list_cmd,
|
|
|
|
"no ip pim rp A.B.C.D$rp prefix-list PREFIXLIST4_NAME$plist",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Rendezvous Point\n"
|
|
|
|
"ip address of RP\n"
|
|
|
|
"group prefix-list filter\n"
|
|
|
|
"Name of a prefix-list\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2017-03-17 19:51:13 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pim_process_no_rp_plist_cmd(vty, rp_str, plist);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
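
/*
 * AutoRP support: "autorp discovery" enables reception of AutoRP
 * RP-mapping (discovery) messages, "autorp announce" makes this router
 * advertise itself as a candidate RP, and "autorp send-rp-discovery"
 * enables the mapping-agent role that originates the discovery messages.
 * Scope, interval and holdtime knobs follow for both message types.
 */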
DEFPY (pim_autorp_discovery,
       pim_autorp_discovery_cmd,
       "[no] autorp discovery",
       NO_STR
       "AutoRP\n"
       "Enable AutoRP discovery\n")
{
	if (no)
		return pim_process_no_autorp_cmd(vty);
	else
		return pim_process_autorp_cmd(vty);
}

DEFPY (pim_autorp_announce_rp,
       pim_autorp_announce_rp_cmd,
       "[no] autorp announce A.B.C.D$rpaddr ![A.B.C.D/M$grp|group-list PREFIX_LIST$plist]",
       NO_STR
       "AutoRP\n"
       "AutoRP Candidate RP announcement\n"
       "AutoRP Candidate RP address\n"
       "Group prefix\n"
       "Prefix list\n"
       "List name\n")
{
	if (grp_str && (!pim_addr_is_multicast(grp->prefix) || grp->prefixlen < 4)) {
		vty_out(vty, "%% group prefix %pFX is not a valid multicast range\n", grp);
		return CMD_WARNING_CONFIG_FAILED;
	}

	return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, grp_str, plist);
}

DEFPY (pim_autorp_announce_scope_int,
       pim_autorp_announce_scope_int_cmd,
       "[no] autorp announce {scope (1-255) | interval (1-65535) | holdtime (0-65535)}",
       NO_STR
       "AutoRP\n"
       "AutoRP Candidate RP announcement\n"
       "Packet scope (TTL)\n"
       "TTL value\n"
       "Announcement interval\n"
       "Time in seconds\n"
       "Announcement holdtime\n"
       "Time in seconds\n")
{
	return pim_process_autorp_announce_scope_int_cmd(vty, no, scope_str, interval_str,
							  holdtime_str);
}

DEFPY (pim_autorp_send_rp_discovery,
       pim_autorp_send_rp_discovery_cmd,
       "[no] autorp send-rp-discovery [source <address A.B.C.D | interface IFNAME | loopback$loopback | any$any>]",
       NO_STR
       "AutoRP\n"
       "Enable AutoRP mapping agent\n"
       "Specify AutoRP discovery source\n"
       "Local address\n"
       IP_ADDR_STR
       "Local Interface (uses highest address)\n"
       IFNAME_STR
       "Highest loopback address (default)\n"
       "Highest address of any interface\n")
{
	return pim_process_autorp_send_rp_discovery_cmd(vty, no, any, loopback, ifname, address_str);
}

DEFPY (pim_autorp_send_rp_discovery_scope_int,
       pim_autorp_send_rp_discovery_scope_int_cmd,
       "[no] autorp send-rp-discovery {scope (0-255) | interval (1-65535) | holdtime (0-65535)}",
       NO_STR
       "AutoRP\n"
       "Enable AutoRP mapping agent\n"
       "Packet scope (TTL)\n"
       "TTL value\n"
       "Discovery TX interval\n"
       "Time in seconds\n"
       "Announcement holdtime\n"
       "Time in seconds\n")
{
	return pim_process_autorp_send_rp_discovery_scope_int_cmd(vty, no, scope_str, interval_str,
								   holdtime_str);
}
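
/*
 * BSR (bootstrap router) candidacy: "bsr candidate-bsr" and
 * "bsr candidate-rp" let this router offer itself as BSR or as a
 * candidate RP.  The source address may be given explicitly, taken from
 * an interface, from the highest loopback (the default), or from any
 * interface; candidate-RP group ranges are added with
 * "bsr candidate-rp group".
 */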
DEFPY (pim_bsr_candidate_bsr,
       pim_bsr_candidate_bsr_cmd,
       "[no] bsr candidate-bsr [{priority (0-255)|source <address A.B.C.D|interface IFNAME|loopback$loopback|any$any>}]",
       NO_STR
       BSR_STR
       "Make this router a Candidate BSR\n"
       "BSR Priority (higher wins)\n"
       "BSR Priority (higher wins)\n"
       "Specify IP address for BSR operation\n"
       "Local address to use\n"
       "Local address to use\n"
       "Interface to pick address from\n"
       "Interface to pick address from\n"
       "Pick highest loopback address (default)\n"
       "Pick highest address from any interface\n")
{
	return pim_process_bsr_candidate_cmd(vty, FRR_PIM_CAND_BSR_XPATH, no,
					     false, any, ifname, address_str,
					     priority_str, NULL);
}

DEFPY (pim_bsr_candidate_rp,
       pim_bsr_candidate_rp_cmd,
       "[no] bsr candidate-rp [{priority (0-255)|interval (1-4294967295)|source <address A.B.C.D|interface IFNAME|loopback$loopback|any$any>}]",
       NO_STR
       BSR_STR
       "Make this router a Candidate RP\n"
       "RP Priority (lower wins)\n"
       "RP Priority (lower wins)\n"
       "Advertisement interval (seconds)\n"
       "Advertisement interval (seconds)\n"
       "Specify IP address for RP operation\n"
       "Local address to use\n"
       "Local address to use\n"
       "Interface to pick address from\n"
       "Interface to pick address from\n"
       "Pick highest loopback address (default)\n"
       "Pick highest address from any interface\n")
{
	return pim_process_bsr_candidate_cmd(vty, FRR_PIM_CAND_RP_XPATH, no,
					     true, any, ifname, address_str,
					     priority_str, interval_str);
}

DEFPY (pim_bsr_candidate_rp_group,
       pim_bsr_candidate_rp_group_cmd,
       "[no] bsr candidate-rp group A.B.C.D/M",
       NO_STR
       BSR_STR
       "Make this router a Candidate RP\n"
       "Configure groups to become candidate RP for (At least one group must be configured)\n"
       "Multicast group prefix\n")
{
	return pim_process_bsr_crp_grp_cmd(vty, group_str, no);
}
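
/*
 * The SSM prefix-list commands override which group range is treated as
 * Source Specific Multicast; without a prefix-list only the default SSM
 * group range is treated as SSM.
 */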
DEFPY_YANG (pim_ssm_prefix_list,
	    pim_ssm_prefix_list_cmd,
	    "ssm prefix-list PREFIXLIST4_NAME$plist",
	    "Source Specific Multicast\n"
	    "group range prefix-list filter\n"
	    "Name of a prefix-list\n")
{
	char ssm_plist_xpath[XPATH_MAXLEN];

	snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), "./ssm-prefix-list");

	nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_MODIFY, plist);

	return nb_cli_apply_changes(vty, NULL);
}
|
|
|
|
DEFPY_ATTR(ip_pim_ssm_prefix_list,
|
|
|
|
ip_pim_ssm_prefix_list_cmd,
|
|
|
|
"ip pim ssm prefix-list PREFIXLIST4_NAME$plist",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Source Specific Multicast\n"
|
|
|
|
"group range prefix-list filter\n"
|
|
|
|
"Name of a prefix-list\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char ssm_plist_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2020-10-23 15:58:51 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), "./ssm-prefix-list");
|
|
|
|
nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_MODIFY, plist);
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2020-10-23 15:58:51 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
2020-10-23 15:58:51 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
return ret;
|
2017-03-17 19:51:13 +01:00
|
|
|
}

DEFPY_YANG (no_pim_ssm_prefix_list,
	    no_pim_ssm_prefix_list_cmd,
	    "no ssm prefix-list",
	    NO_STR
	    "Source Specific Multicast\n"
	    "group range prefix-list filter\n")
{
	char ssm_plist_xpath[XPATH_MAXLEN];

	snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), "./ssm-prefix-list");

	nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty, NULL);
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_ssm_prefix_list,
|
|
|
|
no_ip_pim_ssm_prefix_list_cmd,
|
|
|
|
"no ip pim ssm prefix-list",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Source Specific Multicast\n"
|
|
|
|
"group range prefix-list filter\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char ssm_plist_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2020-10-23 15:58:51 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), "./ssm-prefix-list");
|
2020-10-23 15:58:51 +02:00
|
|
|
nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_DESTROY, NULL);
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2020-10-23 15:58:51 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2017-03-17 19:51:13 +01:00
|
|
|
}

DEFPY_YANG (no_pim_ssm_prefix_list_name,
	    no_pim_ssm_prefix_list_name_cmd,
	    "no ssm prefix-list PREFIXLIST4_NAME$plist",
	    NO_STR
	    "Source Specific Multicast\n"
	    "group range prefix-list filter\n"
	    "Name of a prefix-list\n")
{
	const struct lyd_node *ssm_plist_dnode;
	char ssm_plist_xpath[XPATH_MAXLEN + 16];
	const char *ssm_plist_name;

	snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), "%s/ssm-prefix-list",
		 VTY_CURR_XPATH);
	ssm_plist_dnode = yang_dnode_get(vty->candidate_config->dnode,
					 ssm_plist_xpath);

	if (!ssm_plist_dnode) {
		vty_out(vty, "%% pim ssm prefix-list %s doesn't exist\n", plist);
		return CMD_WARNING_CONFIG_FAILED;
	}

	ssm_plist_name = yang_dnode_get_string(ssm_plist_dnode, ".");

	if (ssm_plist_name && !strcmp(ssm_plist_name, plist)) {
		nb_cli_enqueue_change(vty, ssm_plist_xpath, NB_OP_DESTROY, NULL);
		return nb_cli_apply_changes(vty, NULL);
	}

	vty_out(vty, "%% pim ssm prefix-list %s doesn't exist\n", plist);

	return CMD_WARNING_CONFIG_FAILED;
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_ssm_prefix_list_name,
|
|
|
|
no_ip_pim_ssm_prefix_list_name_cmd,
|
|
|
|
"no ip pim ssm prefix-list PREFIXLIST4_NAME$plist",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Source Specific Multicast\n"
|
|
|
|
"group range prefix-list filter\n"
|
|
|
|
"Name of a prefix-list\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
const struct lyd_node *ssm_plist_dnode;
|
2024-07-22 18:19:50 +02:00
|
|
|
char ssm_plist_xpath[XPATH_MAXLEN + 16];
|
2024-06-12 18:26:48 +02:00
|
|
|
const char *ssm_plist_name;
|
|
|
|
int ret = CMD_WARNING_CONFIG_FAILED;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
snprintf(ssm_plist_xpath, sizeof(ssm_plist_xpath), "%s/ssm-prefix-list",
|
|
|
|
VTY_CURR_XPATH);
|
|
|
|
ssm_plist_dnode = yang_dnode_get(vty->candidate_config->dnode,
|
|
|
|
ssm_plist_xpath);
|
|
|
|
if (ssm_plist_dnode) {
|
|
|
|
ssm_plist_name = yang_dnode_get_string(ssm_plist_dnode, ".");
|
|
|
|
if (ssm_plist_name && !strcmp(ssm_plist_name, plist)) {
|
|
|
|
nb_cli_enqueue_change(vty, ssm_plist_xpath,
|
|
|
|
NB_OP_DESTROY, NULL);
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% pim ssm prefix-list %s doesn't exist\n",
|
|
|
|
plist);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% pim ssm prefix-list %s doesn't exist\n", plist);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-03-17 19:51:13 +01:00
|
|
|
|
|
|
|
DEFUN (show_ip_pim_ssm_range,
|
|
|
|
show_ip_pim_ssm_range_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip pim [vrf NAME] group-type [json]",
|
2017-03-17 19:51:13 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
2017-05-23 14:57:11 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2017-03-17 19:51:13 +01:00
|
|
|
"PIM group type\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2017-03-17 19:51:13 +01:00
|
|
|
{
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
2017-05-23 14:57:11 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2017-06-29 16:45:38 +02:00
|
|
|
ip_pim_ssm_show_group_range(vrf->info, vty, uj);
|
2017-03-17 19:51:13 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
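
/*
 * Classify a group address for "show ip pim group-type": "invalid" if it
 * does not parse, "not-multicast" if it falls outside 224.0.0.0/4, and
 * otherwise SSM or ASM depending on the configured SSM range.
 */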
static void ip_pim_ssm_show_group_type(struct pim_instance *pim,
				       struct vty *vty, bool uj,
				       const char *group)
{
	struct in_addr group_addr;
	const char *type_str;
	int result;

	result = inet_pton(AF_INET, group, &group_addr);
	if (result <= 0)
		type_str = "invalid";
	else {
		if (pim_is_group_224_4(group_addr))
			type_str =
				pim_is_grp_ssm(pim, group_addr) ? "SSM" : "ASM";
		else
			type_str = "not-multicast";
	}

	if (uj) {
		json_object *json;
		json = json_object_new_object();
		json_object_string_add(json, "groupType", type_str);
		vty_json(vty, json);
	} else
		vty_out(vty, "Group type : %s\n", type_str);
}

DEFUN (show_ip_pim_group_type,
       show_ip_pim_group_type_cmd,
       "show ip pim [vrf NAME] group-type A.B.C.D [json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "multicast group type\n"
       "group address\n"
       JSON_STR)
{
	int idx = 2;
	bool uj = use_json(argc, argv);
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);

	if (!vrf)
		return CMD_WARNING;

	argv_find(argv, argc, "A.B.C.D", &idx);
	ip_pim_ssm_show_group_type(vrf->info, vty, uj, argv[idx]->arg);

	return CMD_SUCCESS;
}

DEFPY (show_ip_pim_bsr,
       show_ip_pim_bsr_cmd,
       "show ip pim bsr [vrf NAME] [json$json]",
       SHOW_STR
       IP_STR
       PIM_STR
       "boot-strap router information\n"
       VRF_CMD_HELP_STR
       JSON_STR)
{
	return pim_show_bsr_helper(vrf, vty, !!json);
}
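
/*
 * ssmpingd responder configuration.  When no source address is given the
 * handlers below fall back to 0.0.0.0.
 */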
DEFPY (pim_ssmpingd,
       pim_ssmpingd_cmd,
       "ssmpingd [A.B.C.D]$src",
       CONF_SSMPINGD_STR
       "Source address\n")
{
	if (src_str)
		return pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, src_str);
	else
		return pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, "0.0.0.0");
}
|
|
|
|
DEFPY_ATTR(ip_pim_ssmpingd,
|
|
|
|
ip_ssmpingd_cmd,
|
|
|
|
"ip ssmpingd [A.B.C.D]$src",
|
|
|
|
IP_STR
|
|
|
|
CONF_SSMPINGD_STR
|
|
|
|
"Source address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2015-02-04 07:01:14 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src_str)
|
|
|
|
ret = pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, src_str);
|
|
|
|
else
|
|
|
|
ret = pim_process_ssmpingd_cmd(vty, NB_OP_CREATE, "0.0.0.0");
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2015-02-04 07:01:14 +01:00
|
|
|
}

DEFPY (no_pim_ssmpingd,
       no_pim_ssmpingd_cmd,
       "no ssmpingd [A.B.C.D]$src",
       NO_STR
       CONF_SSMPINGD_STR
       "Source address\n")
{
	if (src_str)
		return pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, src_str);
	else
		return pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, "0.0.0.0");
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_ssmpingd,
|
|
|
|
no_ip_ssmpingd_cmd,
|
|
|
|
"no ip ssmpingd [A.B.C.D]$src",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
CONF_SSMPINGD_STR
|
|
|
|
"Source address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src_str)
|
|
|
|
ret = pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, src_str);
|
|
|
|
else
|
|
|
|
ret = pim_process_ssmpingd_cmd(vty, NB_OP_DESTROY, "0.0.0.0");
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
2015-02-04 07:01:14 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
return ret;
|
2015-02-04 07:01:14 +01:00
|
|
|
}
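
/*
 * PIM ECMP: with "ecmp" enabled the RPF nexthop is picked by hash across
 * the equal-cost nexthops in the nexthop cache instead of always using
 * the first one; "ecmp rebalance" additionally lets existing multicast
 * cache entries move to the newly hashed path.  Both are disabled by
 * default.
 */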
DEFPY_YANG (pim_ecmp,
	    pim_ecmp_cmd,
	    "ecmp",
	    "Enable PIM ECMP \n")
{
	char ecmp_xpath[XPATH_MAXLEN];

	snprintf(ecmp_xpath, sizeof(ecmp_xpath), "./ecmp");
	nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, NULL);
}
|
|
|
|
DEFPY_ATTR(ip_pim_ecmp,
|
|
|
|
ip_pim_ecmp_cmd,
|
|
|
|
"ip pim ecmp",
|
|
|
|
IP_STR
|
|
|
|
PIM_STR
|
|
|
|
"Enable PIM ECMP \n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char ecmp_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2020-10-23 15:31:39 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
snprintf(ecmp_xpath, sizeof(ecmp_xpath), "./ecmp");
|
2020-10-23 15:31:39 +02:00
|
|
|
nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "true");
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
}

DEFPY_YANG (no_pim_ecmp,
	    no_pim_ecmp_cmd,
	    "no ecmp",
	    NO_STR
	    "Disable PIM ECMP \n")
{
	char ecmp_xpath[XPATH_MAXLEN];

	snprintf(ecmp_xpath, sizeof(ecmp_xpath), "./ecmp");
	nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "false");

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_ATTR(no_ip_pim_ecmp,
	   no_ip_pim_ecmp_cmd,
	   "no ip pim ecmp",
	   NO_STR
	   IP_STR
	   PIM_STR
	   "Disable PIM ECMP \n",
	   CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
{
	char ecmp_xpath[XPATH_MAXLEN];
	int ret;
	const char *vrfname;
	char xpath[XPATH_MAXLEN];
	int orig_node = -1;

	vrfname = pim_cli_get_vrf_name(vty);
	if (vrfname) {
		snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
			 "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
		nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
		if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
		    CMD_SUCCESS) {
			orig_node = vty->node;
			VTY_PUSH_XPATH(PIM_NODE, xpath);
		} else {
			return CMD_WARNING_CONFIG_FAILED;
		}
	} else {
		vty_out(vty, "%% Failed to determine vrf name\n");
		return CMD_WARNING_CONFIG_FAILED;
	}

	snprintf(ecmp_xpath, sizeof(ecmp_xpath), "./ecmp");
	nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "false");
	ret = nb_cli_apply_changes(vty, NULL);

	if (orig_node != -1) {
		vty->node = orig_node;
		vty->xpath_index--;
	}

	return ret;
}
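
/*
 * Editorial note (not from the original source): the DEFPY_ATTR wrapper
 * above keeps the deprecated global "no ip pim ecmp" syntax working.  The
 * pattern is visible in the handler: resolve the VRF name, enqueue an
 * NB_OP_CREATE for the per-VRF PIM container, commit it with
 * nb_cli_apply_changes_clear_pending(), temporarily push PIM_NODE via
 * VTY_PUSH_XPATH so the relative "./ecmp" xpath resolves, apply the real
 * change, then restore vty->node and xpath_index.  The non-deprecated
 * "no ecmp" form relies on the vty already sitting in PIM_NODE instead.
 */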

DEFPY_YANG (pim_ecmp_rebalance,
	    pim_ecmp_rebalance_cmd,
	    "ecmp rebalance",
	    "Enable PIM ECMP \n"
	    "Enable PIM ECMP Rebalance\n")
{
	char ecmp_xpath[XPATH_MAXLEN];
	char ecmp_rebalance_xpath[XPATH_MAXLEN];

	snprintf(ecmp_xpath, sizeof(ecmp_xpath), "./ecmp");
	snprintf(ecmp_rebalance_xpath, sizeof(ecmp_rebalance_xpath),
		 "./ecmp-rebalance");

	nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "true");
	nb_cli_enqueue_change(vty, ecmp_rebalance_xpath, NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_ATTR(ip_pim_ecmp_rebalance,
	   ip_pim_ecmp_rebalance_cmd,
	   "ip pim ecmp rebalance",
	   IP_STR
	   PIM_STR
	   "Enable PIM ECMP \n"
	   "Enable PIM ECMP Rebalance\n",
	   CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
{
	char ecmp_xpath[XPATH_MAXLEN];
	char ecmp_rebalance_xpath[XPATH_MAXLEN];
	int ret;
	const char *vrfname;
	char xpath[XPATH_MAXLEN];
	int orig_node = -1;

	vrfname = pim_cli_get_vrf_name(vty);
	if (vrfname) {
		snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
			 "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
		nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
		if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
		    CMD_SUCCESS) {
			orig_node = vty->node;
			VTY_PUSH_XPATH(PIM_NODE, xpath);
		} else {
			return CMD_WARNING_CONFIG_FAILED;
		}
	} else {
		vty_out(vty, "%% Failed to determine vrf name\n");
		return CMD_WARNING_CONFIG_FAILED;
	}

	snprintf(ecmp_xpath, sizeof(ecmp_xpath), "./ecmp");
	snprintf(ecmp_rebalance_xpath, sizeof(ecmp_rebalance_xpath),
		 "./ecmp-rebalance");
	nb_cli_enqueue_change(vty, ecmp_xpath, NB_OP_MODIFY, "true");
	nb_cli_enqueue_change(vty, ecmp_rebalance_xpath, NB_OP_MODIFY, "true");
	ret = nb_cli_apply_changes(vty, NULL);

	if (orig_node != -1) {
		vty->node = orig_node;
		vty->xpath_index--;
	}

	return ret;
}

DEFPY_YANG (no_pim_ecmp_rebalance,
	    no_pim_ecmp_rebalance_cmd,
	    "no ecmp rebalance",
	    NO_STR
	    "Disable PIM ECMP \n"
	    "Disable PIM ECMP Rebalance\n")
{
	char ecmp_rebalance_xpath[XPATH_MAXLEN];

	snprintf(ecmp_rebalance_xpath, sizeof(ecmp_rebalance_xpath),
		 "./ecmp-rebalance");

	nb_cli_enqueue_change(vty, ecmp_rebalance_xpath, NB_OP_MODIFY, "false");

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_ATTR(no_ip_pim_ecmp_rebalance,
	   no_ip_pim_ecmp_rebalance_cmd,
	   "no ip pim ecmp rebalance",
	   NO_STR
	   IP_STR
	   PIM_STR
	   "Disable PIM ECMP \n"
	   "Disable PIM ECMP Rebalance\n",
	   CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
{
	char ecmp_rebalance_xpath[XPATH_MAXLEN];
	int ret;
	const char *vrfname;
	char xpath[XPATH_MAXLEN];
	int orig_node = -1;

	vrfname = pim_cli_get_vrf_name(vty);
	if (vrfname) {
		snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
			 "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
		nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
		if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
		    CMD_SUCCESS) {
			orig_node = vty->node;
			VTY_PUSH_XPATH(PIM_NODE, xpath);
		} else {
			return CMD_WARNING_CONFIG_FAILED;
		}
	} else {
		vty_out(vty, "%% Failed to determine vrf name\n");
		return CMD_WARNING_CONFIG_FAILED;
	}

	snprintf(ecmp_rebalance_xpath, sizeof(ecmp_rebalance_xpath),
		 "./ecmp-rebalance");
	nb_cli_enqueue_change(vty, ecmp_rebalance_xpath, NB_OP_MODIFY, "false");
	ret = nb_cli_apply_changes(vty, NULL);

	if (orig_node != -1) {
		vty->node = orig_node;
		vty->xpath_index--;
	}

	return ret;
}
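
/*
 * Illustrative configuration sketch for the ECMP commands defined above.
 * The session itself is assumed, not taken from this file, and it assumes
 * the PIM configuration node is entered with "router pim":
 *
 *   router pim
 *    ecmp
 *    ecmp rebalance
 *   exit
 *
 * The deprecated "[no] ip pim ecmp [rebalance]" global forms modify the
 * same "./ecmp" and "./ecmp-rebalance" leaves in the frr-pim model.
 */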

DEFUN_YANG (interface_ip_igmp,
	    interface_ip_igmp_cmd,
	    "ip igmp",
	    IP_STR
	    IFACE_IGMP_STR)
{
	nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (interface_no_ip_igmp,
	    interface_no_ip_igmp_cmd,
	    "no ip igmp",
	    NO_STR
	    IP_STR
	    IFACE_IGMP_STR)
{
	const struct lyd_node *pim_enable_dnode;
	char pim_if_xpath[XPATH_MAXLEN];

	int printed =
		snprintf(pim_if_xpath, sizeof(pim_if_xpath),
			 "%s/frr-pim:pim/address-family[address-family='%s']",
			 VTY_CURR_XPATH, "frr-routing:ipv4");

	if (printed >= (int)(sizeof(pim_if_xpath))) {
		vty_out(vty, "Xpath too long (%d > %u)", printed + 1,
			XPATH_MAXLEN);
		return CMD_WARNING_CONFIG_FAILED;
	}

	pim_enable_dnode = yang_dnode_getf(vty->candidate_config->dnode,
					   FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
					   "frr-routing:ipv4");
	if (!pim_enable_dnode) {
		nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY, NULL);
		nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
	} else {
		if (!yang_dnode_get_bool(pim_enable_dnode, ".")) {
			nb_cli_enqueue_change(vty, pim_if_xpath, NB_OP_DESTROY,
					      NULL);
			nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
		} else
			nb_cli_enqueue_change(vty, "./enable",
					      NB_OP_MODIFY, "false");
	}

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}
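
/*
 * Editorial note on the handler above: "no ip igmp" cannot simply flip
 * "./enable" to false in every case.  If PIM is not also enabled on the
 * interface (the FRR_PIM_ENABLE_XPATH lookup fails or reads false), the
 * whole per-interface GM subtree and the matching frr-pim address-family
 * node are destroyed; only when PIM stays enabled is the IGMP "enable"
 * leaf set to "false" so the rest of the interface config survives.
 */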

DEFPY_YANG_HIDDEN (interface_ip_igmp_join,
		   interface_ip_igmp_join_cmd,
		   "[no] ip igmp join A.B.C.D$grp [A.B.C.D]$src",
		   NO_STR
		   IP_STR
		   IFACE_IGMP_STR
		   "IGMP join multicast group\n"
		   "Multicast group address\n"
		   "Source address\n")
{
	nb_cli_enqueue_change(vty, ".", (!no ? NB_OP_CREATE : NB_OP_DESTROY),
			      NULL);
	return nb_cli_apply_changes(vty, FRR_GMP_JOIN_GROUP_XPATH,
				    "frr-routing:ipv4", grp_str,
				    (src_str ? src_str : "0.0.0.0"));
}
ALIAS(interface_ip_igmp_join,
      interface_ip_igmp_join_group_cmd,
      "[no] ip igmp join-group A.B.C.D$grp [A.B.C.D]$src",
      NO_STR
      IP_STR
      IFACE_IGMP_STR
      "IGMP join multicast group\n"
      "Multicast group address\n"
      "Source address\n");

DEFPY_YANG (interface_ip_igmp_static_group,
	    interface_ip_igmp_static_group_cmd,
	    "[no] ip igmp static-group A.B.C.D$grp [A.B.C.D]$src",
	    NO_STR
	    IP_STR
	    IFACE_IGMP_STR
	    "Static multicast group\n"
	    "Multicast group address\n"
	    "Source address\n")
{
	nb_cli_enqueue_change(vty, ".", (!no ? NB_OP_CREATE : NB_OP_DESTROY),
			      NULL);
	return nb_cli_apply_changes(vty, FRR_GMP_STATIC_GROUP_XPATH,
				    "frr-routing:ipv4", grp_str,
				    (src_str ? src_str : "0.0.0.0"));
}
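
/*
 * Illustrative interface configuration using the join/static-group
 * commands above (interface name and addresses are made up for the
 * example):
 *
 *   interface eth0
 *    ip igmp join 239.1.1.1 192.0.2.10
 *    ip igmp static-group 239.1.1.2
 *
 * When the optional source is omitted the handlers substitute "0.0.0.0",
 * i.e. an any-source entry for the group.
 */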

DEFUN_YANG (interface_ip_igmp_query_interval,
	    interface_ip_igmp_query_interval_cmd,
	    "ip igmp query-interval (1-65535)",
	    IP_STR
	    IFACE_IGMP_STR
	    IFACE_IGMP_QUERY_INTERVAL_STR
	    "Query interval in seconds\n")
{
	const struct lyd_node *pim_enable_dnode;

	pim_enable_dnode =
		yang_dnode_getf(vty->candidate_config->dnode,
				FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
				"frr-routing:ipv4");
	if (!pim_enable_dnode) {
		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
				      "true");
	} else {
		if (!yang_dnode_get_bool(pim_enable_dnode, "."))
			nb_cli_enqueue_change(vty, "./enable",
					      NB_OP_MODIFY, "true");
	}

	nb_cli_enqueue_change(vty, "./query-interval", NB_OP_MODIFY,
			      argv[3]->arg);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (interface_no_ip_igmp_query_interval,
	    interface_no_ip_igmp_query_interval_cmd,
	    "no ip igmp query-interval [(1-65535)]",
	    NO_STR
	    IP_STR
	    IFACE_IGMP_STR
	    IFACE_IGMP_QUERY_INTERVAL_STR
	    IGNORED_IN_NO_STR)
{
	nb_cli_enqueue_change(vty, "./query-interval", NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (interface_ip_igmp_version,
	    interface_ip_igmp_version_cmd,
	    "ip igmp version (2-3)",
	    IP_STR
	    IFACE_IGMP_STR
	    "IGMP version\n"
	    "IGMP version number\n")
{
	nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
			      "true");
	nb_cli_enqueue_change(vty, "./igmp-version", NB_OP_MODIFY,
			      argv[3]->arg);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (interface_no_ip_igmp_version,
	    interface_no_ip_igmp_version_cmd,
	    "no ip igmp version (2-3)",
	    NO_STR
	    IP_STR
	    IFACE_IGMP_STR
	    "IGMP version\n"
	    "IGMP version number\n")
{
	nb_cli_enqueue_change(vty, "./igmp-version", NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFPY_YANG (interface_ip_igmp_query_max_response_time,
	    interface_ip_igmp_query_max_response_time_cmd,
	    "ip igmp query-max-response-time (1-65535)$qmrt",
	    IP_STR
	    IFACE_IGMP_STR
	    IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR
	    "Query response value in deci-seconds\n")
{
	return gm_process_query_max_response_time_cmd(vty, qmrt_str);
}

DEFUN_YANG (interface_no_ip_igmp_query_max_response_time,
	    interface_no_ip_igmp_query_max_response_time_cmd,
	    "no ip igmp query-max-response-time [(1-65535)]",
	    NO_STR
	    IP_STR
	    IFACE_IGMP_STR
	    IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_STR
	    IGNORED_IN_NO_STR)
{
	return gm_process_no_query_max_response_time_cmd(vty);
}

DEFUN_YANG_HIDDEN (interface_ip_igmp_query_max_response_time_dsec,
		   interface_ip_igmp_query_max_response_time_dsec_cmd,
		   "ip igmp query-max-response-time-dsec (1-65535)",
		   IP_STR
		   IFACE_IGMP_STR
		   IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_DSEC_STR
		   "Query response value in deciseconds\n")
{
	const struct lyd_node *pim_enable_dnode;

	pim_enable_dnode =
		yang_dnode_getf(vty->candidate_config->dnode,
				FRR_PIM_ENABLE_XPATH, VTY_CURR_XPATH,
				"frr-routing:ipv4");
	if (!pim_enable_dnode) {
		nb_cli_enqueue_change(vty, "./enable", NB_OP_MODIFY,
				      "true");
	} else {
		if (!yang_dnode_get_bool(pim_enable_dnode, "."))
			nb_cli_enqueue_change(vty, "./enable",
					      NB_OP_MODIFY, "true");
	}

	nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_MODIFY,
			      argv[3]->arg);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG_HIDDEN (interface_no_ip_igmp_query_max_response_time_dsec,
		   interface_no_ip_igmp_query_max_response_time_dsec_cmd,
		   "no ip igmp query-max-response-time-dsec [(1-65535)]",
		   NO_STR
		   IP_STR
		   IFACE_IGMP_STR
		   IFACE_IGMP_QUERY_MAX_RESPONSE_TIME_DSEC_STR
		   IGNORED_IN_NO_STR)
{
	nb_cli_enqueue_change(vty, "./query-max-response-time", NB_OP_DESTROY,
			      NULL);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFPY (interface_ip_igmp_last_member_query_count,
       interface_ip_igmp_last_member_query_count_cmd,
       "ip igmp last-member-query-count (1-255)$lmqc",
       IP_STR
       IFACE_IGMP_STR
       IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR
       "Last member query count\n")
{
	return gm_process_last_member_query_count_cmd(vty, lmqc_str);
}

DEFUN (interface_no_ip_igmp_last_member_query_count,
       interface_no_ip_igmp_last_member_query_count_cmd,
       "no ip igmp last-member-query-count [(1-255)]",
       NO_STR
       IP_STR
       IFACE_IGMP_STR
       IFACE_IGMP_LAST_MEMBER_QUERY_COUNT_STR
       IGNORED_IN_NO_STR)
{
	return gm_process_no_last_member_query_count_cmd(vty);
}

DEFPY (interface_ip_igmp_last_member_query_interval,
       interface_ip_igmp_last_member_query_interval_cmd,
       "ip igmp last-member-query-interval (1-65535)$lmqi",
       IP_STR
       IFACE_IGMP_STR
       IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR
       "Last member query interval in deciseconds\n")
{
	return gm_process_last_member_query_interval_cmd(vty, lmqi_str);
}

DEFUN (interface_no_ip_igmp_last_member_query_interval,
       interface_no_ip_igmp_last_member_query_interval_cmd,
       "no ip igmp last-member-query-interval [(1-65535)]",
       NO_STR
       IP_STR
       IFACE_IGMP_STR
       IFACE_IGMP_LAST_MEMBER_QUERY_INTERVAL_STR
       IGNORED_IN_NO_STR)
{
	return gm_process_no_last_member_query_interval_cmd(vty);
}

DEFPY_YANG(interface_ip_igmp_limits,
	   interface_ip_igmp_limits_cmd,
	   "[no] ip igmp <max-sources$do_src (0-4294967295)$val"
	   "|max-groups$do_grp (0-4294967295)$val>",
	   NO_STR
	   IP_STR
	   IFACE_IGMP_STR
	   "Limit number of IGMPv3 sources to track\n"
	   "Permitted number of sources\n"
	   "Limit number of IGMP group memberships to track\n"
	   "Permitted number of groups\n")
{
	const char *xpath;

	assert(do_src || do_grp);
	if (do_src)
		xpath = "./max-sources";
	else
		xpath = "./max-groups";

	if (no)
		nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
	else
		nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, val_str);

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}
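
/*
 * Example (values are arbitrary): "ip igmp max-groups 4000" modifies the
 * "./max-groups" leaf, "ip igmp max-sources 1000" the "./max-sources"
 * leaf, and the "no" forms destroy the corresponding node so the limit
 * falls back to its default.
 */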
ALIAS_YANG(interface_ip_igmp_limits,
	   no_interface_ip_igmp_limits_cmd,
	   "no ip igmp <max-sources$do_src|max-groups$do_grp>",
	   NO_STR
	   IP_STR
	   IFACE_IGMP_STR
	   "Limit number of IGMPv3 sources to track\n"
	   "Limit number of IGMP group memberships to track\n")

DEFPY_YANG(interface_ip_igmp_immediate_leave,
	   interface_ip_igmp_immediate_leave_cmd,
	   "[no] ip igmp immediate-leave",
	   NO_STR
	   IP_STR
	   IFACE_IGMP_STR
	   "Immediately drop group memberships on receiving Leave (IGMPv2 only)\n")
{
	nb_cli_enqueue_change(vty, "./immediate-leave", NB_OP_MODIFY, no ? "false" : "true");

	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}

DEFUN (interface_ip_pim_drprio,
       interface_ip_pim_drprio_cmd,
       "ip pim drpriority (0-4294967295)",
       IP_STR
       PIM_STR
       "Set the Designated Router Election Priority\n"
       "Value of the new DR Priority\n")
{
	int idx_number = 3;

	return pim_process_ip_pim_drprio_cmd(vty, argv[idx_number]->arg);
}

DEFUN (interface_no_ip_pim_drprio,
       interface_no_ip_pim_drprio_cmd,
       "no ip pim drpriority [(0-4294967295)]",
       NO_STR
       IP_STR
       PIM_STR
       "Revert the Designated Router Priority to default\n"
       "Old Value of the Priority\n")
{
	return pim_process_no_ip_pim_drprio_cmd(vty);
}

DEFPY_HIDDEN (interface_ip_igmp_query_generate,
	      interface_ip_igmp_query_generate_cmd,
	      "ip igmp generate-query-once [version (2-3)]",
	      IP_STR
	      IFACE_IGMP_STR
	      "Generate igmp general query once\n"
	      "IGMP version\n"
	      "IGMP version number\n")
{
#if PIM_IPV == 4
	VTY_DECLVAR_CONTEXT(interface, ifp);
	int igmp_version;
	struct pim_interface *pim_ifp = ifp->info;

	if (!ifp->info) {
		vty_out(vty, "IGMP/PIM is not enabled on the interface %s\n",
			ifp->name);
		return CMD_WARNING_CONFIG_FAILED;
	}

	/* It takes the igmp version configured on the interface as default */
	igmp_version = pim_ifp->igmp_version;

	if (argc > 3)
		igmp_version = atoi(argv[4]->arg);

	igmp_send_query_on_intf(ifp, igmp_version);
#endif
	return CMD_SUCCESS;
}

DEFPY_HIDDEN (pim_test_sg_keepalive,
	      pim_test_sg_keepalive_cmd,
	      "test pim [vrf NAME$name] keepalive-reset A.B.C.D$source A.B.C.D$group",
	      "Test code\n"
	      PIM_STR
	      VRF_CMD_HELP_STR
	      "Reset the Keepalive Timer\n"
	      "The Source we are resetting\n"
	      "The Group we are resetting\n")
{
	struct pim_upstream *up;
	struct vrf *vrf;
	struct pim_instance *pim;
	pim_sgaddr sg;

	sg.src = source;
	sg.grp = group;

	vrf = vrf_lookup_by_name(name ? name : VRF_DEFAULT_NAME);
	if (!vrf) {
		vty_out(vty, "%% Vrf specified: %s does not exist\n", name);
		return CMD_WARNING;
	}

	pim = vrf->info;

	if (!pim) {
		vty_out(vty, "%% Unable to find pim instance\n");
		return CMD_WARNING;
	}

	up = pim_upstream_find(pim, &sg);
	if (!up) {
		vty_out(vty, "%% Unable to find %pSG specified\n", &sg);
		return CMD_WARNING;
	}

	vty_out(vty, "Setting %pSG to current keep alive time: %d\n", &sg,
		pim->keep_alive_time);
	pim_upstream_keep_alive_timer_start(up, pim->keep_alive_time);

	return CMD_SUCCESS;
}

DEFPY (interface_ip_pim_activeactive,
       interface_ip_pim_activeactive_cmd,
       "[no$no] ip pim active-active",
       NO_STR
       IP_STR
       PIM_STR
       "Mark interface as Active-Active for MLAG operations, Hidden because not finished yet\n")
{
	return pim_process_ip_pim_activeactive_cmd(vty, no);
}

DEFUN_HIDDEN (interface_ip_pim_ssm,
	      interface_ip_pim_ssm_cmd,
	      "ip pim ssm",
	      IP_STR
	      PIM_STR
	      IFACE_PIM_STR)
{
	int ret;

	ret = pim_process_ip_pim_cmd(vty);

	if (ret != NB_OK)
		return ret;

	vty_out(vty,
		"WARN: Enabled PIM SM on interface; configure PIM SSM range if needed\n");

	return NB_OK;
}

DEFUN_HIDDEN (interface_ip_pim_sm,
	      interface_ip_pim_sm_cmd,
	      "ip pim sm",
	      IP_STR
	      PIM_STR
	      IFACE_PIM_SM_STR)
{
	return pim_process_ip_pim_cmd(vty);
}

DEFPY (interface_ip_pim,
       interface_ip_pim_cmd,
       "ip pim [passive$passive]",
       IP_STR
       PIM_STR
       "Disable exchange of protocol packets\n")
{
	int ret;

	ret = pim_process_ip_pim_cmd(vty);

	if (ret != NB_OK)
		return ret;

	if (passive)
		return pim_process_ip_pim_passive_cmd(vty, true);

	return CMD_SUCCESS;
}

DEFUN_HIDDEN (interface_no_ip_pim_ssm,
	      interface_no_ip_pim_ssm_cmd,
	      "no ip pim ssm",
	      NO_STR
	      IP_STR
	      PIM_STR
	      IFACE_PIM_STR)
{
	return pim_process_no_ip_pim_cmd(vty);
}

DEFUN_HIDDEN (interface_no_ip_pim_sm,
	      interface_no_ip_pim_sm_cmd,
	      "no ip pim sm",
	      NO_STR
	      IP_STR
	      PIM_STR
	      IFACE_PIM_SM_STR)
{
	return pim_process_no_ip_pim_cmd(vty);
}

DEFPY (interface_no_ip_pim,
       interface_no_ip_pim_cmd,
       "no ip pim [passive$passive]",
       NO_STR
       IP_STR
       PIM_STR
       "Disable exchange of protocol packets\n")
{
	if (passive)
		return pim_process_ip_pim_passive_cmd(vty, false);

	return pim_process_no_ip_pim_cmd(vty);
}

/* boundaries */
DEFUN_YANG(interface_ip_pim_boundary_oil,
	   interface_ip_pim_boundary_oil_cmd,
	   "ip multicast boundary oil WORD",
	   IP_STR
	   "Generic multicast configuration options\n"
	   "Define multicast boundary\n"
	   "Filter OIL by group using prefix list\n"
	   "Prefix list to filter OIL with\n")
{
	return pim_process_ip_pim_boundary_oil_cmd(vty, argv[4]->arg);
}

DEFUN_YANG(interface_no_ip_pim_boundary_oil,
	   interface_no_ip_pim_boundary_oil_cmd,
	   "no ip multicast boundary oil [WORD]",
	   NO_STR
	   IP_STR
	   "Generic multicast configuration options\n"
	   "Define multicast boundary\n"
	   "Filter OIL by group using prefix list\n"
	   "Prefix list to filter OIL with\n")
{
	return pim_process_no_ip_pim_boundary_oil_cmd(vty);
}

DEFPY_YANG(interface_ip_pim_boundary_acl,
	   interface_ip_pim_boundary_acl_cmd,
	   "[no] ip multicast boundary ACCESSLIST4_NAME$name",
	   NO_STR
	   IP_STR
	   "Generic multicast configuration options\n"
	   "Define multicast boundary\n"
	   "Access-list to filter OIL with by source and group\n")
{
	nb_cli_enqueue_change(vty, "./multicast-boundary-acl",
			      (!!no ? NB_OP_DESTROY : NB_OP_MODIFY), name);

	return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}
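
/*
 * The three boundary commands above offer two filter types: "ip multicast
 * boundary oil WORD" attaches a prefix-list that is matched on the group,
 * while "[no] ip multicast boundary ACCESSLIST4_NAME" stores an access-list
 * name in "./multicast-boundary-acl" and filters on source and group, as
 * the help strings state.
 */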

DEFUN (interface_ip_mroute,
       interface_ip_mroute_cmd,
       "ip mroute INTERFACE A.B.C.D [A.B.C.D]",
       IP_STR
       "Add multicast route\n"
       "Outgoing interface name\n"
       "Group address\n"
       "Source address\n")
{
	int idx_interface = 2;
	int idx_ipv4 = 3;
	const char *source_str;

	if (argc == (idx_ipv4 + 1))
		source_str = "0.0.0.0";
	else
		source_str = argv[idx_ipv4 + 1]->arg;

	return pim_process_ip_mroute_cmd(vty, argv[idx_interface]->arg,
					 argv[idx_ipv4]->arg, source_str);
}

DEFUN (interface_no_ip_mroute,
       interface_no_ip_mroute_cmd,
       "no ip mroute INTERFACE A.B.C.D [A.B.C.D]",
       NO_STR
       IP_STR
       "Add multicast route\n"
       "Outgoing interface name\n"
       "Group Address\n"
       "Source Address\n")
{
	int idx_interface = 3;
	int idx_ipv4 = 4;
	const char *source_str;

	if (argc == (idx_ipv4 + 1))
		source_str = "0.0.0.0";
	else
		source_str = argv[idx_ipv4 + 1]->arg;

	return pim_process_no_ip_mroute_cmd(vty, argv[idx_interface]->arg,
					    argv[idx_ipv4]->arg, source_str);
}
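
/*
 * Illustrative use of the static mroute commands above (interface names
 * and addresses invented for the example, configured under the incoming
 * interface):
 *
 *   interface eth1
 *    ip mroute eth2 239.2.2.2 198.51.100.5
 *
 * With the source omitted, source_str defaults to "0.0.0.0" exactly as in
 * the handlers above.
 */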

DEFUN (interface_ip_pim_hello,
       interface_ip_pim_hello_cmd,
       "ip pim hello (1-65535) [(1-65535)]",
       IP_STR
       PIM_STR
       IFACE_PIM_HELLO_STR
       IFACE_PIM_HELLO_TIME_STR
       IFACE_PIM_HELLO_HOLD_STR)
{
	int idx_time = 3;
	int idx_hold = 4;

	if (argc == idx_hold + 1)
		return pim_process_ip_pim_hello_cmd(vty, argv[idx_time]->arg,
						    argv[idx_hold]->arg);

	else
		return pim_process_ip_pim_hello_cmd(vty, argv[idx_time]->arg,
						    NULL);
}

DEFUN (interface_no_ip_pim_hello,
       interface_no_ip_pim_hello_cmd,
       "no ip pim hello [(1-65535) [(1-65535)]]",
       NO_STR
       IP_STR
       PIM_STR
       IFACE_PIM_HELLO_STR
       IGNORED_IN_NO_STR
       IGNORED_IN_NO_STR)
{
	return pim_process_no_ip_pim_hello_cmd(vty);
}
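
/*
 * Example hello tuning (numbers are arbitrary): "ip pim hello 30 105"
 * passes both the hello time and hold time to
 * pim_process_ip_pim_hello_cmd(); "ip pim hello 30" passes NULL for the
 * hold time and lets the daemon derive it.  "no ip pim hello" ignores any
 * trailing numbers, as the IGNORED_IN_NO_STR help strings indicate.
 */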

DEFPY (interface_ip_igmp_proxy,
       interface_ip_igmp_proxy_cmd,
       "[no] ip igmp proxy",
       NO_STR
       IP_STR
       IGMP_STR
       "Proxy IGMP join/prune operations\n")
{
	return pim_process_ip_gmp_proxy_cmd(vty, !no);
}

DEFPY_YANG(interface_ip_pim_neighbor_prefix_list,
	   interface_ip_pim_neighbor_prefix_list_cmd,
	   "[no] ip pim allowed-neighbors prefix-list WORD",
	   NO_STR
	   IP_STR
	   "pim multicast routing\n"
	   "Restrict allowed PIM neighbors\n"
	   "Use prefix-list to filter neighbors\n"
	   "Name of a prefix-list\n")
{
	if (no)
		nb_cli_enqueue_change(vty, "./neighbor-filter-prefix-list", NB_OP_DESTROY, NULL);
	else
		nb_cli_enqueue_change(vty, "./neighbor-filter-prefix-list", NB_OP_MODIFY,
				      prefix_list);

	return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}

ALIAS (interface_ip_pim_neighbor_prefix_list,
       interface_no_ip_pim_neighbor_prefix_list_cmd,
       "no ip pim allowed-neighbors [prefix-list]",
       NO_STR
       IP_STR
       "pim multicast routing\n"
       "Restrict allowed PIM neighbors\n"
       "Use prefix-list to filter neighbors\n")

DEFUN (debug_igmp,
       debug_igmp_cmd,
       "debug igmp",
       DEBUG_STR
       DEBUG_IGMP_STR)
{
	PIM_DO_DEBUG_GM_EVENTS;
	PIM_DO_DEBUG_GM_PACKETS;
	PIM_DO_DEBUG_GM_TRACE;
	return CMD_SUCCESS;
}

DEFUN (no_debug_igmp,
       no_debug_igmp_cmd,
       "no debug igmp",
       NO_STR
       DEBUG_STR
       DEBUG_IGMP_STR)
{
	PIM_DONT_DEBUG_GM_EVENTS;
	PIM_DONT_DEBUG_GM_PACKETS;
	PIM_DONT_DEBUG_GM_TRACE;
	return CMD_SUCCESS;
}


DEFUN (debug_igmp_events,
       debug_igmp_events_cmd,
       "debug igmp events",
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_EVENTS_STR)
{
	PIM_DO_DEBUG_GM_EVENTS;
	return CMD_SUCCESS;
}

DEFUN (no_debug_igmp_events,
       no_debug_igmp_events_cmd,
       "no debug igmp events",
       NO_STR
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_EVENTS_STR)
{
	PIM_DONT_DEBUG_GM_EVENTS;
	return CMD_SUCCESS;
}


DEFUN (debug_igmp_packets,
       debug_igmp_packets_cmd,
       "debug igmp packets",
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_PACKETS_STR)
{
	PIM_DO_DEBUG_GM_PACKETS;
	return CMD_SUCCESS;
}

DEFUN (no_debug_igmp_packets,
       no_debug_igmp_packets_cmd,
       "no debug igmp packets",
       NO_STR
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_PACKETS_STR)
{
	PIM_DONT_DEBUG_GM_PACKETS;
	return CMD_SUCCESS;
}


DEFUN (debug_igmp_trace,
       debug_igmp_trace_cmd,
       "debug igmp trace",
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_TRACE_STR)
{
	PIM_DO_DEBUG_GM_TRACE;
	return CMD_SUCCESS;
}

DEFUN (no_debug_igmp_trace,
       no_debug_igmp_trace_cmd,
       "no debug igmp trace",
       NO_STR
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_TRACE_STR)
{
	PIM_DONT_DEBUG_GM_TRACE;
	return CMD_SUCCESS;
}

DEFUN (debug_igmp_trace_detail,
       debug_igmp_trace_detail_cmd,
       "debug igmp trace detail",
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_TRACE_STR
       "detailed\n")
{
	PIM_DO_DEBUG_GM_TRACE_DETAIL;
	return CMD_SUCCESS;
}

DEFUN (no_debug_igmp_trace_detail,
       no_debug_igmp_trace_detail_cmd,
       "no debug igmp trace detail",
       NO_STR
       DEBUG_STR
       DEBUG_IGMP_STR
       DEBUG_IGMP_TRACE_STR
       "detailed\n")
{
	PIM_DONT_DEBUG_GM_TRACE_DETAIL;
	return CMD_SUCCESS;
}

DEFUN (debug_mroute,
       debug_mroute_cmd,
       "debug mroute",
       DEBUG_STR
       DEBUG_MROUTE_STR)
{
	PIM_DO_DEBUG_MROUTE;
	return CMD_SUCCESS;
}

DEFUN (debug_mroute_detail,
       debug_mroute_detail_cmd,
       "debug mroute detail",
       DEBUG_STR
       DEBUG_MROUTE_STR
       "detailed\n")
{
	PIM_DO_DEBUG_MROUTE_DETAIL;
	return CMD_SUCCESS;
}

DEFUN (no_debug_mroute,
       no_debug_mroute_cmd,
       "no debug mroute",
       NO_STR
       DEBUG_STR
       DEBUG_MROUTE_STR)
{
	PIM_DONT_DEBUG_MROUTE;
	return CMD_SUCCESS;
}

DEFUN (no_debug_mroute_detail,
       no_debug_mroute_detail_cmd,
       "no debug mroute detail",
       NO_STR
       DEBUG_STR
       DEBUG_MROUTE_STR
       "detailed\n")
{
	PIM_DONT_DEBUG_MROUTE_DETAIL;
	return CMD_SUCCESS;
}

DEFUN (debug_pim_static,
       debug_pim_static_cmd,
       "debug pim static",
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_STATIC_STR)
{
	PIM_DO_DEBUG_STATIC;
	return CMD_SUCCESS;
}

DEFUN (no_debug_pim_static,
       no_debug_pim_static_cmd,
       "no debug pim static",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_STATIC_STR)
{
	PIM_DONT_DEBUG_STATIC;
	return CMD_SUCCESS;
}

DEFPY (debug_pim,
       debug_pim_cmd,
       "[no] debug pim",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR)
{
	if (!no)
		return pim_debug_pim_cmd();
	else
		return pim_no_debug_pim_cmd();
}

DEFPY (debug_pim_nht,
       debug_pim_nht_cmd,
       "[no] debug pim nht",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       "Nexthop Tracking\n")
{
	if (!no)
		PIM_DO_DEBUG_PIM_NHT;
	else
		PIM_DONT_DEBUG_PIM_NHT;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_nht_det,
       debug_pim_nht_det_cmd,
       "[no] debug pim nht detail",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       "Nexthop Tracking\n"
       "Detailed Information\n")
{
	if (!no)
		PIM_DO_DEBUG_PIM_NHT_DETAIL;
	else
		PIM_DONT_DEBUG_PIM_NHT_DETAIL;
	return CMD_SUCCESS;
}

DEFUN (debug_pim_nht_rp,
       debug_pim_nht_rp_cmd,
       "debug pim nht rp",
       DEBUG_STR
       DEBUG_PIM_STR
       "Nexthop Tracking\n"
       "RP Nexthop Tracking\n")
{
	PIM_DO_DEBUG_PIM_NHT_RP;
	return CMD_SUCCESS;
}

DEFUN (no_debug_pim_nht_rp,
       no_debug_pim_nht_rp_cmd,
       "no debug pim nht rp",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       "Nexthop Tracking\n"
       "RP Nexthop Tracking\n")
{
	PIM_DONT_DEBUG_PIM_NHT_RP;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_events,
       debug_pim_events_cmd,
       "[no] debug pim events",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_EVENTS_STR)
{
	if (!no)
		PIM_DO_DEBUG_PIM_EVENTS;
	else
		PIM_DONT_DEBUG_PIM_EVENTS;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_packets,
       debug_pim_packets_cmd,
       "[no] debug pim packets [<hello$hello|joins$joins|register$registers>]",
       NO_STR DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_PACKETS_STR
       DEBUG_PIM_HELLO_PACKETS_STR
       DEBUG_PIM_J_P_PACKETS_STR
       DEBUG_PIM_PIM_REG_PACKETS_STR)
{
	if (!no)
		return pim_debug_pim_packets_cmd(hello, joins, registers, vty);
	else
		return pim_no_debug_pim_packets_cmd(hello, joins, registers,
						    vty);
}

DEFPY (debug_pim_packetdump_send,
       debug_pim_packetdump_send_cmd,
       "[no] debug pim packet-dump send",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_PACKETDUMP_STR
       DEBUG_PIM_PACKETDUMP_SEND_STR)
{
	if (!no)
		PIM_DO_DEBUG_PIM_PACKETDUMP_SEND;
	else
		PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_packetdump_recv,
       debug_pim_packetdump_recv_cmd,
       "[no] debug pim packet-dump receive",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_PACKETDUMP_STR
       DEBUG_PIM_PACKETDUMP_RECV_STR)
{
	if (!no)
		PIM_DO_DEBUG_PIM_PACKETDUMP_RECV;
	else
		PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_trace,
       debug_pim_trace_cmd,
       "[no] debug pim trace",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_TRACE_STR)
{
	if (!no)
		PIM_DO_DEBUG_PIM_TRACE;
	else
		PIM_DONT_DEBUG_PIM_TRACE;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_trace_detail,
       debug_pim_trace_detail_cmd,
       "[no] debug pim trace detail",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_TRACE_STR
       "Detailed Information\n")
{
	if (!no)
		PIM_DO_DEBUG_PIM_TRACE_DETAIL;
	else
		PIM_DONT_DEBUG_PIM_TRACE_DETAIL;
	return CMD_SUCCESS;
}

DEFUN (debug_ssmpingd,
       debug_ssmpingd_cmd,
       "debug ssmpingd",
       DEBUG_STR
       DEBUG_SSMPINGD_STR)
{
	PIM_DO_DEBUG_SSMPINGD;
	return CMD_SUCCESS;
}

DEFUN (no_debug_ssmpingd,
       no_debug_ssmpingd_cmd,
       "no debug ssmpingd",
       NO_STR
       DEBUG_STR
       DEBUG_SSMPINGD_STR)
{
	PIM_DONT_DEBUG_SSMPINGD;
	return CMD_SUCCESS;
}

DEFPY (debug_pim_zebra,
       debug_pim_zebra_cmd,
       "[no] debug pim zebra",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_ZEBRA_STR)
{
	if (!no)
		PIM_DO_DEBUG_ZEBRA;
	else
		PIM_DONT_DEBUG_ZEBRA;
	return CMD_SUCCESS;
}

DEFUN(debug_pim_mlag,
      debug_pim_mlag_cmd,
      "debug pim mlag",
      DEBUG_STR
      DEBUG_PIM_STR
      DEBUG_PIM_MLAG_STR)
{
	PIM_DO_DEBUG_MLAG;
	return CMD_SUCCESS;
}

DEFUN(no_debug_pim_mlag,
      no_debug_pim_mlag_cmd,
      "no debug pim mlag",
      NO_STR
      DEBUG_STR
      DEBUG_PIM_STR
      DEBUG_PIM_MLAG_STR)
{
	PIM_DONT_DEBUG_MLAG;
	return CMD_SUCCESS;
}

DEFUN (debug_pim_vxlan,
       debug_pim_vxlan_cmd,
       "debug pim vxlan",
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_VXLAN_STR)
{
	PIM_DO_DEBUG_VXLAN;
	return CMD_SUCCESS;
}

DEFUN (no_debug_pim_vxlan,
       no_debug_pim_vxlan_cmd,
       "no debug pim vxlan",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_VXLAN_STR)
{
	PIM_DONT_DEBUG_VXLAN;
	return CMD_SUCCESS;
}
|
|
|
|
|
DEFUN (debug_msdp,
       debug_msdp_cmd,
       "debug msdp",
       DEBUG_STR
       DEBUG_MSDP_STR)
{
	PIM_DO_DEBUG_MSDP_EVENTS;
	PIM_DO_DEBUG_MSDP_PACKETS;
	return CMD_SUCCESS;
}

DEFUN (no_debug_msdp,
       no_debug_msdp_cmd,
       "no debug msdp",
       NO_STR
       DEBUG_STR
       DEBUG_MSDP_STR)
{
	PIM_DONT_DEBUG_MSDP_EVENTS;
	PIM_DONT_DEBUG_MSDP_PACKETS;
	return CMD_SUCCESS;
}

DEFUN (debug_msdp_events,
       debug_msdp_events_cmd,
       "debug msdp events",
       DEBUG_STR
       DEBUG_MSDP_STR
       DEBUG_MSDP_EVENTS_STR)
{
	PIM_DO_DEBUG_MSDP_EVENTS;
	return CMD_SUCCESS;
}

DEFUN (no_debug_msdp_events,
       no_debug_msdp_events_cmd,
       "no debug msdp events",
       NO_STR
       DEBUG_STR
       DEBUG_MSDP_STR
       DEBUG_MSDP_EVENTS_STR)
{
	PIM_DONT_DEBUG_MSDP_EVENTS;
	return CMD_SUCCESS;
}

DEFUN (debug_msdp_packets,
       debug_msdp_packets_cmd,
       "debug msdp packets",
       DEBUG_STR
       DEBUG_MSDP_STR
       DEBUG_MSDP_PACKETS_STR)
{
	PIM_DO_DEBUG_MSDP_PACKETS;
	return CMD_SUCCESS;
}

DEFUN (no_debug_msdp_packets,
       no_debug_msdp_packets_cmd,
       "no debug msdp packets",
       NO_STR
       DEBUG_STR
       DEBUG_MSDP_STR
       DEBUG_MSDP_PACKETS_STR)
{
	PIM_DONT_DEBUG_MSDP_PACKETS;
	return CMD_SUCCESS;
}

DEFUN (debug_mtrace,
       debug_mtrace_cmd,
       "debug mtrace",
       DEBUG_STR
       DEBUG_MTRACE_STR)
{
	PIM_DO_DEBUG_MTRACE;
	return CMD_SUCCESS;
}

DEFUN (no_debug_mtrace,
       no_debug_mtrace_cmd,
       "no debug mtrace",
       NO_STR
       DEBUG_STR
       DEBUG_MTRACE_STR)
{
	PIM_DONT_DEBUG_MTRACE;
	return CMD_SUCCESS;
}

DEFUN (debug_bsm,
       debug_bsm_cmd,
       "debug pim bsm",
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_BSM_STR)
{
	PIM_DO_DEBUG_BSM;
	return CMD_SUCCESS;
}

DEFUN (no_debug_bsm,
       no_debug_bsm_cmd,
       "no debug pim bsm",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_BSM_STR)
{
	PIM_DONT_DEBUG_BSM;
	return CMD_SUCCESS;
}

DEFUN (debug_autorp,
       debug_autorp_cmd,
       "debug pim autorp",
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_AUTORP_STR)
{
	PIM_DO_DEBUG_AUTORP;
	return CMD_SUCCESS;
}

DEFUN (no_debug_autorp,
       no_debug_autorp_cmd,
       "no debug pim autorp",
       NO_STR
       DEBUG_STR
       DEBUG_PIM_STR
       DEBUG_PIM_AUTORP_STR)
{
	PIM_DONT_DEBUG_AUTORP;
	return CMD_SUCCESS;
}

DEFUN_NOSH (show_debugging_pim,
	    show_debugging_pim_cmd,
	    "show debugging [pim]",
	    SHOW_STR
	    DEBUG_STR
	    PIM_STR)
{
	vty_out(vty, "PIM debugging status\n");

	pim_debug_config_write(vty);

	cmd_show_lib_debugs(vty);

	return CMD_SUCCESS;
}

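/*
 * Note on the interface-level configuration commands that follow: each
 * handler queues candidate changes with nb_cli_enqueue_change() and then
 * commits them against the interface's frr-pim xpath
 * (FRR_PIM_INTERFACE_XPATH, "frr-routing:ipv4") via nb_cli_apply_changes().
 */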
DEFUN_YANG (interface_pim_use_source,
	    interface_pim_use_source_cmd,
	    "ip pim use-source A.B.C.D",
	    IP_STR
	    PIM_STR
	    "Configure primary IP address\n"
	    "source ip address\n")
{
	nb_cli_enqueue_change(vty, "./use-source", NB_OP_MODIFY, argv[3]->arg);

	return nb_cli_apply_changes(vty,
				    FRR_PIM_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (interface_no_pim_use_source,
	    interface_no_pim_use_source_cmd,
	    "no ip pim use-source [A.B.C.D]",
	    NO_STR
	    IP_STR
	    PIM_STR
	    "Delete source IP address\n"
	    "source ip address\n")
{
	nb_cli_enqueue_change(vty, "./use-source", NB_OP_MODIFY, "0.0.0.0");

	return nb_cli_apply_changes(vty,
				    FRR_PIM_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

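/*
 * Note: enabling BFD implicitly enables PIM on the interface when the
 * candidate config has neither GM nor PIM enabled yet (see the
 * igmp_enable_dnode check below); the BFD profile argument is optional.
 */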
DEFPY_YANG (ip_pim_bfd,
	    ip_pim_bfd_cmd,
	    "ip pim bfd [profile BFDPROF$prof]",
	    IP_STR
	    PIM_STR
	    "Enables BFD support\n"
	    "Use BFD profile\n"
	    "Use BFD profile name\n")
{
	const struct lyd_node *igmp_enable_dnode;

	igmp_enable_dnode =
		yang_dnode_getf(vty->candidate_config->dnode,
				FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
				"frr-routing:ipv4");
	if (!igmp_enable_dnode)
		nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
				      "true");
	else {
		if (!yang_dnode_get_bool(igmp_enable_dnode, "."))
			nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
					      "true");
	}

	nb_cli_enqueue_change(vty, "./bfd", NB_OP_CREATE, NULL);
	if (prof)
		nb_cli_enqueue_change(vty, "./bfd/profile", NB_OP_MODIFY, prof);

	return nb_cli_apply_changes(vty,
				    FRR_PIM_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFPY_YANG(no_ip_pim_bfd_profile, no_ip_pim_bfd_profile_cmd,
	   "no ip pim bfd profile [BFDPROF]",
	   NO_STR
	   IP_STR
	   PIM_STR
	   "Enables BFD support\n"
	   "Disable BFD profile\n"
	   "BFD Profile name\n")
{
	nb_cli_enqueue_change(vty, "./bfd/profile", NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty,
				    FRR_PIM_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (no_ip_pim_bfd,
	    no_ip_pim_bfd_cmd,
	    "no ip pim bfd",
	    NO_STR
	    IP_STR
	    PIM_STR
	    "Disables BFD support\n")
{
	nb_cli_enqueue_change(vty, "./bfd", NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty,
				    FRR_PIM_INTERFACE_XPATH,
				    "frr-routing:ipv4");
}

DEFUN_YANG (ip_pim_bsm,
	    ip_pim_bsm_cmd,
	    "ip pim bsm",
	    IP_STR
	    PIM_STR
	    "Enable BSM support on the interface\n")
{
	return pim_process_bsm_cmd(vty);
}

DEFUN_YANG (no_ip_pim_bsm,
	    no_ip_pim_bsm_cmd,
	    "no ip pim bsm",
	    NO_STR
	    IP_STR
	    PIM_STR
	    "Enable BSM support on the interface\n")
{
	return pim_process_no_bsm_cmd(vty);
}

DEFUN_YANG (ip_pim_ucast_bsm,
	    ip_pim_ucast_bsm_cmd,
	    "ip pim unicast-bsm",
	    IP_STR
	    PIM_STR
	    "Accept/Send unicast BSM on the interface\n")
{
	return pim_process_unicast_bsm_cmd(vty);
}

DEFUN_YANG (no_ip_pim_ucast_bsm,
	    no_ip_pim_ucast_bsm_cmd,
	    "no ip pim unicast-bsm",
	    NO_STR
	    IP_STR
	    PIM_STR
	    "Accept/Send unicast BSM on the interface\n")
{
	return pim_process_no_unicast_bsm_cmd(vty);
}

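/*
 * When the separate bfdd daemon is compiled in (HAVE_BFDD > 0), the
 * parameterized "ip pim bfd (2-255) (1-65535) (1-65535)" form is kept but
 * hidden, presumably so that BFD profiles are the preferred interface;
 * without bfdd the same command stays visible. This rationale is an
 * inference from the #if split below, not stated in the source.
 */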
#if HAVE_BFDD > 0
DEFUN_YANG_HIDDEN (
	ip_pim_bfd_param,
	ip_pim_bfd_param_cmd,
	"ip pim bfd (2-255) (1-65535) (1-65535)",
	IP_STR
	PIM_STR
	"Enables BFD support\n"
	"Detect Multiplier\n"
	"Required min receive interval\n"
	"Desired min transmit interval\n")
#else
DEFUN_YANG(
	ip_pim_bfd_param,
	ip_pim_bfd_param_cmd,
	"ip pim bfd (2-255) (1-65535) (1-65535)",
	IP_STR
	PIM_STR
	"Enables BFD support\n"
	"Detect Multiplier\n"
	"Required min receive interval\n"
	"Desired min transmit interval\n")
#endif /* HAVE_BFDD */
{
	int idx_number = 3;
	int idx_number_2 = 4;
	int idx_number_3 = 5;
	const struct lyd_node *igmp_enable_dnode;

	igmp_enable_dnode =
		yang_dnode_getf(vty->candidate_config->dnode,
				FRR_GMP_ENABLE_XPATH, VTY_CURR_XPATH,
				"frr-routing:ipv4");
	if (!igmp_enable_dnode)
		nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
				      "true");
	else {
		if (!yang_dnode_get_bool(igmp_enable_dnode, "."))
			nb_cli_enqueue_change(vty, "./pim-enable", NB_OP_MODIFY,
					      "true");
	}

	nb_cli_enqueue_change(vty, "./bfd", NB_OP_CREATE, NULL);
	nb_cli_enqueue_change(vty, "./bfd/min-rx-interval", NB_OP_MODIFY,
			      argv[idx_number_2]->arg);
	nb_cli_enqueue_change(vty, "./bfd/min-tx-interval", NB_OP_MODIFY,
			      argv[idx_number_3]->arg);
	nb_cli_enqueue_change(vty, "./bfd/detect_mult", NB_OP_MODIFY,
			      argv[idx_number]->arg);

	return nb_cli_apply_changes(vty,
				    FRR_PIM_INTERFACE_XPATH, "frr-routing:ipv4");
}

#if HAVE_BFDD == 0
ALIAS(no_ip_pim_bfd, no_ip_pim_bfd_param_cmd,
      "no ip pim bfd (2-255) (1-65535) (1-65535)",
      NO_STR
      IP_STR
      PIM_STR
      "Enables BFD support\n"
      "Detect Multiplier\n"
      "Required min receive interval\n"
      "Desired min transmit interval\n")
#endif /* !HAVE_BFDD */

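/*
 * The deprecated global "ip msdp ..." forms below (DEFPY_ATTR with
 * CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED) all follow the same pattern:
 * resolve the VRF, create and enter the PIM node xpath with
 * VTY_PUSH_XPATH(), enqueue the same changes as the corresponding
 * "msdp ..." command, then restore vty->node and vty->xpath_index
 * before returning.
 */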
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(pim_msdp_peer, pim_msdp_peer_cmd,
|
2024-06-12 18:26:48 +02:00
|
|
|
"msdp peer A.B.C.D$peer source A.B.C.D$source",
|
2021-04-22 20:10:07 +02:00
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP peer\n"
|
|
|
|
"Peer IP address\n"
|
|
|
|
"Source address for TCP connection\n"
|
|
|
|
"Local IP address\n")
|
2016-10-25 19:59:48 +02:00
|
|
|
{
|
2020-10-23 16:29:44 +02:00
|
|
|
char msdp_peer_source_xpath[XPATH_MAXLEN];
|
2016-10-25 19:59:48 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
snprintf(msdp_peer_source_xpath, sizeof(msdp_peer_source_xpath),
|
|
|
|
"./msdp-peer[peer-ip='%s']/source-ip", peer_str);
|
|
|
|
nb_cli_enqueue_change(vty, msdp_peer_source_xpath, NB_OP_MODIFY,
|
|
|
|
source_str);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(ip_pim_msdp_peer,
|
|
|
|
ip_msdp_peer_cmd,
|
|
|
|
"ip msdp peer A.B.C.D$peer source A.B.C.D$source",
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP peer\n"
|
|
|
|
"Peer IP address\n"
|
|
|
|
"Source address for TCP connection\n"
|
|
|
|
"Local IP address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char msdp_peer_source_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2016-10-25 19:59:48 +02:00
|
|
|
|
2020-10-23 16:29:44 +02:00
|
|
|
snprintf(msdp_peer_source_xpath, sizeof(msdp_peer_source_xpath),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./msdp-peer[peer-ip='%s']/source-ip", peer_str);
|
2020-10-23 16:29:44 +02:00
|
|
|
nb_cli_enqueue_change(vty, msdp_peer_source_xpath, NB_OP_MODIFY,
|
2021-04-22 20:10:07 +02:00
|
|
|
source_str);
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2016-10-25 19:59:48 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2016-10-25 19:59:48 +02:00
|
|
|
}
|
|
|
|
|
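/*
 * MD5 authentication can only be attached to an already configured peer:
 * the handlers below look the peer up in the candidate config first and
 * print a warning (returning CMD_SUCCESS) when it is missing.
 */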
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(msdp_peer_md5, msdp_peer_md5_cmd,
|
2022-12-07 14:13:39 +01:00
|
|
|
"msdp peer A.B.C.D$peer password WORD$psk",
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP peer\n"
|
|
|
|
"MSDP Peer address\n"
|
|
|
|
"Use MD5 authentication\n"
|
|
|
|
"MD5 pre shared key\n")
|
|
|
|
{
|
|
|
|
const struct lyd_node *peer_node;
|
|
|
|
char xpath[XPATH_MAXLEN + 24];
|
|
|
|
|
|
|
|
snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']",
|
|
|
|
VTY_CURR_XPATH, peer_str);
|
|
|
|
peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
|
|
|
|
if (peer_node == NULL) {
|
|
|
|
vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
nb_cli_enqueue_change(vty, "./authentication-type", NB_OP_MODIFY, "MD5");
|
|
|
|
nb_cli_enqueue_change(vty, "./authentication-key", NB_OP_MODIFY, psk);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, "%s", xpath);
|
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(no_msdp_peer_md5, no_msdp_peer_md5_cmd,
|
2022-12-07 14:13:39 +01:00
|
|
|
"no msdp peer A.B.C.D$peer password [WORD]",
|
|
|
|
NO_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP peer\n"
|
|
|
|
"MSDP Peer address\n"
|
|
|
|
"Use MD5 authentication\n"
|
|
|
|
"MD5 pre shared key\n")
|
|
|
|
{
|
|
|
|
const struct lyd_node *peer_node;
|
|
|
|
char xpath[XPATH_MAXLEN + 24];
|
|
|
|
|
|
|
|
snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']",
|
|
|
|
VTY_CURR_XPATH, peer_str);
|
|
|
|
peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
|
|
|
|
if (peer_node == NULL) {
|
|
|
|
vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
nb_cli_enqueue_change(vty, "./authentication-type", NB_OP_MODIFY,
|
|
|
|
"None");
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, "%s", xpath);
|
|
|
|
}
|
|
|
|
|
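/*
 * "msdp timers" always sets hold-time and keep-alive; the connection
 * retry period is optional and is reset to its default (NB_OP_DESTROY)
 * when the argument is omitted.
 */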
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(pim_msdp_timers, pim_msdp_timers_cmd,
|
2024-06-12 18:26:48 +02:00
|
|
|
"msdp timers (1-65535)$keepalive (1-65535)$holdtime [(1-65535)$connretry]",
|
2021-04-20 21:17:48 +02:00
|
|
|
CFG_MSDP_STR
|
|
|
|
"MSDP timers configuration\n"
|
|
|
|
"Keep alive period (in seconds)\n"
|
|
|
|
"Hold time period (in seconds)\n"
|
|
|
|
"Connection retry period (in seconds)\n")
|
|
|
|
{
|
2024-06-12 18:26:48 +02:00
|
|
|
nb_cli_enqueue_change(vty, "./msdp/hold-time", NB_OP_MODIFY,
|
|
|
|
holdtime_str);
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/keep-alive", NB_OP_MODIFY,
|
|
|
|
keepalive_str);
|
|
|
|
if (connretry_str)
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/connection-retry",
|
|
|
|
NB_OP_MODIFY, connretry_str);
|
|
|
|
else
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/connection-retry",
|
|
|
|
NB_OP_DESTROY, NULL);
|
|
|
|
|
|
|
|
nb_cli_apply_changes(vty, NULL);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(ip_pim_msdp_timers,
|
|
|
|
ip_msdp_timers_cmd,
|
|
|
|
"ip msdp timers (1-65535)$keepalive (1-65535)$holdtime [(1-65535)$connretry]",
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"MSDP timers configuration\n"
|
|
|
|
"Keep alive period (in seconds)\n"
|
|
|
|
"Hold time period (in seconds)\n"
|
|
|
|
"Connection retry period (in seconds)\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
2021-04-20 21:17:48 +02:00
|
|
|
const char *vrfname;
|
2024-06-12 18:26:48 +02:00
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2021-04-20 21:17:48 +02:00
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 21:17:48 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2021-04-20 21:17:48 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
nb_cli_enqueue_change(vty, "./msdp/hold-time", NB_OP_MODIFY,
|
|
|
|
holdtime_str);
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/keep-alive", NB_OP_MODIFY,
|
|
|
|
keepalive_str);
|
2021-04-20 21:17:48 +02:00
|
|
|
if (connretry_str)
|
2024-06-12 18:26:48 +02:00
|
|
|
nb_cli_enqueue_change(vty, "./msdp/connection-retry",
|
|
|
|
NB_OP_MODIFY, connretry_str);
|
2021-04-20 21:17:48 +02:00
|
|
|
else
|
2024-06-12 18:26:48 +02:00
|
|
|
nb_cli_enqueue_change(vty, "./msdp/connection-retry",
|
|
|
|
NB_OP_DESTROY, NULL);
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2021-04-20 21:17:48 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2021-04-20 21:17:48 +02:00
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(no_pim_msdp_timers, no_pim_msdp_timers_cmd,
|
2024-06-12 18:26:48 +02:00
|
|
|
"no msdp timers [(1-65535) (1-65535) [(1-65535)]]",
|
2021-08-04 00:22:09 +02:00
|
|
|
NO_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"MSDP timers configuration\n"
|
|
|
|
IGNORED_IN_NO_STR
|
|
|
|
IGNORED_IN_NO_STR
|
|
|
|
IGNORED_IN_NO_STR)
|
|
|
|
{
|
2024-06-12 18:26:48 +02:00
|
|
|
nb_cli_enqueue_change(vty, "./msdp/hold-time", NB_OP_DESTROY, NULL);
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/keep-alive", NB_OP_DESTROY, NULL);
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/connection-retry", NB_OP_DESTROY,
|
|
|
|
NULL);
|
|
|
|
nb_cli_apply_changes(vty, NULL);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_msdp_timers,
|
|
|
|
no_ip_msdp_timers_cmd,
|
|
|
|
"no ip msdp timers [(1-65535) (1-65535) [(1-65535)]]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"MSDP timers configuration\n"
|
|
|
|
IGNORED_IN_NO_STR
|
|
|
|
IGNORED_IN_NO_STR
|
|
|
|
IGNORED_IN_NO_STR,
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
int ret;
|
2021-08-04 00:22:09 +02:00
|
|
|
const char *vrfname;
|
2024-06-12 18:26:48 +02:00
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2021-08-04 00:22:09 +02:00
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-08-04 00:22:09 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2021-08-04 00:22:09 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
nb_cli_enqueue_change(vty, "./msdp/hold-time", NB_OP_DESTROY, NULL);
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/keep-alive", NB_OP_DESTROY, NULL);
|
|
|
|
nb_cli_enqueue_change(vty, "./msdp/connection-retry", NB_OP_DESTROY,
|
|
|
|
NULL);
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2021-08-04 00:22:09 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
2021-08-04 00:22:09 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
return ret;
|
2021-08-04 00:22:09 +02:00
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG (no_pim_msdp_peer,
|
2024-06-12 18:26:48 +02:00
|
|
|
no_pim_msdp_peer_cmd,
|
|
|
|
"no msdp peer A.B.C.D",
|
2016-11-12 14:39:44 +01:00
|
|
|
NO_STR
|
2016-10-25 19:59:48 +02:00
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP peer\n"
|
|
|
|
"peer ip address\n")
|
|
|
|
{
|
2020-10-23 16:29:44 +02:00
|
|
|
char msdp_peer_xpath[XPATH_MAXLEN];
|
2024-06-12 18:26:48 +02:00
|
|
|
|
|
|
|
snprintf(msdp_peer_xpath, sizeof(msdp_peer_xpath),
|
|
|
|
"./msdp-peer[peer-ip='%s']", peer_str);
|
|
|
|
nb_cli_enqueue_change(vty, msdp_peer_xpath, NB_OP_DESTROY, NULL);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_msdp_peer,
|
|
|
|
no_ip_msdp_peer_cmd,
|
|
|
|
"no ip msdp peer A.B.C.D",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP peer\n"
|
|
|
|
"peer ip address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char msdp_peer_xpath[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2016-10-25 19:59:48 +02:00
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2020-10-23 16:29:44 +02:00
|
|
|
snprintf(msdp_peer_xpath, sizeof(msdp_peer_xpath),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./msdp-peer[peer-ip='%s']", peer_str);
|
2020-10-23 16:29:44 +02:00
|
|
|
nb_cli_enqueue_change(vty, msdp_peer_xpath, NB_OP_DESTROY, NULL);
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(msdp_peer_sa_filter, msdp_peer_sa_filter_cmd,
|
2021-05-03 15:25:52 +02:00
|
|
|
"msdp peer A.B.C.D$peer sa-filter ACL_NAME$acl_name <in|out>$dir",
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP peer\n"
|
|
|
|
"MSDP Peer address\n"
|
|
|
|
"SA access-list filter\n"
|
|
|
|
"SA access-list name\n"
|
|
|
|
"Filter incoming SAs\n"
|
|
|
|
"Filter outgoing SAs\n")
|
|
|
|
{
|
|
|
|
const struct lyd_node *peer_node;
|
|
|
|
char xpath[XPATH_MAXLEN + 24];
|
|
|
|
|
|
|
|
snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']",
|
|
|
|
VTY_CURR_XPATH, peer_str);
|
|
|
|
peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
|
|
|
|
if (peer_node == NULL) {
|
|
|
|
vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strcmp(dir, "in") == 0)
|
|
|
|
nb_cli_enqueue_change(vty, "./sa-filter-in", NB_OP_MODIFY,
|
|
|
|
acl_name);
|
|
|
|
else
|
|
|
|
nb_cli_enqueue_change(vty, "./sa-filter-out", NB_OP_MODIFY,
|
|
|
|
acl_name);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, "%s", xpath);
|
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(no_msdp_peer_sa_filter, no_ip_msdp_peer_sa_filter_cmd,
|
2021-05-03 15:25:52 +02:00
|
|
|
"no msdp peer A.B.C.D$peer sa-filter ACL_NAME <in|out>$dir",
|
|
|
|
NO_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP peer\n"
|
|
|
|
"MSDP Peer address\n"
|
|
|
|
"SA access-list filter\n"
|
|
|
|
"SA access-list name\n"
|
|
|
|
"Filter incoming SAs\n"
|
|
|
|
"Filter outgoing SAs\n")
|
|
|
|
{
|
|
|
|
const struct lyd_node *peer_node;
|
|
|
|
char xpath[XPATH_MAXLEN + 24];
|
|
|
|
|
|
|
|
snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']",
|
|
|
|
VTY_CURR_XPATH, peer_str);
|
|
|
|
peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
|
|
|
|
if (peer_node == NULL) {
|
|
|
|
vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strcmp(dir, "in") == 0)
|
|
|
|
nb_cli_enqueue_change(vty, "./sa-filter-in", NB_OP_DESTROY,
|
|
|
|
NULL);
|
|
|
|
else
|
|
|
|
nb_cli_enqueue_change(vty, "./sa-filter-out", NB_OP_DESTROY,
|
|
|
|
NULL);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, "%s", xpath);
|
|
|
|
}
|
|
|
|
|
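/*
 * Mesh group member addition creates the msdp-mesh-groups list entry and
 * the member entry in a single commit, so the group does not have to be
 * configured beforehand.
 */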
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(pim_msdp_mesh_group_member,
|
2024-06-12 18:26:48 +02:00
|
|
|
pim_msdp_mesh_group_member_cmd,
|
|
|
|
"msdp mesh-group WORD$gname member A.B.C.D$maddr",
|
2021-04-20 19:54:09 +02:00
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP mesh-group\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group member\n"
|
|
|
|
"Peer IP address\n")
|
2016-11-12 14:39:44 +01:00
|
|
|
{
|
2021-04-20 19:54:09 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN];
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
/* Create mesh group. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
|
|
|
"./msdp-mesh-groups[name='%s']", gname);
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
|
|
|
|
|
|
|
/* Create mesh group member. */
|
|
|
|
strlcat(xpath_value, "/members[address='", sizeof(xpath_value));
|
|
|
|
strlcat(xpath_value, maddr_str, sizeof(xpath_value));
|
|
|
|
strlcat(xpath_value, "']", sizeof(xpath_value));
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(ip_pim_msdp_mesh_group_member,
|
|
|
|
ip_msdp_mesh_group_member_cmd,
|
|
|
|
"ip msdp mesh-group WORD$gname member A.B.C.D$maddr",
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP mesh-group\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group member\n"
|
|
|
|
"Peer IP address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char xpath_value[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Create mesh group. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./msdp-mesh-groups[name='%s']", gname);
|
2021-04-20 19:54:09 +02:00
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Create mesh group member. */
|
|
|
|
strlcat(xpath_value, "/members[address='", sizeof(xpath_value));
|
|
|
|
strlcat(xpath_value, maddr_str, sizeof(xpath_value));
|
|
|
|
strlcat(xpath_value, "']", sizeof(xpath_value));
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
2020-10-23 16:29:44 +02:00
|
|
|
|
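/*
 * Member removal validates that both the group and the member exist in
 * the candidate config, and relies on pim_cli_legacy_mesh_group_behavior()
 * to drop the whole group when the last member (or the source) is
 * removed, matching the legacy CLI behaviour.
 */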
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(no_pim_msdp_mesh_group_member,
|
2024-06-12 18:26:48 +02:00
|
|
|
no_pim_msdp_mesh_group_member_cmd,
|
|
|
|
"no msdp mesh-group WORD$gname member A.B.C.D$maddr",
|
2021-04-20 19:54:09 +02:00
|
|
|
NO_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP mesh-group member\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group member\n"
|
|
|
|
"Peer IP address\n")
|
2016-11-12 14:39:44 +01:00
|
|
|
{
|
2024-07-22 18:19:50 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN + 26];
|
2021-04-20 19:54:09 +02:00
|
|
|
char xpath_member_value[XPATH_MAXLEN];
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
2024-06-12 18:26:48 +02:00
|
|
|
"%s/msdp-mesh-groups[name='%s']", VTY_CURR_XPATH, gname);
|
2020-10-23 16:29:44 +02:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
if (!yang_dnode_exists(vty->candidate_config->dnode, xpath_value)) {
|
|
|
|
vty_out(vty, "%% mesh-group does not exist\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Remove mesh group member. */
|
|
|
|
strlcpy(xpath_member_value, xpath_value, sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, "/members[address='",
|
|
|
|
sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, maddr_str, sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, "']", sizeof(xpath_member_value));
|
|
|
|
if (!yang_dnode_exists(vty->candidate_config->dnode,
|
|
|
|
xpath_member_value)) {
|
|
|
|
vty_out(vty, "%% mesh-group member does not exist\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
|
|
|
|
2021-09-28 04:38:41 +02:00
|
|
|
nb_cli_enqueue_change(vty, xpath_member_value, NB_OP_DESTROY, NULL);
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/*
|
|
|
|
* If this is the last member, then we must remove the group altogether
|
|
|
|
* to not break legacy CLI behaviour.
|
|
|
|
*/
|
|
|
|
pim_cli_legacy_mesh_group_behavior(vty, gname);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
2020-10-23 16:29:44 +02:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_msdp_mesh_group_member,
|
|
|
|
no_ip_msdp_mesh_group_member_cmd,
|
|
|
|
"no ip msdp mesh-group WORD$gname member A.B.C.D$maddr",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP mesh-group member\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group member\n"
|
|
|
|
"Peer IP address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
2024-07-22 18:19:50 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN + 26];
|
2024-06-12 18:26:48 +02:00
|
|
|
char xpath_member_value[XPATH_MAXLEN];
|
|
|
|
int ret = CMD_WARNING_CONFIG_FAILED;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
|
|
|
"%s/msdp-mesh-groups[name='%s']", VTY_CURR_XPATH, gname);
|
|
|
|
|
|
|
|
if (yang_dnode_exists(vty->candidate_config->dnode, xpath_value)) {
|
|
|
|
/* Remove mesh group member. */
|
|
|
|
strlcpy(xpath_member_value, xpath_value,
|
|
|
|
sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, "/members[address='",
|
|
|
|
sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, maddr_str,
|
|
|
|
sizeof(xpath_member_value));
|
|
|
|
strlcat(xpath_member_value, "']", sizeof(xpath_member_value));
|
|
|
|
if (yang_dnode_exists(vty->candidate_config->dnode,
|
|
|
|
xpath_member_value)) {
|
|
|
|
nb_cli_enqueue_change(vty, xpath_member_value,
|
|
|
|
NB_OP_DESTROY, NULL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this is the last member, then we must remove the group altogether
|
|
|
|
* to not break legacy CLI behaviour.
|
|
|
|
*/
|
|
|
|
pim_cli_legacy_mesh_group_behavior(vty, gname);
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% mesh-group member does not exist\n");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% mesh-group does not exist\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(pim_msdp_mesh_group_source,
|
2024-06-12 18:26:48 +02:00
|
|
|
pim_msdp_mesh_group_source_cmd,
|
|
|
|
"msdp mesh-group WORD$gname source A.B.C.D$saddr",
|
2021-04-20 19:54:09 +02:00
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP mesh-group\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group local address\n"
|
|
|
|
"Source IP address for the TCP connection\n")
|
2016-11-12 14:39:44 +01:00
|
|
|
{
|
2021-04-20 19:54:09 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN];
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
/* Create mesh group. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
|
|
|
"./msdp-mesh-groups[name='%s']", gname);
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
|
|
|
|
|
|
|
/* Create mesh group source. */
|
|
|
|
strlcat(xpath_value, "/source", sizeof(xpath_value));
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, saddr_str);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(ip_pim_msdp_mesh_group_source,
|
|
|
|
ip_msdp_mesh_group_source_cmd,
|
|
|
|
"ip msdp mesh-group WORD$gname source A.B.C.D$saddr",
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Configure MSDP mesh-group\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group local address\n"
|
|
|
|
"Source IP address for the TCP connection\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char xpath_value[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Create mesh group. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./msdp-mesh-groups[name='%s']", gname);
|
2021-04-20 19:54:09 +02:00
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
2021-09-28 04:38:41 +02:00
|
|
|
/* Create mesh group source. */
|
2021-04-20 19:54:09 +02:00
|
|
|
strlcat(xpath_value, "/source", sizeof(xpath_value));
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, saddr_str);
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2016-11-12 14:39:44 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(no_pim_msdp_mesh_group_source,
|
2024-06-12 18:26:48 +02:00
|
|
|
no_pim_msdp_mesh_group_source_cmd,
|
|
|
|
"no msdp mesh-group WORD$gname source [A.B.C.D]",
|
2021-04-20 19:54:09 +02:00
|
|
|
NO_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP mesh-group source\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group source\n"
|
|
|
|
"Mesh group local address\n")
|
2016-11-12 14:39:44 +01:00
|
|
|
{
|
2021-04-20 19:54:09 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN];
|
2020-10-23 16:29:44 +02:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
|
|
|
"./msdp-mesh-groups[name='%s']", gname);
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
|
|
|
|
|
|
|
/* Create mesh group source. */
|
|
|
|
strlcat(xpath_value, "/source", sizeof(xpath_value));
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If this is the last member, then we must remove the group altogether
|
|
|
|
* to not break legacy CLI behaviour.
|
|
|
|
*/
|
|
|
|
pim_cli_legacy_mesh_group_behavior(vty, gname);
|
|
|
|
|
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
|
|
|
}
|
|
|
|
DEFPY_ATTR(no_ip_pim_msdp_mesh_group_source,
|
|
|
|
no_ip_msdp_mesh_group_source_cmd,
|
|
|
|
"no ip msdp mesh-group WORD$gname source [A.B.C.D]",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP mesh-group source\n"
|
|
|
|
"Mesh group name\n"
|
|
|
|
"Mesh group source\n"
|
|
|
|
"Mesh group local address\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
|
|
|
char xpath_value[XPATH_MAXLEN];
|
|
|
|
int ret;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
2021-04-20 19:53:49 +02:00
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
2024-06-12 18:26:48 +02:00
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
2021-04-20 19:53:49 +02:00
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
2024-06-12 18:26:48 +02:00
|
|
|
}
|
2020-10-23 16:29:44 +02:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
2024-06-12 18:26:48 +02:00
|
|
|
"./msdp-mesh-groups[name='%s']", gname);
|
2021-04-20 19:54:09 +02:00
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_CREATE, NULL);
|
2020-10-23 16:29:44 +02:00
|
|
|
|
2021-09-28 04:38:41 +02:00
|
|
|
/* Create mesh group source. */
|
2021-04-20 19:54:09 +02:00
|
|
|
strlcat(xpath_value, "/source", sizeof(xpath_value));
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
|
2020-10-23 16:29:44 +02:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/*
|
|
|
|
* If this is the last member, then we must remove the group altogether
|
|
|
|
* to not break legacy CLI behaviour.
|
|
|
|
*/
|
|
|
|
pim_cli_legacy_mesh_group_behavior(vty, gname);
|
2024-06-12 18:26:48 +02:00
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
2020-03-16 21:41:46 +01:00
|
|
|
|
2024-06-12 18:26:48 +02:00
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2020-03-16 21:41:46 +01:00
|
|
|
}
|
|
|
|
|
2025-04-08 16:57:36 +02:00
|
|
|
DEFPY_YANG(no_pim_msdp_mesh_group,
|
2024-06-12 18:26:48 +02:00
|
|
|
no_pim_msdp_mesh_group_cmd,
|
|
|
|
"no msdp mesh-group WORD$gname",
|
2021-04-20 19:54:09 +02:00
|
|
|
NO_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP mesh-group\n"
|
2022-10-04 15:18:29 +02:00
|
|
|
"Mesh group name\n")
|
2020-03-16 21:41:46 +01:00
|
|
|
{
|
2024-07-22 18:19:50 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN + 26];
|
2020-03-16 21:41:46 +01:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
2024-06-12 18:26:48 +02:00
|
|
|
"%s/msdp-mesh-groups[name='%s']", VTY_CURR_XPATH, gname);
|
2021-04-20 19:54:09 +02:00
|
|
|
if (!yang_dnode_exists(vty->candidate_config->dnode, xpath_value))
|
|
|
|
return CMD_SUCCESS;
|
2020-10-23 16:29:44 +02:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
|
2020-10-23 16:29:44 +02:00
|
|
|
return nb_cli_apply_changes(vty, NULL);
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
2024-06-12 18:26:48 +02:00
|
|
|
DEFPY_ATTR(no_ip_pim_msdp_mesh_group,
|
|
|
|
no_ip_msdp_mesh_group_cmd,
|
|
|
|
"no ip msdp mesh-group WORD$gname",
|
|
|
|
NO_STR
|
|
|
|
IP_STR
|
|
|
|
CFG_MSDP_STR
|
|
|
|
"Delete MSDP mesh-group\n"
|
|
|
|
"Mesh group name\n",
|
|
|
|
CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
|
|
|
|
{
|
2024-07-22 18:19:50 +02:00
|
|
|
char xpath_value[XPATH_MAXLEN + 26];
|
2024-06-12 18:26:48 +02:00
|
|
|
int ret = CMD_SUCCESS;
|
|
|
|
const char *vrfname;
|
|
|
|
char xpath[XPATH_MAXLEN];
|
|
|
|
int orig_node = -1;
|
|
|
|
|
|
|
|
vrfname = pim_cli_get_vrf_name(vty);
|
|
|
|
if (vrfname) {
|
|
|
|
snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
|
|
|
|
"frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
|
|
|
|
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
|
|
|
|
if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
|
|
|
|
CMD_SUCCESS) {
|
|
|
|
orig_node = vty->node;
|
|
|
|
VTY_PUSH_XPATH(PIM_NODE, xpath);
|
|
|
|
} else {
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "%% Failed to determine vrf name\n");
|
|
|
|
return CMD_WARNING_CONFIG_FAILED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get mesh group base XPath. */
|
|
|
|
snprintf(xpath_value, sizeof(xpath_value),
|
|
|
|
"%s/msdp-mesh-groups[name='%s']", VTY_CURR_XPATH, gname);
|
|
|
|
if (yang_dnode_exists(vty->candidate_config->dnode, xpath_value)) {
|
|
|
|
nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
|
|
|
|
ret = nb_cli_apply_changes(vty, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (orig_node != -1) {
|
|
|
|
vty->node = orig_node;
|
|
|
|
vty->xpath_index--;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
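/*
 * "[no] msdp shutdown" is handled by a single DEFPY: the "no" token
 * selects between destroying and setting the ./msdp/shutdown leaf.
 */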
DEFPY_YANG(msdp_shutdown,
	   msdp_shutdown_cmd,
	   "[no] msdp shutdown",
	   NO_STR
	   CFG_MSDP_STR
	   "Shutdown MSDP operation\n")
{
	char xpath_value[XPATH_MAXLEN];

	snprintf(xpath_value, sizeof(xpath_value), "./msdp/shutdown");
	if (no)
		nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
	else
		nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_YANG(msdp_peer_sa_limit, msdp_peer_sa_limit_cmd,
	   "[no] msdp peer A.B.C.D$peer sa-limit ![(1-4294967294)$sa_limit]",
	   NO_STR
	   CFG_MSDP_STR
	   "Configure MSDP peer\n"
	   "MSDP peer address\n"
	   "Limit amount of SA\n"
	   "Maximum number of SA\n")
{
	const struct lyd_node *peer_node;
	char xpath[XPATH_MAXLEN + 24];

	snprintf(xpath, sizeof(xpath), "%s/msdp-peer[peer-ip='%s']", VTY_CURR_XPATH, peer_str);
	peer_node = yang_dnode_get(vty->candidate_config->dnode, xpath);
	if (peer_node == NULL) {
		vty_out(vty, "%% MSDP peer %s not yet configured\n", peer_str);
		return CMD_SUCCESS;
	}

	nb_cli_enqueue_change(vty, "./sa-limit", NB_OP_MODIFY, sa_limit_str);
	return nb_cli_apply_changes(vty, "%s", xpath);
}

DEFPY_YANG(msdp_originator_id, msdp_originator_id_cmd,
	   "[no] msdp originator-id ![A.B.C.D$originator_id]",
	   NO_STR
	   CFG_MSDP_STR
	   "Configure MSDP RP originator\n"
	   "MSDP RP originator identifier\n")
{
	char xpath_value[XPATH_MAXLEN];

	snprintf(xpath_value, sizeof(xpath_value), "./msdp/originator-id");
	if (no)
		nb_cli_enqueue_change(vty, xpath_value, NB_OP_DESTROY, NULL);
	else
		nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, originator_id_str);

	return nb_cli_apply_changes(vty, NULL);
}

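/*
 * The show helpers below emit either plain-text tables or JSON depending
 * on the caller: when a json object is passed in, per-entry rows are
 * added to it and the calling command prints it with vty_json().
 */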
2021-04-20 19:54:09 +02:00
|
|
|
static void ip_msdp_show_mesh_group(struct vty *vty, struct pim_msdp_mg *mg,
|
|
|
|
struct json_object *json)
|
2016-11-12 14:39:44 +01:00
|
|
|
{
|
|
|
|
struct listnode *mbrnode;
|
|
|
|
struct pim_msdp_mg_mbr *mbr;
|
|
|
|
char mbr_str[INET_ADDRSTRLEN];
|
|
|
|
char src_str[INET_ADDRSTRLEN];
|
|
|
|
char state_str[PIM_MSDP_STATE_STRLEN];
|
|
|
|
enum pim_msdp_peer_state state;
|
|
|
|
json_object *json_mg_row = NULL;
|
2016-12-13 22:32:16 +01:00
|
|
|
json_object *json_members = NULL;
|
2016-11-12 14:39:44 +01:00
|
|
|
json_object *json_row = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-11-12 14:39:44 +01:00
|
|
|
pim_inet4_dump("<source?>", mg->src_ip, src_str, sizeof(src_str));
|
2021-04-20 19:54:09 +02:00
|
|
|
if (json) {
|
2016-11-12 14:39:44 +01:00
|
|
|
/* currently there is only one mesh group but we should still
|
|
|
|
* make
|
2024-06-12 18:26:48 +02:00
|
|
|
* it a dict with mg-name as key
|
|
|
|
*/
|
2016-11-12 14:39:44 +01:00
|
|
|
json_mg_row = json_object_new_object();
|
|
|
|
json_object_string_add(json_mg_row, "name",
|
|
|
|
mg->mesh_group_name);
|
|
|
|
json_object_string_add(json_mg_row, "source", src_str);
|
|
|
|
} else {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, "Mesh group : %s\n", mg->mesh_group_name);
|
|
|
|
vty_out(vty, " Source : %s\n", src_str);
|
|
|
|
vty_out(vty, " Member State\n");
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-11-12 14:39:44 +01:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbrnode, mbr)) {
|
|
|
|
pim_inet4_dump("<mbr?>", mbr->mbr_ip, mbr_str, sizeof(mbr_str));
|
|
|
|
if (mbr->mp) {
|
|
|
|
state = mbr->mp->state;
|
|
|
|
} else {
|
|
|
|
state = PIM_MSDP_DISABLED;
|
|
|
|
}
|
|
|
|
pim_msdp_state_dump(state, state_str, sizeof(state_str));
|
2021-04-20 19:54:09 +02:00
|
|
|
if (json) {
|
2016-11-12 14:39:44 +01:00
|
|
|
json_row = json_object_new_object();
|
|
|
|
json_object_string_add(json_row, "member", mbr_str);
|
|
|
|
json_object_string_add(json_row, "state", state_str);
|
2016-12-13 22:32:16 +01:00
|
|
|
if (!json_members) {
|
|
|
|
json_members = json_object_new_object();
|
|
|
|
json_object_object_add(json_mg_row, "members",
|
|
|
|
json_members);
|
|
|
|
}
|
|
|
|
json_object_object_add(json_members, mbr_str, json_row);
|
2016-11-12 14:39:44 +01:00
|
|
|
} else {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, " %-15s %11s\n", mbr_str, state_str);
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-04-20 19:54:09 +02:00
|
|
|
if (json)
|
2016-11-12 14:39:44 +01:00
|
|
|
json_object_object_add(json, mg->mesh_group_name, json_mg_row);
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFUN (show_ip_msdp_mesh_group,
|
|
|
|
show_ip_msdp_mesh_group_cmd,
|
2017-06-20 19:47:59 +02:00
|
|
|
"show ip msdp [vrf NAME] mesh-group [json]",
|
2016-11-12 14:39:44 +01:00
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
MSDP_STR
|
2017-05-23 13:34:19 +02:00
|
|
|
VRF_CMD_HELP_STR
|
2016-11-12 14:39:44 +01:00
|
|
|
"MSDP mesh-group information\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2016-11-12 14:39:44 +01:00
|
|
|
{
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2017-05-23 14:57:11 +02:00
|
|
|
int idx = 2;
|
2021-04-20 19:54:09 +02:00
|
|
|
struct pim_msdp_mg *mg;
|
2023-04-10 23:34:35 +02:00
|
|
|
struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);
|
2022-08-15 17:43:27 +02:00
|
|
|
struct pim_instance *pim;
|
2021-04-20 19:54:09 +02:00
|
|
|
struct json_object *json = NULL;
|
2017-05-23 00:14:43 +02:00
|
|
|
|
|
|
|
if (!vrf)
|
|
|
|
return CMD_WARNING;
|
|
|
|
|
2022-08-15 17:43:27 +02:00
|
|
|
pim = vrf->info;
|
2021-04-20 19:54:09 +02:00
|
|
|
/* Quick case: list is empty. */
|
|
|
|
if (SLIST_EMPTY(&pim->msdp.mglist)) {
|
|
|
|
if (uj)
|
|
|
|
vty_out(vty, "{}\n");
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (uj)
|
|
|
|
json = json_object_new_object();
|
|
|
|
|
|
|
|
SLIST_FOREACH (mg, &pim->msdp.mglist, mg_entry)
|
|
|
|
ip_msdp_show_mesh_group(vty, mg, json);
|
|
|
|
|
2022-01-31 20:20:41 +01:00
|
|
|
if (uj)
|
|
|
|
vty_json(vty, json);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-11-12 14:39:44 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-07-07 17:29:53 +02:00
|
|
|
DEFUN (show_ip_msdp_mesh_group_vrf_all,
|
|
|
|
show_ip_msdp_mesh_group_vrf_all_cmd,
|
|
|
|
"show ip msdp vrf all mesh-group [json]",
|
|
|
|
SHOW_STR
|
|
|
|
IP_STR
|
|
|
|
MSDP_STR
|
|
|
|
VRF_CMD_HELP_STR
|
|
|
|
"MSDP mesh-group information\n"
|
2017-07-25 14:19:57 +02:00
|
|
|
JSON_STR)
|
2017-07-07 17:29:53 +02:00
|
|
|
{
|
2018-08-29 14:19:54 +02:00
|
|
|
bool uj = use_json(argc, argv);
|
2021-04-20 19:54:09 +02:00
|
|
|
struct json_object *json = NULL, *vrf_json = NULL;
|
|
|
|
struct pim_instance *pim;
|
|
|
|
struct pim_msdp_mg *mg;
|
2017-07-07 17:29:53 +02:00
|
|
|
struct vrf *vrf;
|
|
|
|
|
|
|
|
if (uj)
|
2021-04-20 19:54:09 +02:00
|
|
|
json = json_object_new_object();
|
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
|
2017-07-07 17:29:53 +02:00
|
|
|
if (uj) {
|
2021-04-20 19:54:09 +02:00
|
|
|
vrf_json = json_object_new_object();
|
|
|
|
json_object_object_add(json, vrf->name, vrf_json);
|
2017-07-07 17:29:53 +02:00
|
|
|
} else
|
|
|
|
vty_out(vty, "VRF: %s\n", vrf->name);
|
2021-04-20 19:54:09 +02:00
|
|
|
|
|
|
|
pim = vrf->info;
|
|
|
|
SLIST_FOREACH (mg, &pim->msdp.mglist, mg_entry)
|
|
|
|
ip_msdp_show_mesh_group(vty, mg, vrf_json);
|
|
|
|
}
|
|
|
|
|
2022-01-31 20:20:41 +01:00
|
|
|
if (uj)
|
|
|
|
vty_json(vty, json);
|
|
|
|
|
2017-07-07 17:29:53 +02:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
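/*
 * Peer listing: uptime is only computed for peers in the ESTABLISHED
 * state; all other states show "-" in the Uptime column.
 */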
2017-06-29 16:45:38 +02:00
|
|
|
static void ip_msdp_show_peers(struct pim_instance *pim, struct vty *vty,
|
2018-09-04 19:39:04 +02:00
|
|
|
bool uj)
|
2016-10-25 19:59:48 +02:00
|
|
|
{
|
|
|
|
struct listnode *mpnode;
|
|
|
|
struct pim_msdp_peer *mp;
|
|
|
|
char peer_str[INET_ADDRSTRLEN];
|
|
|
|
char local_str[INET_ADDRSTRLEN];
|
|
|
|
char state_str[PIM_MSDP_STATE_STRLEN];
|
|
|
|
char timebuf[PIM_MSDP_UPTIME_STRLEN];
|
|
|
|
int64_t now;
|
2016-11-12 14:39:44 +01:00
|
|
|
json_object *json = NULL;
|
|
|
|
json_object *json_row = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
|
2016-10-25 19:59:48 +02:00
|
|
|
if (uj) {
|
2016-11-12 14:39:44 +01:00
|
|
|
json = json_object_new_object();
|
2016-10-25 19:59:48 +02:00
|
|
|
} else {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty,
|
|
|
|
"Peer Local State Uptime SaCnt\n");
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-23 00:14:43 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, mpnode, mp)) {
|
2016-11-12 14:39:44 +01:00
|
|
|
if (mp->state == PIM_MSDP_ESTABLISHED) {
|
|
|
|
now = pim_time_monotonic_sec();
|
|
|
|
pim_time_uptime(timebuf, sizeof(timebuf),
|
|
|
|
now - mp->uptime);
|
|
|
|
} else {
|
2019-05-06 23:26:15 +02:00
|
|
|
strlcpy(timebuf, "-", sizeof(timebuf));
|
2016-11-12 14:39:44 +01:00
|
|
|
}
|
|
|
|
pim_inet4_dump("<peer?>", mp->peer, peer_str, sizeof(peer_str));
|
|
|
|
pim_inet4_dump("<local?>", mp->local, local_str,
|
|
|
|
sizeof(local_str));
|
|
|
|
pim_msdp_state_dump(mp->state, state_str, sizeof(state_str));
|
|
|
|
if (uj) {
|
|
|
|
json_row = json_object_new_object();
|
|
|
|
json_object_string_add(json_row, "peer", peer_str);
|
|
|
|
json_object_string_add(json_row, "local", local_str);
|
|
|
|
json_object_string_add(json_row, "state", state_str);
|
|
|
|
json_object_string_add(json_row, "upTime", timebuf);
|
2016-11-21 19:20:00 +01:00
|
|
|
json_object_int_add(json_row, "saCount", mp->sa_cnt);
|
2016-11-12 14:39:44 +01:00
|
|
|
json_object_object_add(json, peer_str, json_row);
|
|
|
|
} else {
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, "%-15s %15s %11s %8s %6d\n", peer_str,
|
2016-11-21 19:20:00 +01:00
|
|
|
local_str, state_str, timebuf, mp->sa_cnt);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2016-10-25 19:59:48 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-01-31 20:20:41 +01:00
|
|
|
if (uj)
|
|
|
|
vty_json(vty, json);
|
2016-11-12 14:39:44 +01:00
|
|
|
}
static void ip_msdp_show_peers_detail(struct pim_instance *pim, struct vty *vty,
				      const char *peer, bool uj)
{
	struct listnode *mpnode;
	struct pim_msdp_peer *mp;
	char peer_str[INET_ADDRSTRLEN];
	char local_str[INET_ADDRSTRLEN];
	char state_str[PIM_MSDP_STATE_STRLEN];
	char timebuf[PIM_MSDP_UPTIME_STRLEN];
	char katimer[PIM_MSDP_TIMER_STRLEN];
	char crtimer[PIM_MSDP_TIMER_STRLEN];
	char holdtimer[PIM_MSDP_TIMER_STRLEN];
	int64_t now;
	json_object *json = NULL;
	json_object *json_row = NULL;

	if (uj) {
		json = json_object_new_object();
	}

	for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, mpnode, mp)) {
		pim_inet4_dump("<peer?>", mp->peer, peer_str, sizeof(peer_str));
		if (strcmp(peer, "detail") && strcmp(peer, peer_str))
			continue;

		if (mp->state == PIM_MSDP_ESTABLISHED) {
			now = pim_time_monotonic_sec();
			pim_time_uptime(timebuf, sizeof(timebuf),
					now - mp->uptime);
		} else {
			strlcpy(timebuf, "-", sizeof(timebuf));
		}
		pim_inet4_dump("<local?>", mp->local, local_str,
			       sizeof(local_str));
		pim_msdp_state_dump(mp->state, state_str, sizeof(state_str));
		pim_time_timer_to_hhmmss(katimer, sizeof(katimer),
					 mp->ka_timer);
		pim_time_timer_to_hhmmss(crtimer, sizeof(crtimer),
					 mp->cr_timer);
		pim_time_timer_to_hhmmss(holdtimer, sizeof(holdtimer),
					 mp->hold_timer);

		if (uj) {
			json_row = json_object_new_object();
			json_object_string_add(json_row, "peer", peer_str);
			json_object_string_add(json_row, "local", local_str);
			if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
				json_object_string_add(json_row,
						       "meshGroupName",
						       mp->mesh_group_name);
			json_object_string_add(json_row, "state", state_str);
			json_object_string_add(json_row, "upTime", timebuf);
			json_object_string_add(json_row, "keepAliveTimer",
					       katimer);
			json_object_string_add(json_row, "connRetryTimer",
					       crtimer);
			json_object_string_add(json_row, "holdTimer",
					       holdtimer);
			json_object_string_add(json_row, "lastReset",
					       mp->last_reset);
			json_object_int_add(json_row, "connAttempts",
					    mp->conn_attempts);
			json_object_int_add(json_row, "establishedChanges",
					    mp->est_flaps);
			json_object_int_add(json_row, "saCount", mp->sa_cnt);
			json_object_int_add(json_row, "kaSent", mp->ka_tx_cnt);
			json_object_int_add(json_row, "kaRcvd", mp->ka_rx_cnt);
			json_object_int_add(json_row, "saSent", mp->sa_tx_cnt);
			json_object_int_add(json_row, "saRcvd", mp->sa_rx_cnt);
			json_object_object_add(json, peer_str, json_row);
		} else {
			vty_out(vty, "Peer : %s\n", peer_str);
			vty_out(vty, " Local : %s\n", local_str);
			if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
				vty_out(vty, " Mesh Group : %s\n",
					mp->mesh_group_name);
			vty_out(vty, " State : %s\n", state_str);
			vty_out(vty, " Uptime : %s\n", timebuf);

			vty_out(vty, " Keepalive Timer : %s\n", katimer);
			vty_out(vty, " Conn Retry Timer : %s\n", crtimer);
			vty_out(vty, " Hold Timer : %s\n", holdtimer);
			vty_out(vty, " Last Reset : %s\n",
				mp->last_reset);
			vty_out(vty, " Conn Attempts : %d\n",
				mp->conn_attempts);
			vty_out(vty, " Established Changes : %d\n",
				mp->est_flaps);
			vty_out(vty, " SA Count : %d\n",
				mp->sa_cnt);
			vty_out(vty, " Statistics :\n");
			vty_out(vty,
				" Sent Rcvd\n");
			vty_out(vty, " Keepalives : %10d %10d\n",
				mp->ka_tx_cnt, mp->ka_rx_cnt);
			vty_out(vty, " SAs : %10d %10d\n",
				mp->sa_tx_cnt, mp->sa_rx_cnt);
			vty_out(vty, "\n");
		}
	}

	if (uj)
		vty_json(vty, json);
}

DEFUN (show_ip_msdp_peer_detail,
       show_ip_msdp_peer_detail_cmd,
       "show ip msdp [vrf NAME] peer [detail|A.B.C.D] [json]",
       SHOW_STR
       IP_STR
       MSDP_STR
       VRF_CMD_HELP_STR
       "MSDP peer information\n"
       "Detailed output\n"
       "peer ip address\n"
       JSON_STR)
{
	bool uj = use_json(argc, argv);
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);

	if (!vrf)
		return CMD_WARNING;

	char *arg = NULL;

	if (argv_find(argv, argc, "detail", &idx))
		arg = argv[idx]->text;
	else if (argv_find(argv, argc, "A.B.C.D", &idx))
		arg = argv[idx]->arg;

	if (arg)
		ip_msdp_show_peers_detail(vrf->info, vty, argv[idx]->arg, uj);
	else
		ip_msdp_show_peers(vrf->info, vty, uj);

	return CMD_SUCCESS;
}

DEFUN (show_ip_msdp_peer_detail_vrf_all,
       show_ip_msdp_peer_detail_vrf_all_cmd,
       "show ip msdp vrf all peer [detail|A.B.C.D] [json]",
       SHOW_STR
       IP_STR
       MSDP_STR
       VRF_CMD_HELP_STR
       "MSDP peer information\n"
       "Detailed output\n"
       "peer ip address\n"
       JSON_STR)
{
	int idx = 2;
	bool uj = use_json(argc, argv);
	struct vrf *vrf;
	bool first = true;

	if (uj)
		vty_out(vty, "{ ");
	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
		if (uj) {
			if (!first)
				vty_out(vty, ", ");
			vty_out(vty, " \"%s\": ", vrf->name);
			first = false;
		} else
			vty_out(vty, "VRF: %s\n", vrf->name);
		if (argv_find(argv, argc, "detail", &idx)
		    || argv_find(argv, argc, "A.B.C.D", &idx))
			ip_msdp_show_peers_detail(vrf->info, vty,
						  argv[idx]->arg, uj);
		else
			ip_msdp_show_peers(vrf->info, vty, uj);
	}
	if (uj)
		vty_out(vty, "}\n");

	return CMD_SUCCESS;
}
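
/*
 * One-line-per-entry dump of the MSDP SA (source-active) cache, either as
 * a text table or as JSON grouped by multicast group.
 */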
static void ip_msdp_show_sa(struct pim_instance *pim, struct vty *vty, bool uj)
{
	struct listnode *sanode;
	struct pim_msdp_sa *sa;
	char rp_str[INET_ADDRSTRLEN];
	char timebuf[PIM_MSDP_UPTIME_STRLEN];
	char spt_str[8];
	char local_str[8];
	int64_t now;
	json_object *json = NULL;
	json_object *json_group = NULL;
	json_object *json_row = NULL;

	if (uj) {
		json = json_object_new_object();
	} else {
		vty_out(vty,
			"Source Group RP Local SPT Uptime\n");
	}

	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
		now = pim_time_monotonic_sec();
		pim_time_uptime(timebuf, sizeof(timebuf), now - sa->uptime);
		if (sa->flags & PIM_MSDP_SAF_PEER) {
			pim_inet4_dump("<rp?>", sa->rp, rp_str, sizeof(rp_str));
			if (sa->up) {
				strlcpy(spt_str, "yes", sizeof(spt_str));
			} else {
				strlcpy(spt_str, "no", sizeof(spt_str));
			}
		} else {
			strlcpy(rp_str, "-", sizeof(rp_str));
			strlcpy(spt_str, "-", sizeof(spt_str));
		}
		if (sa->flags & PIM_MSDP_SAF_LOCAL) {
			strlcpy(local_str, "yes", sizeof(local_str));
		} else {
			strlcpy(local_str, "no", sizeof(local_str));
		}
		if (uj) {
			char src_str[PIM_ADDRSTRLEN];
			char grp_str[PIM_ADDRSTRLEN];

			snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
				   &sa->sg.grp);
			snprintfrr(src_str, sizeof(src_str), "%pPAs",
				   &sa->sg.src);

			json_object_object_get_ex(json, grp_str, &json_group);

			if (!json_group) {
				json_group = json_object_new_object();
				json_object_object_add(json, grp_str,
						       json_group);
			}

			json_row = json_object_new_object();
			json_object_string_add(json_row, "source", src_str);
			json_object_string_add(json_row, "group", grp_str);
			json_object_string_add(json_row, "rp", rp_str);
			json_object_string_add(json_row, "local", local_str);
			json_object_string_add(json_row, "sptSetup", spt_str);
			json_object_string_add(json_row, "upTime", timebuf);
			json_object_object_add(json_group, src_str, json_row);
		} else {
			vty_out(vty, "%-15pPAs %15pPAs %15s %5c %3c %8s\n",
				&sa->sg.src, &sa->sg.grp, rp_str, local_str[0],
				spt_str[0], timebuf);
		}
	}

	if (uj)
		vty_json(vty, json);
}
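
/*
 * Print one SA cache entry in detail (RP, advertising peer, local/SPT
 * flags, uptime and state timer), either as a text block or as a JSON row
 * under its group object.
 */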
static void ip_msdp_show_sa_entry_detail(struct pim_msdp_sa *sa,
					 const char *src_str,
					 const char *grp_str, struct vty *vty,
					 bool uj, json_object *json)
{
	char rp_str[INET_ADDRSTRLEN];
	char peer_str[INET_ADDRSTRLEN];
	char timebuf[PIM_MSDP_UPTIME_STRLEN];
	char spt_str[8];
	char local_str[8];
	char statetimer[PIM_MSDP_TIMER_STRLEN];
	int64_t now;
	json_object *json_group = NULL;
	json_object *json_row = NULL;

	now = pim_time_monotonic_sec();
	pim_time_uptime(timebuf, sizeof(timebuf), now - sa->uptime);
	if (sa->flags & PIM_MSDP_SAF_PEER) {
		pim_inet4_dump("<rp?>", sa->rp, rp_str, sizeof(rp_str));
		pim_inet4_dump("<peer?>", sa->peer, peer_str, sizeof(peer_str));
		if (sa->up) {
			strlcpy(spt_str, "yes", sizeof(spt_str));
		} else {
			strlcpy(spt_str, "no", sizeof(spt_str));
		}
	} else {
		strlcpy(rp_str, "-", sizeof(rp_str));
		strlcpy(peer_str, "-", sizeof(peer_str));
		strlcpy(spt_str, "-", sizeof(spt_str));
	}
	if (sa->flags & PIM_MSDP_SAF_LOCAL) {
		strlcpy(local_str, "yes", sizeof(local_str));
	} else {
		strlcpy(local_str, "no", sizeof(local_str));
	}
	pim_time_timer_to_hhmmss(statetimer, sizeof(statetimer),
				 sa->sa_state_timer);
	if (uj) {
		json_object_object_get_ex(json, grp_str, &json_group);

		if (!json_group) {
			json_group = json_object_new_object();
			json_object_object_add(json, grp_str, json_group);
		}

		json_row = json_object_new_object();
		json_object_string_add(json_row, "source", src_str);
		json_object_string_add(json_row, "group", grp_str);
		json_object_string_add(json_row, "rp", rp_str);
		json_object_string_add(json_row, "local", local_str);
		json_object_string_add(json_row, "sptSetup", spt_str);
		json_object_string_add(json_row, "upTime", timebuf);
		json_object_string_add(json_row, "stateTimer", statetimer);
		json_object_object_add(json_group, src_str, json_row);
	} else {
		vty_out(vty, "SA : %s\n", sa->sg_str);
		vty_out(vty, " RP : %s\n", rp_str);
		vty_out(vty, " Peer : %s\n", peer_str);
		vty_out(vty, " Local : %s\n", local_str);
		vty_out(vty, " SPT Setup : %s\n", spt_str);
		vty_out(vty, " Uptime : %s\n", timebuf);
		vty_out(vty, " State Timer : %s\n", statetimer);
		vty_out(vty, "\n");
	}
}
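
/* Walk the whole SA cache and print every entry in detail. */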
static void ip_msdp_show_sa_detail(struct pim_instance *pim, struct vty *vty,
				   bool uj)
{
	struct listnode *sanode;
	struct pim_msdp_sa *sa;
	json_object *json = NULL;

	if (uj) {
		json = json_object_new_object();
	}

	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
		char src_str[PIM_ADDRSTRLEN];
		char grp_str[PIM_ADDRSTRLEN];

		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &sa->sg.grp);
		snprintfrr(src_str, sizeof(src_str), "%pPAs", &sa->sg.src);

		ip_msdp_show_sa_entry_detail(sa, src_str, grp_str, vty, uj,
					     json);
	}

	if (uj)
		vty_json(vty, json);
}

DEFUN (show_ip_msdp_sa_detail,
       show_ip_msdp_sa_detail_cmd,
       "show ip msdp [vrf NAME] sa detail [json]",
       SHOW_STR
       IP_STR
       MSDP_STR
       VRF_CMD_HELP_STR
       "MSDP active-source information\n"
       "Detailed output\n"
       JSON_STR)
{
	bool uj = use_json(argc, argv);
	int idx = 2;
	struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);

	if (!vrf)
		return CMD_WARNING;

	ip_msdp_show_sa_detail(vrf->info, vty, uj);

	return CMD_SUCCESS;
}

DEFUN (show_ip_msdp_sa_detail_vrf_all,
       show_ip_msdp_sa_detail_vrf_all_cmd,
       "show ip msdp vrf all sa detail [json]",
       SHOW_STR
       IP_STR
       MSDP_STR
       VRF_CMD_HELP_STR
       "MSDP active-source information\n"
       "Detailed output\n"
       JSON_STR)
{
	bool uj = use_json(argc, argv);
	struct vrf *vrf;
	bool first = true;

	if (uj)
		vty_out(vty, "{ ");
	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
		if (uj) {
			if (!first)
				vty_out(vty, ", ");
			vty_out(vty, " \"%s\": ", vrf->name);
			first = false;
		} else
			vty_out(vty, "VRF: %s\n", vrf->name);
		ip_msdp_show_sa_detail(vrf->info, vty, uj);
	}
	if (uj)
		vty_out(vty, "}\n");

	return CMD_SUCCESS;
}
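
/* Show SA cache entries whose source or group matches the given address. */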
static void ip_msdp_show_sa_addr(struct pim_instance *pim, struct vty *vty,
				 const char *addr, bool uj)
{
	struct listnode *sanode;
	struct pim_msdp_sa *sa;
	json_object *json = NULL;

	if (uj) {
		json = json_object_new_object();
	}

	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
		char src_str[PIM_ADDRSTRLEN];
		char grp_str[PIM_ADDRSTRLEN];

		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &sa->sg.grp);
		snprintfrr(src_str, sizeof(src_str), "%pPAs", &sa->sg.src);

		if (!strcmp(addr, src_str) || !strcmp(addr, grp_str)) {
			ip_msdp_show_sa_entry_detail(sa, src_str, grp_str, vty,
						     uj, json);
		}
	}

	if (uj)
		vty_json(vty, json);
}
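
/* Show the SA cache entry for a specific (source, group) pair, if present. */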
static void ip_msdp_show_sa_sg(struct pim_instance *pim, struct vty *vty,
			       const char *src, const char *grp, bool uj)
{
	struct listnode *sanode;
	struct pim_msdp_sa *sa;
	json_object *json = NULL;

	if (uj) {
		json = json_object_new_object();
	}

	for (ALL_LIST_ELEMENTS_RO(pim->msdp.sa_list, sanode, sa)) {
		char src_str[PIM_ADDRSTRLEN];
		char grp_str[PIM_ADDRSTRLEN];

		snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &sa->sg.grp);
		snprintfrr(src_str, sizeof(src_str), "%pPAs", &sa->sg.src);

		if (!strcmp(src, src_str) && !strcmp(grp, grp_str)) {
			ip_msdp_show_sa_entry_detail(sa, src_str, grp_str, vty,
						     uj, json);
		}
	}

	if (uj)
		vty_json(vty, json);
}

DEFUN (show_ip_msdp_sa_sg,
       show_ip_msdp_sa_sg_cmd,
       "show ip msdp [vrf NAME] sa [A.B.C.D [A.B.C.D]] [json]",
       SHOW_STR
       IP_STR
       MSDP_STR
       VRF_CMD_HELP_STR
       "MSDP active-source information\n"
       "source or group ip\n"
       "group ip\n"
       JSON_STR)
{
	bool uj = use_json(argc, argv);
	struct vrf *vrf;
	int idx = 2;

	vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);

	if (!vrf)
		return CMD_WARNING;

	char *src_ip = argv_find(argv, argc, "A.B.C.D", &idx) ? argv[idx++]->arg
							       : NULL;
	char *grp_ip = idx < argc && argv_find(argv, argc, "A.B.C.D", &idx)
			       ? argv[idx]->arg
			       : NULL;

	if (src_ip && grp_ip)
		ip_msdp_show_sa_sg(vrf->info, vty, src_ip, grp_ip, uj);
	else if (src_ip)
		ip_msdp_show_sa_addr(vrf->info, vty, src_ip, uj);
	else
		ip_msdp_show_sa(vrf->info, vty, uj);

	return CMD_SUCCESS;
}

DEFUN (show_ip_msdp_sa_sg_vrf_all,
       show_ip_msdp_sa_sg_vrf_all_cmd,
       "show ip msdp vrf all sa [A.B.C.D [A.B.C.D]] [json]",
       SHOW_STR
       IP_STR
       MSDP_STR
       VRF_CMD_HELP_STR
       "MSDP active-source information\n"
       "source or group ip\n"
       "group ip\n"
       JSON_STR)
{
	bool uj = use_json(argc, argv);
	struct vrf *vrf;
	bool first = true;
	int idx = 2;

	char *src_ip = argv_find(argv, argc, "A.B.C.D", &idx) ? argv[idx++]->arg
							       : NULL;
	char *grp_ip = idx < argc && argv_find(argv, argc, "A.B.C.D", &idx)
			       ? argv[idx]->arg
			       : NULL;

	if (uj)
		vty_out(vty, "{ ");
	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
		if (uj) {
			if (!first)
				vty_out(vty, ", ");
			vty_out(vty, " \"%s\": ", vrf->name);
			first = false;
		} else
			vty_out(vty, "VRF: %s\n", vrf->name);

		if (src_ip && grp_ip)
			ip_msdp_show_sa_sg(vrf->info, vty, src_ip, grp_ip, uj);
		else if (src_ip)
			ip_msdp_show_sa_addr(vrf->info, vty, src_ip, uj);
		else
			ip_msdp_show_sa(vrf->info, vty, uj);
	}
	if (uj)
		vty_out(vty, "}\n");

	return CMD_SUCCESS;
}

DEFPY_YANG(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd,
	   "[no] msdp log neighbor-events",
	   NO_STR
	   MSDP_STR
	   "MSDP log messages\n"
	   "MSDP log neighbor event messages\n")
{
	char xpath_value[XPATH_MAXLEN + 32];

	snprintf(xpath_value, sizeof(xpath_value), "%s/msdp/log-neighbor-events", VTY_CURR_XPATH);
	nb_cli_enqueue_change(vty, xpath_value, no ? NB_OP_DESTROY : NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_YANG(msdp_log_sa_changes, msdp_log_sa_changes_cmd,
	   "[no] msdp log sa-events",
	   NO_STR
	   MSDP_STR
	   "MSDP log messages\n"
	   "MSDP log SA event messages\n")
{
	char xpath_value[XPATH_MAXLEN + 32];

	snprintf(xpath_value, sizeof(xpath_value), "%s/msdp/log-sa-events", VTY_CURR_XPATH);
	nb_cli_enqueue_change(vty, xpath_value, no ? NB_OP_DESTROY : NB_OP_MODIFY, "true");

	return nb_cli_apply_changes(vty, NULL);
}
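
/*
 * Context passed while walking the VxLAN SG cache: the output target (vty
 * or JSON), the JSON group object currently being filled, and an optional
 * address filter.
 */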
struct pim_sg_cache_walk_data {
	struct vty *vty;
	json_object *json;
	json_object *json_group;
	struct in_addr addr;
	bool addr_match;
};
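
/* Print a single VxLAN SG cache entry, as a text row or a JSON object. */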
static void pim_show_vxlan_sg_entry(struct pim_vxlan_sg *vxlan_sg,
				    struct pim_sg_cache_walk_data *cwd)
{
	struct vty *vty = cwd->vty;
	json_object *json = cwd->json;
	json_object *json_row;
	bool installed = (vxlan_sg->up) ? true : false;
	const char *iif_name = vxlan_sg->iif ? vxlan_sg->iif->name : "-";
	const char *oif_name;

	if (pim_vxlan_is_orig_mroute(vxlan_sg))
		oif_name = vxlan_sg->orig_oif ? vxlan_sg->orig_oif->name : "";
	else
		oif_name = vxlan_sg->term_oif ? vxlan_sg->term_oif->name : "";

	if (cwd->addr_match && pim_addr_cmp(vxlan_sg->sg.src, cwd->addr) &&
	    pim_addr_cmp(vxlan_sg->sg.grp, cwd->addr)) {
		return;
	}
	if (json) {
		char src_str[PIM_ADDRSTRLEN];
		char grp_str[PIM_ADDRSTRLEN];

		snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
			   &vxlan_sg->sg.grp);
		snprintfrr(src_str, sizeof(src_str), "%pPAs",
			   &vxlan_sg->sg.src);

		json_object_object_get_ex(json, grp_str, &cwd->json_group);

		if (!cwd->json_group) {
			cwd->json_group = json_object_new_object();
			json_object_object_add(json, grp_str,
					       cwd->json_group);
		}

		json_row = json_object_new_object();
		json_object_string_add(json_row, "source", src_str);
		json_object_string_add(json_row, "group", grp_str);
		json_object_string_add(json_row, "input", iif_name);
		json_object_string_add(json_row, "output", oif_name);
		if (installed)
			json_object_boolean_true_add(json_row, "installed");
		else
			json_object_boolean_false_add(json_row, "installed");
		json_object_object_add(cwd->json_group, src_str, json_row);
	} else {
		vty_out(vty, "%-15pPAs %-15pPAs %-15s %-15s %-5s\n",
			&vxlan_sg->sg.src, &vxlan_sg->sg.grp, iif_name,
			oif_name, installed ? "I" : "");
	}
}

static void pim_show_vxlan_sg_hash_entry(struct hash_bucket *bucket, void *arg)
{
	pim_show_vxlan_sg_entry((struct pim_vxlan_sg *)bucket->data,
				(struct pim_sg_cache_walk_data *)arg);
}
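
/* Walk the whole VxLAN SG hash table and print every entry. */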
static void pim_show_vxlan_sg(struct pim_instance *pim,
			      struct vty *vty, bool uj)
{
	json_object *json = NULL;
	struct pim_sg_cache_walk_data cwd;

	if (uj) {
		json = json_object_new_object();
	} else {
		vty_out(vty, "Codes: I -> installed\n");
		vty_out(vty,
			"Source Group Input Output Flags\n");
	}

	memset(&cwd, 0, sizeof(cwd));
	cwd.vty = vty;
	cwd.json = json;
	hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);

	if (uj)
		vty_json(vty, json);
}

static void pim_show_vxlan_sg_match_addr(struct pim_instance *pim,
					 struct vty *vty, char *addr_str,
					 bool uj)
{
	json_object *json = NULL;
	struct pim_sg_cache_walk_data cwd;
	int result = 0;

	memset(&cwd, 0, sizeof(cwd));
	result = inet_pton(AF_INET, addr_str, &cwd.addr);
	if (result <= 0) {
		vty_out(vty, "Bad address %s: errno=%d: %s\n", addr_str,
			errno, safe_strerror(errno));
		return;
	}

	if (uj) {
		json = json_object_new_object();
	} else {
		vty_out(vty, "Codes: I -> installed\n");
		vty_out(vty,
			"Source Group Input Output Flags\n");
	}

	cwd.vty = vty;
	cwd.json = json;
	cwd.addr_match = true;
	hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);

	if (uj)
		vty_json(vty, json);
}
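
/* Look up and display a single VxLAN SG entry for the given source/group. */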
static void pim_show_vxlan_sg_one(struct pim_instance *pim,
				  struct vty *vty, char *src_str, char *grp_str,
				  bool uj)
{
	json_object *json = NULL;
	pim_sgaddr sg;
	int result = 0;
	struct pim_vxlan_sg *vxlan_sg;
	const char *iif_name;
	bool installed;
	const char *oif_name;

	result = inet_pton(AF_INET, src_str, &sg.src);
	if (result <= 0) {
		vty_out(vty, "Bad src address %s: errno=%d: %s\n", src_str,
			errno, safe_strerror(errno));
		return;
	}
	result = inet_pton(AF_INET, grp_str, &sg.grp);
	if (result <= 0) {
		vty_out(vty, "Bad grp address %s: errno=%d: %s\n", grp_str,
			errno, safe_strerror(errno));
		return;
	}

	if (uj)
		json = json_object_new_object();

	vxlan_sg = pim_vxlan_sg_find(pim, &sg);
	if (vxlan_sg) {
		installed = (vxlan_sg->up) ? true : false;
		iif_name = vxlan_sg->iif ? vxlan_sg->iif->name : "-";

		if (pim_vxlan_is_orig_mroute(vxlan_sg))
			oif_name =
				vxlan_sg->orig_oif ? vxlan_sg->orig_oif->name : "";
		else
			oif_name =
				vxlan_sg->term_oif ? vxlan_sg->term_oif->name : "";

		if (uj) {
			json_object_string_add(json, "source", src_str);
			json_object_string_add(json, "group", grp_str);
			json_object_string_add(json, "input", iif_name);
			json_object_string_add(json, "output", oif_name);
			if (installed)
				json_object_boolean_true_add(json, "installed");
			else
				json_object_boolean_false_add(json,
							      "installed");
		} else {
			vty_out(vty, "SG : %s\n", vxlan_sg->sg_str);
			vty_out(vty, " Input : %s\n", iif_name);
			vty_out(vty, " Output : %s\n", oif_name);
			vty_out(vty, " installed : %s\n",
				installed ? "yes" : "no");
		}
	}

	if (uj)
		vty_json(vty, json);
}

DEFUN (show_ip_pim_vxlan_sg,
       show_ip_pim_vxlan_sg_cmd,
       "show ip pim [vrf NAME] vxlan-groups [A.B.C.D [A.B.C.D]] [json]",
       SHOW_STR
       IP_STR
       PIM_STR
       VRF_CMD_HELP_STR
       "VxLAN BUM groups\n"
       "source or group ip\n"
       "group ip\n"
       JSON_STR)
{
	bool uj = use_json(argc, argv);
	struct vrf *vrf;
	int idx = 2;

	vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);

	if (!vrf)
		return CMD_WARNING;

	char *src_ip = argv_find(argv, argc, "A.B.C.D", &idx) ?
		argv[idx++]->arg : NULL;
	char *grp_ip = idx < argc && argv_find(argv, argc, "A.B.C.D", &idx) ?
		argv[idx]->arg : NULL;

	if (src_ip && grp_ip)
		pim_show_vxlan_sg_one(vrf->info, vty, src_ip, grp_ip, uj);
	else if (src_ip)
		pim_show_vxlan_sg_match_addr(vrf->info, vty, src_ip, uj);
	else
		pim_show_vxlan_sg(vrf->info, vty, uj);

	return CMD_SUCCESS;
}
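
/* Display the entries currently queued on the VxLAN SG work list. */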
static void pim_show_vxlan_sg_work(struct pim_instance *pim,
				   struct vty *vty, bool uj)
{
	json_object *json = NULL;
	struct pim_sg_cache_walk_data cwd;
	struct listnode *node;
	struct pim_vxlan_sg *vxlan_sg;

	if (uj) {
		json = json_object_new_object();
	} else {
		vty_out(vty, "Codes: I -> installed\n");
		vty_out(vty,
			"Source Group Input Flags\n");
	}

	memset(&cwd, 0, sizeof(cwd));
	cwd.vty = vty;
	cwd.json = json;
	for (ALL_LIST_ELEMENTS_RO(pim_vxlan_p->work_list, node, vxlan_sg))
		pim_show_vxlan_sg_entry(vxlan_sg, &cwd);

	if (uj)
		vty_json(vty, json);
}

DEFUN_HIDDEN (show_ip_pim_vxlan_sg_work,
	      show_ip_pim_vxlan_sg_work_cmd,
	      "show ip pim [vrf NAME] vxlan-work [json]",
	      SHOW_STR
	      IP_STR
	      PIM_STR
	      VRF_CMD_HELP_STR
	      "VxLAN work list\n"
	      JSON_STR)
{
	bool uj = use_json(argc, argv);
	struct vrf *vrf;
	int idx = 2;

	vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, uj);

	if (!vrf)
		return CMD_WARNING;

	pim_show_vxlan_sg_work(vrf->info, vty, uj);

	return CMD_SUCCESS;
}

DEFPY_YANG_HIDDEN (no_pim_mlag,
		   no_pim_mlag_cmd,
		   "no mlag",
		   NO_STR
		   "MLAG\n")
{
	char mlag_xpath[XPATH_MAXLEN];

	snprintf(mlag_xpath, sizeof(mlag_xpath), "./mlag");
	nb_cli_enqueue_change(vty, mlag_xpath, NB_OP_DESTROY, NULL);

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_ATTR(no_ip_pim_mlag,
	   no_ip_pim_mlag_cmd,
	   "no ip pim mlag",
	   NO_STR
	   IP_STR
	   PIM_STR
	   "MLAG\n",
	   CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
{
	char mlag_xpath[XPATH_MAXLEN];
	int ret;
	const char *vrfname;
	char xpath[XPATH_MAXLEN];
	int orig_node = -1;

	vrfname = pim_cli_get_vrf_name(vty);
	if (vrfname) {
		snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
			 "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
		nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
		if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
		    CMD_SUCCESS) {
			orig_node = vty->node;
			VTY_PUSH_XPATH(PIM_NODE, xpath);
		} else {
			return CMD_WARNING_CONFIG_FAILED;
		}
	} else {
		vty_out(vty, "%% Failed to determine vrf name\n");
		return CMD_WARNING_CONFIG_FAILED;
	}

	snprintf(mlag_xpath, sizeof(mlag_xpath), "./mlag");
	nb_cli_enqueue_change(vty, mlag_xpath, NB_OP_DESTROY, NULL);
	ret = nb_cli_apply_changes(vty, NULL);

	if (orig_node != -1) {
		vty->node = orig_node;
		vty->xpath_index--;
	}

	return ret;
}

DEFPY_YANG_HIDDEN (pim_mlag,
		   pim_mlag_cmd,
		   "mlag INTERFACE$iface role [primary|secondary]$role state [up|down]$state addr A.B.C.D$addr",
		   "MLAG\n"
		   "peerlink sub interface\n"
		   "MLAG role\n"
		   "MLAG role primary\n"
		   "MLAG role secondary\n"
		   "peer session state\n"
		   "peer session state up\n"
		   "peer session state down\n"
		   "configure PIP\n"
		   "unique ip address\n")
{
	char mlag_peerlink_rif_xpath[XPATH_MAXLEN];
	char mlag_my_role_xpath[XPATH_MAXLEN];
	char mlag_peer_state_xpath[XPATH_MAXLEN];
	char mlag_reg_address_xpath[XPATH_MAXLEN];

	snprintf(mlag_peerlink_rif_xpath, sizeof(mlag_peerlink_rif_xpath),
		 "./mlag/peerlink-rif");
	nb_cli_enqueue_change(vty, mlag_peerlink_rif_xpath, NB_OP_MODIFY, iface);

	snprintf(mlag_my_role_xpath, sizeof(mlag_my_role_xpath),
		 "./mlag/my-role");
	if (!strcmp(role, "primary")) {
		nb_cli_enqueue_change(vty, mlag_my_role_xpath, NB_OP_MODIFY,
				      "MLAG_ROLE_PRIMARY");
	} else if (!strcmp(role, "secondary")) {
		nb_cli_enqueue_change(vty, mlag_my_role_xpath, NB_OP_MODIFY,
				      "MLAG_ROLE_SECONDARY");
	} else {
		vty_out(vty, "unknown MLAG role %s\n", role);
		return CMD_WARNING;
	}

	snprintf(mlag_peer_state_xpath, sizeof(mlag_peer_state_xpath),
		 "./mlag/peer-state");
	if (!strcmp(state, "up")) {
		nb_cli_enqueue_change(vty, mlag_peer_state_xpath, NB_OP_MODIFY,
				      "true");
	} else if (!strcmp(state, "down")) {
		nb_cli_enqueue_change(vty, mlag_peer_state_xpath, NB_OP_MODIFY,
				      "false");
	} else {
		vty_out(vty, "unknown MLAG state %s\n", state);
		return CMD_WARNING;
	}

	snprintf(mlag_reg_address_xpath, sizeof(mlag_reg_address_xpath),
		 "./mlag/reg-address");
	nb_cli_enqueue_change(vty, mlag_reg_address_xpath, NB_OP_MODIFY,
			      addr_str);

	return nb_cli_apply_changes(vty, NULL);
}

DEFPY_ATTR(ip_pim_mlag,
	   ip_pim_mlag_cmd,
	   "ip pim mlag INTERFACE$iface role [primary|secondary]$role state [up|down]$state addr A.B.C.D$addr",
	   IP_STR
	   PIM_STR
	   "MLAG\n"
	   "peerlink sub interface\n"
	   "MLAG role\n"
	   "MLAG role primary\n"
	   "MLAG role secondary\n"
	   "peer session state\n"
	   "peer session state up\n"
	   "peer session state down\n"
	   "configure PIP\n"
	   "unique ip address\n",
	   CMD_ATTR_HIDDEN | CMD_ATTR_DEPRECATED)
{
	char mlag_peerlink_rif_xpath[XPATH_MAXLEN];
	char mlag_my_role_xpath[XPATH_MAXLEN];
	char mlag_peer_state_xpath[XPATH_MAXLEN];
	char mlag_reg_address_xpath[XPATH_MAXLEN];
	int ret;
	const char *vrfname;
	char xpath[XPATH_MAXLEN];
	int orig_node = -1;

	vrfname = pim_cli_get_vrf_name(vty);
	if (vrfname) {
		snprintf(xpath, sizeof(xpath), FRR_PIM_VRF_XPATH,
			 "frr-pim:pimd", "pim", vrfname, FRR_PIM_AF_XPATH_VAL);
		nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
		if (nb_cli_apply_changes_clear_pending(vty, NULL) ==
		    CMD_SUCCESS) {
			orig_node = vty->node;
			VTY_PUSH_XPATH(PIM_NODE, xpath);
		} else {
			return CMD_WARNING_CONFIG_FAILED;
		}
	} else {
		vty_out(vty, "%% Failed to determine vrf name\n");
		return CMD_WARNING_CONFIG_FAILED;
	}

	snprintf(mlag_peerlink_rif_xpath, sizeof(mlag_peerlink_rif_xpath),
		 "./mlag/peerlink-rif");
	nb_cli_enqueue_change(vty, mlag_peerlink_rif_xpath, NB_OP_MODIFY, iface);

	snprintf(mlag_my_role_xpath, sizeof(mlag_my_role_xpath),
		 "./mlag/my-role");
	if (!strcmp(role, "primary")) {
		nb_cli_enqueue_change(vty, mlag_my_role_xpath, NB_OP_MODIFY,
				      "MLAG_ROLE_PRIMARY");
	} else if (!strcmp(role, "secondary")) {
		nb_cli_enqueue_change(vty, mlag_my_role_xpath, NB_OP_MODIFY,
				      "MLAG_ROLE_SECONDARY");
	} else {
		vty_out(vty, "unknown MLAG role %s\n", role);
		ret = CMD_WARNING;
		goto done;
	}

	snprintf(mlag_peer_state_xpath, sizeof(mlag_peer_state_xpath),
		 "./mlag/peer-state");
	if (!strcmp(state, "up")) {
		nb_cli_enqueue_change(vty, mlag_peer_state_xpath, NB_OP_MODIFY,
				      "true");
	} else if (!strcmp(state, "down")) {
		nb_cli_enqueue_change(vty, mlag_peer_state_xpath, NB_OP_MODIFY,
				      "false");
	} else {
		vty_out(vty, "unknown MLAG state %s\n", state);
		ret = CMD_WARNING;
		goto done;
	}

	snprintf(mlag_reg_address_xpath, sizeof(mlag_reg_address_xpath),
		 "./mlag/reg-address");
	nb_cli_enqueue_change(vty, mlag_reg_address_xpath, NB_OP_MODIFY,
			      addr_str);

	ret = nb_cli_apply_changes(vty, NULL);

done:
	if (orig_node != -1) {
		vty->node = orig_node;
		vty->xpath_index--;
	}

	return ret;
}

DEFPY_YANG(pim_rpf_lookup_mode, pim_rpf_lookup_mode_cmd,
	   "[no] rpf-lookup-mode\
	   ![urib-only|mrib-only|mrib-then-urib|lower-distance|longer-prefix]$mode\
	   [{group-list PREFIX_LIST$grp_list|source-list PREFIX_LIST$src_list}]",
	   NO_STR
	   "RPF lookup behavior\n"
	   "Lookup in unicast RIB only\n"
	   "Lookup in multicast RIB only\n"
	   "Try multicast RIB first, fall back to unicast RIB\n"
	   "Lookup both, use entry with lower distance\n"
	   "Lookup both, use entry with longer prefix\n"
	   "Set a specific mode matching group\n"
	   "Multicast group prefix list\n"
	   "Set a specific mode matching source address\n"
	   "Source address prefix list\n")
{
	if (no) {
		nb_cli_enqueue_change(vty, "./mode", NB_OP_DESTROY, NULL);
		nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
	} else {
		nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL);
		nb_cli_enqueue_change(vty, "./mode", NB_OP_MODIFY, mode);
	}

	return nb_cli_apply_changes(vty, "./mcast-rpf-lookup[group-list='%s'][source-list='%s']",
				    (grp_list ? grp_list : ""), (src_list ? src_list : ""));
}

struct cmd_node pim_node = {
	.name = "pim",
	.node = PIM_NODE,
	.parent_node = CONFIG_NODE,
	.prompt = "%s(config-pim)# ",
	.config_write = pim_router_config_write,
};

/*
 * This function installs all of the deprecated PIM configuration commands
 * that live in the global config and/or VRF nodes. This configuration has
 * been moved to the new 'router pim' config node, like all the other
 * routing protocols.
 * No new commands should be added here.
 */
static void pim_install_deprecated(void)
{
	install_element(CONFIG_NODE, &ip_pim_rp_cmd);
	install_element(VRF_NODE, &ip_pim_rp_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_rp_cmd);
	install_element(VRF_NODE, &no_ip_pim_rp_cmd);
	install_element(CONFIG_NODE, &ip_pim_rp_prefix_list_cmd);
	install_element(VRF_NODE, &ip_pim_rp_prefix_list_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_rp_prefix_list_cmd);
	install_element(VRF_NODE, &no_ip_pim_rp_prefix_list_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_ssm_prefix_list_cmd);
	install_element(VRF_NODE, &no_ip_pim_ssm_prefix_list_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_ssm_prefix_list_name_cmd);
	install_element(VRF_NODE, &no_ip_pim_ssm_prefix_list_name_cmd);
	install_element(CONFIG_NODE, &ip_pim_ssm_prefix_list_cmd);
	install_element(VRF_NODE, &ip_pim_ssm_prefix_list_cmd);
	install_element(CONFIG_NODE, &ip_pim_register_suppress_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_register_suppress_cmd);
	install_element(CONFIG_NODE, &ip_pim_spt_switchover_infinity_cmd);
	install_element(VRF_NODE, &ip_pim_spt_switchover_infinity_cmd);
	install_element(CONFIG_NODE, &ip_pim_spt_switchover_infinity_plist_cmd);
	install_element(VRF_NODE, &ip_pim_spt_switchover_infinity_plist_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_spt_switchover_infinity_cmd);
	install_element(VRF_NODE, &no_ip_pim_spt_switchover_infinity_cmd);
	install_element(CONFIG_NODE,
			&no_ip_pim_spt_switchover_infinity_plist_cmd);
	install_element(VRF_NODE, &no_ip_pim_spt_switchover_infinity_plist_cmd);
	install_element(CONFIG_NODE, &ip_pim_register_accept_list_cmd);
	install_element(VRF_NODE, &ip_pim_register_accept_list_cmd);
	install_element(CONFIG_NODE, &ip_pim_joinprune_time_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_joinprune_time_cmd);
	install_element(CONFIG_NODE, &ip_pim_keep_alive_cmd);
	install_element(VRF_NODE, &ip_pim_keep_alive_cmd);
	install_element(CONFIG_NODE, &ip_pim_rp_keep_alive_cmd);
	install_element(VRF_NODE, &ip_pim_rp_keep_alive_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_keep_alive_cmd);
	install_element(VRF_NODE, &no_ip_pim_keep_alive_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_rp_keep_alive_cmd);
	install_element(VRF_NODE, &no_ip_pim_rp_keep_alive_cmd);
	install_element(CONFIG_NODE, &ip_pim_packets_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_packets_cmd);
	install_element(CONFIG_NODE, &ip_pim_v6_secondary_cmd);
	install_element(VRF_NODE, &ip_pim_v6_secondary_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_v6_secondary_cmd);
	install_element(VRF_NODE, &no_ip_pim_v6_secondary_cmd);
	install_element(CONFIG_NODE, &ip_pim_ecmp_cmd);
	install_element(VRF_NODE, &ip_pim_ecmp_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_ecmp_cmd);
	install_element(VRF_NODE, &no_ip_pim_ecmp_cmd);
	install_element(CONFIG_NODE, &ip_pim_ecmp_rebalance_cmd);
	install_element(VRF_NODE, &ip_pim_ecmp_rebalance_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_ecmp_rebalance_cmd);
	install_element(VRF_NODE, &no_ip_pim_ecmp_rebalance_cmd);
	install_element(CONFIG_NODE, &ip_pim_mlag_cmd);
	install_element(CONFIG_NODE, &no_ip_pim_mlag_cmd);

	install_element(CONFIG_NODE, &ip_ssmpingd_cmd);
	install_element(VRF_NODE, &ip_ssmpingd_cmd);
	install_element(CONFIG_NODE, &no_ip_ssmpingd_cmd);
	install_element(VRF_NODE, &no_ip_ssmpingd_cmd);

	install_element(CONFIG_NODE, &ip_msdp_peer_cmd);
	install_element(VRF_NODE, &ip_msdp_peer_cmd);
	install_element(CONFIG_NODE, &no_ip_msdp_peer_cmd);
	install_element(VRF_NODE, &no_ip_msdp_peer_cmd);
	install_element(CONFIG_NODE, &ip_msdp_timers_cmd);
	install_element(VRF_NODE, &ip_msdp_timers_cmd);
	install_element(CONFIG_NODE, &no_ip_msdp_timers_cmd);
	install_element(VRF_NODE, &no_ip_msdp_timers_cmd);
	install_element(CONFIG_NODE, &ip_msdp_mesh_group_member_cmd);
	install_element(VRF_NODE, &ip_msdp_mesh_group_member_cmd);
	install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_member_cmd);
	install_element(VRF_NODE, &no_ip_msdp_mesh_group_member_cmd);
	install_element(CONFIG_NODE, &ip_msdp_mesh_group_source_cmd);
	install_element(VRF_NODE, &ip_msdp_mesh_group_source_cmd);
	install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_source_cmd);
	install_element(VRF_NODE, &no_ip_msdp_mesh_group_source_cmd);
	install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_cmd);
	install_element(VRF_NODE, &no_ip_msdp_mesh_group_cmd);
}
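
/* Install the PIM CLI node and register pimd's commands with the CLI. */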
|
|
|
|
|
|
|
|
void pim_cmd_init(void)
|
|
|
|
{
|
|
|
|
if_cmd_init(pim_interface_config_write);
|
|
|
|
|
|
|
|
install_node(&debug_node);
|
|
|
|
|
|
|
|
install_element(CONFIG_NODE, &router_pim_cmd);
|
|
|
|
install_element(CONFIG_NODE, &no_router_pim_cmd);
|
|
|
|
|
|
|
|
	install_node(&pim_node);
	install_default(PIM_NODE);

	install_element(PIM_NODE, &pim_rp_cmd);
	install_element(PIM_NODE, &no_pim_rp_cmd);
	install_element(PIM_NODE, &pim_rp_prefix_list_cmd);
	install_element(PIM_NODE, &no_pim_rp_prefix_list_cmd);
	install_element(PIM_NODE, &pim_autorp_discovery_cmd);
	install_element(PIM_NODE, &pim_autorp_announce_rp_cmd);
	install_element(PIM_NODE, &pim_autorp_announce_scope_int_cmd);
	install_element(PIM_NODE, &pim_autorp_send_rp_discovery_cmd);
	install_element(PIM_NODE, &pim_autorp_send_rp_discovery_scope_int_cmd);
	install_element(PIM_NODE, &no_pim_ssm_prefix_list_cmd);
	install_element(PIM_NODE, &no_pim_ssm_prefix_list_name_cmd);
	install_element(PIM_NODE, &pim_ssm_prefix_list_cmd);
	install_element(PIM_NODE, &pim_register_suppress_cmd);
	install_element(PIM_NODE, &no_pim_register_suppress_cmd);
	install_element(PIM_NODE, &pim_spt_switchover_infinity_cmd);
	install_element(PIM_NODE, &pim_spt_switchover_infinity_plist_cmd);
	install_element(PIM_NODE, &no_pim_spt_switchover_infinity_cmd);
	install_element(PIM_NODE, &no_pim_spt_switchover_infinity_plist_cmd);
	install_element(PIM_NODE, &pim_register_accept_list_cmd);
	install_element(PIM_NODE, &pim_joinprune_time_cmd);
	install_element(PIM_NODE, &no_pim_joinprune_time_cmd);
	install_element(PIM_NODE, &pim_keep_alive_cmd);
	install_element(PIM_NODE, &pim_rp_keep_alive_cmd);
	install_element(PIM_NODE, &no_pim_keep_alive_cmd);
	install_element(PIM_NODE, &no_pim_rp_keep_alive_cmd);
	install_element(PIM_NODE, &pim_packets_cmd);
	install_element(PIM_NODE, &no_pim_packets_cmd);
	install_element(PIM_NODE, &pim_v6_secondary_cmd);
	install_element(PIM_NODE, &no_pim_v6_secondary_cmd);
	install_element(PIM_NODE, &pim_ecmp_cmd);
	install_element(PIM_NODE, &no_pim_ecmp_cmd);
	install_element(PIM_NODE, &pim_ecmp_rebalance_cmd);
	install_element(PIM_NODE, &no_pim_ecmp_rebalance_cmd);
	install_element(PIM_NODE, &pim_mlag_cmd);
	install_element(PIM_NODE, &no_pim_mlag_cmd);

	install_element(PIM_NODE, &pim_ssmpingd_cmd);
	install_element(PIM_NODE, &no_pim_ssmpingd_cmd);

	install_element(PIM_NODE, &pim_msdp_peer_cmd);
	install_element(PIM_NODE, &no_pim_msdp_peer_cmd);
	install_element(PIM_NODE, &msdp_peer_md5_cmd);
	install_element(PIM_NODE, &no_msdp_peer_md5_cmd);
	install_element(PIM_NODE, &pim_msdp_timers_cmd);
	install_element(PIM_NODE, &no_pim_msdp_timers_cmd);
	install_element(PIM_NODE, &msdp_peer_sa_filter_cmd);
	install_element(PIM_NODE, &no_ip_msdp_peer_sa_filter_cmd);
	install_element(PIM_NODE, &pim_msdp_mesh_group_member_cmd);
	install_element(PIM_NODE, &no_pim_msdp_mesh_group_member_cmd);
	install_element(PIM_NODE, &pim_msdp_mesh_group_source_cmd);
	install_element(PIM_NODE, &no_pim_msdp_mesh_group_source_cmd);
	install_element(PIM_NODE, &no_pim_msdp_mesh_group_cmd);
	install_element(PIM_NODE, &msdp_log_neighbor_changes_cmd);
	install_element(PIM_NODE, &msdp_log_sa_changes_cmd);
	install_element(PIM_NODE, &msdp_shutdown_cmd);
	install_element(PIM_NODE, &msdp_peer_sa_limit_cmd);
	install_element(PIM_NODE, &msdp_originator_id_cmd);

	install_element(PIM_NODE, &pim_bsr_candidate_rp_cmd);
	install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd);
	install_element(PIM_NODE, &pim_bsr_candidate_bsr_cmd);

	install_element(PIM_NODE, &pim_rpf_lookup_mode_cmd);
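
	/* Per-interface IGMP and PIM commands (INTERFACE_NODE). */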
	install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_join_group_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_static_group_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_version_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_igmp_version_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_query_interval_cmd);
	install_element(INTERFACE_NODE,
			&interface_no_ip_igmp_query_interval_cmd);
	install_element(INTERFACE_NODE,
			&interface_ip_igmp_query_max_response_time_cmd);
	install_element(INTERFACE_NODE,
			&interface_no_ip_igmp_query_max_response_time_cmd);
	install_element(INTERFACE_NODE,
			&interface_ip_igmp_query_max_response_time_dsec_cmd);
	install_element(INTERFACE_NODE,
			&interface_no_ip_igmp_query_max_response_time_dsec_cmd);
	install_element(INTERFACE_NODE,
			&interface_ip_igmp_last_member_query_count_cmd);
	install_element(INTERFACE_NODE,
			&interface_no_ip_igmp_last_member_query_count_cmd);
	install_element(INTERFACE_NODE,
			&interface_ip_igmp_last_member_query_interval_cmd);
	install_element(INTERFACE_NODE,
			&interface_no_ip_igmp_last_member_query_interval_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_proxy_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_limits_cmd);
	install_element(INTERFACE_NODE, &no_interface_ip_igmp_limits_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_immediate_leave_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_activeactive_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_ssm_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_ssm_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_sm_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_sm_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_drprio_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_drprio_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_hello_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_hello_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_boundary_oil_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_boundary_oil_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_boundary_acl_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_query_generate_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_neighbor_prefix_list_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_neighbor_prefix_list_cmd);

	/* Static mroutes */
	install_element(INTERFACE_NODE, &interface_ip_mroute_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_mroute_cmd);

	install_element(INTERFACE_NODE, &interface_pim_use_source_cmd);
	install_element(INTERFACE_NODE, &interface_no_pim_use_source_cmd);

	/* Install BSM command */
	install_element(INTERFACE_NODE, &ip_pim_bsm_cmd);
	install_element(INTERFACE_NODE, &no_ip_pim_bsm_cmd);
	install_element(INTERFACE_NODE, &ip_pim_ucast_bsm_cmd);
	install_element(INTERFACE_NODE, &no_ip_pim_ucast_bsm_cmd);

	/* Install BFD command */
	install_element(INTERFACE_NODE, &ip_pim_bfd_cmd);
	install_element(INTERFACE_NODE, &ip_pim_bfd_param_cmd);
	install_element(INTERFACE_NODE, &no_ip_pim_bfd_profile_cmd);
	install_element(INTERFACE_NODE, &no_ip_pim_bfd_cmd);
#if HAVE_BFDD == 0
	install_element(INTERFACE_NODE, &no_ip_pim_bfd_param_cmd);
#endif /* !HAVE_BFDD */
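
	/* "show" commands (VIEW_NODE). */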
	install_element(VIEW_NODE, &show_ip_igmp_interface_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_interface_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_join_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_join_group_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_join_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_join_group_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_static_group_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_static_group_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_proxy_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_proxy_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_groups_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_groups_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_groups_retransmissions_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_sources_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_sources_retransmissions_cmd);
	install_element(VIEW_NODE, &show_ip_igmp_statistics_cmd);
	install_element(VIEW_NODE, &show_ip_pim_assert_cmd);
	install_element(VIEW_NODE, &show_ip_pim_assert_internal_cmd);
	install_element(VIEW_NODE, &show_ip_pim_assert_metric_cmd);
	install_element(VIEW_NODE, &show_ip_pim_assert_winner_metric_cmd);
	install_element(VIEW_NODE, &show_ip_pim_interface_traffic_cmd);
	install_element(VIEW_NODE, &show_ip_pim_interface_cmd);
	install_element(VIEW_NODE, &show_ip_pim_interface_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_join_cmd);
	install_element(VIEW_NODE, &show_ip_pim_join_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_jp_agg_cmd);
	install_element(VIEW_NODE, &show_ip_pim_local_membership_cmd);
	install_element(VIEW_NODE, &show_ip_pim_mlag_summary_cmd);
	install_element(VIEW_NODE, &show_ip_pim_mlag_up_cmd);
	install_element(VIEW_NODE, &show_ip_pim_mlag_up_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_neighbor_cmd);
	install_element(VIEW_NODE, &show_ip_pim_neighbor_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_rpf_cmd);
	install_element(VIEW_NODE, &show_ip_pim_rpf_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_secondary_cmd);
	install_element(VIEW_NODE, &show_ip_pim_state_cmd);
	install_element(VIEW_NODE, &show_ip_pim_state_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_upstream_cmd);
	install_element(VIEW_NODE, &show_ip_pim_upstream_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_channel_cmd);
	install_element(VIEW_NODE, &show_ip_pim_upstream_join_desired_cmd);
	install_element(VIEW_NODE, &show_ip_pim_upstream_rpf_cmd);
	install_element(VIEW_NODE, &show_ip_pim_rp_cmd);
	install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_autorp_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsr_cmd);
	install_element(VIEW_NODE, &show_ip_multicast_cmd);
	install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_multicast_count_cmd);
	install_element(VIEW_NODE, &show_ip_multicast_count_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_mroute_cmd);
	install_element(VIEW_NODE, &show_ip_mroute_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_mroute_count_cmd);
	install_element(VIEW_NODE, &show_ip_mroute_count_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_mroute_summary_cmd);
	install_element(VIEW_NODE, &show_ip_mroute_summary_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_rib_cmd);
	install_element(VIEW_NODE, &show_ip_ssmpingd_cmd);
	install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
	install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
	install_element(VIEW_NODE, &show_ip_rpf_source_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsr_rpinfo_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsr_cand_bsr_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsr_cand_rp_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsr_rpdb_cmd);
	install_element(VIEW_NODE, &show_ip_pim_bsr_groups_cmd);
	install_element(VIEW_NODE, &show_ip_pim_statistics_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_peer_detail_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_peer_detail_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_sa_detail_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_sa_detail_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_sa_sg_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_sa_sg_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_mesh_group_cmd);
	install_element(VIEW_NODE, &show_ip_msdp_mesh_group_vrf_all_cmd);
	install_element(VIEW_NODE, &show_ip_pim_ssm_range_cmd);
	install_element(VIEW_NODE, &show_ip_pim_group_type_cmd);
	install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_cmd);
	install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_work_cmd);

	install_element(ENABLE_NODE, &pim_test_sg_keepalive_cmd);
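
	/* Operational "clear" commands (ENABLE_NODE). */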
	install_element(ENABLE_NODE, &clear_ip_mroute_count_cmd);
	install_element(ENABLE_NODE, &clear_ip_msdp_peer_cmd);
	install_element(ENABLE_NODE, &clear_ip_interfaces_cmd);
	install_element(ENABLE_NODE, &clear_ip_igmp_interfaces_cmd);
	install_element(ENABLE_NODE, &clear_ip_mroute_cmd);
	install_element(ENABLE_NODE, &clear_ip_pim_interfaces_cmd);
	install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd);
	install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd);
	install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd);
	install_element(ENABLE_NODE, &clear_ip_pim_bsr_db_cmd);
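
	/* Debugging: "show debugging pim" plus per-subsystem debug toggles,
	 * registered at both ENABLE_NODE and CONFIG_NODE where applicable.
	 */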
	install_element(ENABLE_NODE, &show_debugging_pim_cmd);

	install_element(ENABLE_NODE, &debug_igmp_cmd);
	install_element(CONFIG_NODE, &debug_igmp_cmd);
	install_element(ENABLE_NODE, &no_debug_igmp_cmd);
	install_element(CONFIG_NODE, &no_debug_igmp_cmd);
	install_element(ENABLE_NODE, &debug_igmp_events_cmd);
	install_element(CONFIG_NODE, &debug_igmp_events_cmd);
	install_element(ENABLE_NODE, &no_debug_igmp_events_cmd);
	install_element(CONFIG_NODE, &no_debug_igmp_events_cmd);
	install_element(ENABLE_NODE, &debug_igmp_packets_cmd);
	install_element(CONFIG_NODE, &debug_igmp_packets_cmd);
	install_element(ENABLE_NODE, &no_debug_igmp_packets_cmd);
	install_element(CONFIG_NODE, &no_debug_igmp_packets_cmd);
	install_element(ENABLE_NODE, &debug_igmp_trace_cmd);
	install_element(CONFIG_NODE, &debug_igmp_trace_cmd);
	install_element(ENABLE_NODE, &no_debug_igmp_trace_cmd);
	install_element(CONFIG_NODE, &no_debug_igmp_trace_cmd);
	install_element(ENABLE_NODE, &debug_igmp_trace_detail_cmd);
	install_element(CONFIG_NODE, &debug_igmp_trace_detail_cmd);
	install_element(ENABLE_NODE, &no_debug_igmp_trace_detail_cmd);
	install_element(CONFIG_NODE, &no_debug_igmp_trace_detail_cmd);
	install_element(ENABLE_NODE, &debug_mroute_cmd);
	install_element(CONFIG_NODE, &debug_mroute_cmd);
	install_element(ENABLE_NODE, &debug_mroute_detail_cmd);
	install_element(CONFIG_NODE, &debug_mroute_detail_cmd);
	install_element(ENABLE_NODE, &no_debug_mroute_cmd);
	install_element(CONFIG_NODE, &no_debug_mroute_cmd);
	install_element(ENABLE_NODE, &no_debug_mroute_detail_cmd);
	install_element(CONFIG_NODE, &no_debug_mroute_detail_cmd);
	install_element(ENABLE_NODE, &debug_pim_static_cmd);
	install_element(CONFIG_NODE, &debug_pim_static_cmd);
	install_element(ENABLE_NODE, &no_debug_pim_static_cmd);
	install_element(CONFIG_NODE, &no_debug_pim_static_cmd);
	install_element(ENABLE_NODE, &debug_pim_cmd);
	install_element(CONFIG_NODE, &debug_pim_cmd);
	install_element(ENABLE_NODE, &debug_pim_nht_cmd);
	install_element(CONFIG_NODE, &debug_pim_nht_cmd);
	install_element(ENABLE_NODE, &debug_pim_nht_det_cmd);
	install_element(CONFIG_NODE, &debug_pim_nht_det_cmd);
	install_element(ENABLE_NODE, &debug_pim_nht_rp_cmd);
	install_element(CONFIG_NODE, &debug_pim_nht_rp_cmd);
	install_element(ENABLE_NODE, &no_debug_pim_nht_rp_cmd);
	install_element(CONFIG_NODE, &no_debug_pim_nht_rp_cmd);
	install_element(ENABLE_NODE, &debug_pim_events_cmd);
	install_element(CONFIG_NODE, &debug_pim_events_cmd);
	install_element(ENABLE_NODE, &debug_pim_packets_cmd);
	install_element(CONFIG_NODE, &debug_pim_packets_cmd);
	install_element(ENABLE_NODE, &debug_pim_packetdump_send_cmd);
	install_element(CONFIG_NODE, &debug_pim_packetdump_send_cmd);
	install_element(ENABLE_NODE, &debug_pim_packetdump_recv_cmd);
	install_element(CONFIG_NODE, &debug_pim_packetdump_recv_cmd);
	install_element(ENABLE_NODE, &debug_pim_trace_cmd);
	install_element(CONFIG_NODE, &debug_pim_trace_cmd);
	install_element(ENABLE_NODE, &debug_pim_trace_detail_cmd);
	install_element(CONFIG_NODE, &debug_pim_trace_detail_cmd);
	install_element(ENABLE_NODE, &debug_ssmpingd_cmd);
	install_element(CONFIG_NODE, &debug_ssmpingd_cmd);
	install_element(ENABLE_NODE, &debug_autorp_cmd);
	install_element(ENABLE_NODE, &no_debug_autorp_cmd);
	install_element(ENABLE_NODE, &no_debug_ssmpingd_cmd);
	install_element(CONFIG_NODE, &no_debug_ssmpingd_cmd);
	install_element(ENABLE_NODE, &debug_pim_zebra_cmd);
	install_element(CONFIG_NODE, &debug_pim_zebra_cmd);
	install_element(ENABLE_NODE, &debug_pim_mlag_cmd);
	install_element(CONFIG_NODE, &debug_pim_mlag_cmd);
	install_element(ENABLE_NODE, &no_debug_pim_mlag_cmd);
	install_element(CONFIG_NODE, &no_debug_pim_mlag_cmd);
	install_element(ENABLE_NODE, &debug_pim_vxlan_cmd);
	install_element(CONFIG_NODE, &debug_pim_vxlan_cmd);
	install_element(ENABLE_NODE, &no_debug_pim_vxlan_cmd);
	install_element(CONFIG_NODE, &no_debug_pim_vxlan_cmd);
	install_element(ENABLE_NODE, &debug_msdp_cmd);
	install_element(CONFIG_NODE, &debug_msdp_cmd);
	install_element(ENABLE_NODE, &no_debug_msdp_cmd);
	install_element(CONFIG_NODE, &no_debug_msdp_cmd);
	install_element(ENABLE_NODE, &debug_msdp_events_cmd);
	install_element(CONFIG_NODE, &debug_msdp_events_cmd);
	install_element(ENABLE_NODE, &no_debug_msdp_events_cmd);
	install_element(CONFIG_NODE, &no_debug_msdp_events_cmd);
	install_element(ENABLE_NODE, &debug_msdp_packets_cmd);
	install_element(CONFIG_NODE, &debug_msdp_packets_cmd);
	install_element(ENABLE_NODE, &no_debug_msdp_packets_cmd);
	install_element(CONFIG_NODE, &no_debug_msdp_packets_cmd);
	install_element(ENABLE_NODE, &debug_mtrace_cmd);
	install_element(CONFIG_NODE, &debug_mtrace_cmd);
	install_element(ENABLE_NODE, &no_debug_mtrace_cmd);
	install_element(CONFIG_NODE, &no_debug_mtrace_cmd);
	install_element(ENABLE_NODE, &debug_bsm_cmd);
	install_element(CONFIG_NODE, &debug_bsm_cmd);
	install_element(ENABLE_NODE, &no_debug_bsm_cmd);
	install_element(CONFIG_NODE, &no_debug_bsm_cmd);
	install_element(CONFIG_NODE, &debug_autorp_cmd);
	install_element(CONFIG_NODE, &no_debug_autorp_cmd);
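
	/* IGMP group watermark commands; pim_install_deprecated() then
	 * registers the remaining deprecated command forms.
	 */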
	install_element(CONFIG_NODE, &ip_igmp_group_watermark_cmd);
	install_element(VRF_NODE, &ip_igmp_group_watermark_cmd);
	install_element(CONFIG_NODE, &no_ip_igmp_group_watermark_cmd);
	install_element(VRF_NODE, &no_ip_igmp_group_watermark_cmd);

	pim_install_deprecated();
}