Merge pull request #18111 from opensourcerouting/gmp-leave

pimd,pim6d: support IGMPv2/MLDv1 immediate leave
Donald Sharp 2025-02-19 07:21:00 -05:00 committed by GitHub
commit 23fc4ce666
13 changed files with 288 additions and 37 deletions

View file

@@ -424,6 +424,10 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
   interfaces on this interface. Join-groups on other interfaces will
   also be proxied. The default version is v3.

.. clicmd:: ip igmp immediate-leave

   Immediately leaves an IGMP group when receiving an IGMPv2 Leave packet.

.. clicmd:: ip igmp query-interval (1-65535)

   Set the IGMP query interval that PIM will use.
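For quick reference, enabling the new command is a one-line interface setting; a minimal config-file sketch (the interface name is illustrative, not taken from the patch):

```
interface eth0
 ip igmp immediate-leave
```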

View file

@@ -245,6 +245,10 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
   Join multicast group or source-group on an interface.

.. clicmd:: ipv6 mld immediate-leave

   Immediately leaves an MLD group when receiving an MLDv1 Done packet.

.. clicmd:: ipv6 mld query-interval (1-65535)

   Set the MLD query interval that PIM will use.
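The IPv6 counterpart is configured the same way; again a hedged sketch with an illustrative interface name:

```
interface eth0
 ipv6 mld immediate-leave
```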

View file

@@ -1649,6 +1649,19 @@ ALIAS_YANG(interface_ipv6_mld_limits,
      "Limit number of MLDv2 sources to track\n"
      "Limit number of MLD group memberships to track\n")

DEFPY_YANG(interface_ipv6_mld_immediate_leave,
           interface_ipv6_mld_immediate_leave_cmd,
           "[no] ipv6 mld immediate-leave",
           NO_STR
           IPV6_STR
           IFACE_MLD_STR
           "Immediately drop group memberships on receiving Leave (MLDv1 only)\n")
{
	nb_cli_enqueue_change(vty, "./immediate-leave", NB_OP_MODIFY, no ? "false" : "true");
	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}

DEFPY (interface_ipv6_mld_query_interval,
       interface_ipv6_mld_query_interval_cmd,
       "ipv6 mld query-interval (1-65535)$q_interval",
@@ -2944,6 +2957,7 @@ void pim_cmd_init(void)
	install_element(INTERFACE_NODE, &interface_ipv6_mld_static_group_cmd);
	install_element(INTERFACE_NODE, &interface_ipv6_mld_version_cmd);
	install_element(INTERFACE_NODE, &interface_no_ipv6_mld_version_cmd);
	install_element(INTERFACE_NODE, &interface_ipv6_mld_immediate_leave_cmd);
	install_element(INTERFACE_NODE, &interface_ipv6_mld_query_interval_cmd);
	install_element(INTERFACE_NODE,
			&interface_no_ipv6_mld_query_interval_cmd);

View file

@@ -448,18 +448,23 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired)
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			struct gm_query_timers timers;

			if (!pim_ifp->gmp_immediate_leave) {
				timers.qrv = gm_ifp->cur_qrv;
				timers.max_resp_ms = gm_ifp->cur_max_resp;
				timers.qqic_ms = gm_ifp->cur_query_intv_trig;
				timers.fuzz = gm_ifp->cfg_timing_fuzz;

				gm_expiry_calc(&timers);
			} else
				memset(&timers.expire_wait, 0, sizeof(timers.expire_wait));

			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			EVENT_OFF(sg->t_sg_query);
			sg->query_sbit = false;
			/* Trigger the specific queries only for querier. */
			if (!pim_ifp->gmp_immediate_leave &&
			    IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest)) {
				sg->n_query = gm_ifp->cur_lmqc;
				gm_trigger_specific(sg);
			}
@@ -1102,11 +1107,24 @@ static void gm_handle_v1_leave(struct gm_if *gm_ifp,
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			const struct pim_interface *pim_ifp = gm_ifp->ifp->info;
			struct gm_packet_sg *item;

			gm_packet_sg_drop(old_grp);

			/*
			 * If immediate-leave is configured, drop the other
			 * subscribers as well and proceed to expire the MLD
			 * join.
			 */
			if (pim_ifp->gmp_immediate_leave) {
				frr_each_safe (gm_packet_sg_subs, grp->subs_positive, item) {
					gm_packet_sg_drop(item);
				}
				gm_sg_update(grp, true);
			} else
				gm_sg_update(grp, false);

			/* TODO "need S,G PRUNE => NO_INFO transition here" */
		}
	}
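A side note on the `frr_each_safe` loop above: the `_safe` variant is needed because `gm_packet_sg_drop()` removes the element being visited. A generic C sketch of the underlying idea (an illustration of safe-removal iteration, not FRR's actual macro internals):

```c
#include <stdlib.h>

struct node {
	struct node *next;
};

/* Drop every element of a singly linked list. The successor is captured
 * before the current element is freed, which is exactly the property the
 * *_safe iteration variants provide: deletion mid-loop is harmless. */
static void drop_all(struct node *head)
{
	struct node *next;

	for (struct node *it = head; it != NULL; it = next) {
		next = it->next; /* remember the successor first */
		free(it);	 /* now 'it' may be destroyed safely */
	}
}
```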

View file

@@ -5693,6 +5693,19 @@ ALIAS_YANG(interface_ip_igmp_limits,
       "Limit number of IGMPv3 sources to track\n"
       "Limit number of IGMP group memberships to track\n")

DEFPY_YANG(interface_ip_igmp_immediate_leave,
           interface_ip_igmp_immediate_leave_cmd,
           "[no] ip igmp immediate-leave",
           NO_STR
           IP_STR
           IFACE_IGMP_STR
           "Immediately drop group memberships on receiving Leave (IGMPv2 only)\n")
{
	nb_cli_enqueue_change(vty, "./immediate-leave", NB_OP_MODIFY, no ? "false" : "true");
	return nb_cli_apply_changes(vty, FRR_GMP_INTERFACE_XPATH, FRR_PIM_AF_XPATH_VAL);
}

DEFUN (interface_ip_pim_drprio,
       interface_ip_pim_drprio_cmd,
       "ip pim drpriority (0-4294967295)",
@@ -9140,6 +9153,7 @@ void pim_cmd_init(void)
	install_element(INTERFACE_NODE, &interface_ip_igmp_proxy_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_limits_cmd);
	install_element(INTERFACE_NODE, &no_interface_ip_igmp_limits_cmd);
	install_element(INTERFACE_NODE, &interface_ip_igmp_immediate_leave_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_activeactive_cmd);
	install_element(INTERFACE_NODE, &interface_ip_pim_ssm_cmd);
	install_element(INTERFACE_NODE, &interface_no_ip_pim_ssm_cmd);

View file

@@ -107,6 +107,9 @@ struct pim_interface {
	uint32_t gm_source_limit, gm_group_limit;

	/* IGMPv2-only/MLDv1-only immediate leave */
	bool gmp_immediate_leave;

	int pim_sock_fd;		/* PIM socket file descriptor */
	struct event *t_pim_sock_read;	/* thread for reading PIM socket */
	int64_t pim_sock_creation;	/* timestamp of PIM socket creation */

View file

@@ -728,9 +728,25 @@ static void toin_incl(struct gm_group *group, int num_sources,
static void toin_excl(struct gm_group *group, int num_sources,
		      struct in_addr *sources)
{
	struct listnode *src_node, *src_next;
	struct pim_interface *pim_ifp = group->interface->info;
	int num_sources_tosend;
	int i;

	if (group->igmp_version == 2 && pim_ifp->gmp_immediate_leave) {
		struct gm_source *src;

		if (PIM_DEBUG_GM_TRACE)
			zlog_debug("IGMP(v2) Immediate-leave group %pI4 on %s", &group->group_addr,
				   group->interface->name);

		igmp_group_timer_on(group, 0, group->interface->name);
		for (ALL_LIST_ELEMENTS(group->group_source_list, src_node, src_next, src))
			igmp_source_delete(src);
		return;
	}

	/* Set SEND flag for X (sources with timer > 0) */
	num_sources_tosend = source_mark_send_flag_by_timer(group);
@@ -1496,7 +1512,9 @@ void igmp_group_timer_lower_to_lmqt(struct gm_group *group)
	pim_ifp = ifp->info;
	ifname = ifp->name;

	lmqi_dsec = pim_ifp->gmp_immediate_leave
			    ? 0
			    : pim_ifp->gm_specific_query_max_response_time_dsec;
	lmqc = pim_ifp->gm_last_member_query_count;
	lmqt_msec = PIM_IGMP_LMQT_MSEC(
		lmqi_dsec, lmqc); /* lmqt_msec = (100 * lmqi_dsec) * lmqc */
@@ -1531,7 +1549,9 @@ void igmp_source_timer_lower_to_lmqt(struct gm_source *source)
	pim_ifp = ifp->info;
	ifname = ifp->name;

	lmqi_dsec = pim_ifp->gmp_immediate_leave
			    ? 0
			    : pim_ifp->gm_specific_query_max_response_time_dsec;
	lmqc = pim_ifp->gm_last_member_query_count;
	lmqt_msec = PIM_IGMP_LMQT_MSEC(
		lmqi_dsec, lmqc); /* lmqt_msec = (100 * lmqi_dsec) * lmqc */
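To see what zeroing `lmqi_dsec` buys, here is a standalone worked example of the LMQT arithmetic; the macro is rebuilt from the in-line comment above (`lmqt_msec = (100 * lmqi_dsec) * lmqc`), and the defaults used are the RFC 3376 ones (LMQI of 10 deciseconds, LMQC of 2) — assumptions for illustration, not values taken from this diff:

```c
#include <stdio.h>

/* Rebuilt from the comment in the hunk above; illustrative only. */
#define PIM_IGMP_LMQT_MSEC(lmqi_dsec, lmqc) (100 * (lmqi_dsec) * (lmqc))

int main(void)
{
	const int lmqc = 2; /* assumed default Last Member Query Count */

	for (int immediate = 0; immediate <= 1; immediate++) {
		/* assumed default LMQI of 10 dsec (1 s); forced to 0 when
		 * immediate-leave is enabled, as in the diff above */
		int lmqi_dsec = immediate ? 0 : 10;

		printf("immediate-leave %s: lmqt = %d ms\n",
		       immediate ? "on" : "off",
		       PIM_IGMP_LMQT_MSEC(lmqi_dsec, lmqc));
	}
	/* Prints 2000 ms without immediate-leave and 0 ms with it, so the
	 * group/source timers expire at once instead of after the queries. */
	return 0;
}
```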

View file

@@ -743,6 +743,12 @@ const struct frr_yang_module_info frr_gmp_info = {
				.modify = lib_interface_gmp_address_family_proxy_modify,
			}
		},
		{
			.xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/immediate-leave",
			.cbs = {
				.modify = lib_interface_gmp_immediate_leave_modify,
			}
		},
		{
			.xpath = "/frr-interface:lib/interface/frr-gmp:gmp/address-family/static-group",
			.cbs = {

View file

@@ -289,6 +289,7 @@ int lib_interface_gmp_address_family_static_group_destroy(
	struct nb_cb_destroy_args *args);
int lib_interface_gm_max_sources_modify(struct nb_cb_modify_args *args);
int lib_interface_gm_max_groups_modify(struct nb_cb_modify_args *args);
int lib_interface_gmp_immediate_leave_modify(struct nb_cb_modify_args *args);

/*
 * Callback registered with routing_nb lib to validate only

View file

@@ -4492,6 +4492,29 @@ int lib_interface_gmp_address_family_robustness_variable_modify(
	return NB_OK;
}

/*
 * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/immediate-leave
 */
int lib_interface_gmp_immediate_leave_modify(struct nb_cb_modify_args *args)
{
	struct interface *ifp;
	struct pim_interface *pim_ifp;

	switch (args->event) {
	case NB_EV_VALIDATE:
	case NB_EV_PREPARE:
	case NB_EV_ABORT:
		break;
	case NB_EV_APPLY:
		ifp = nb_running_get_entry(args->dnode, NULL, true);
		pim_ifp = ifp->info;
		pim_ifp->gmp_immediate_leave = yang_dnode_get_bool(args->dnode, NULL);
		break;
	}

	return NB_OK;
}

/*
 * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family/proxy
 */
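The empty VALIDATE/PREPARE/ABORT arms follow FRR's standard northbound shape: only the APPLY phase may touch daemon state. A hedged generic template of that pattern (placeholder names, not code from this PR; assumes FRR's northbound.h is available):

```c
#include "northbound.h" /* provides nb_cb_modify_args, NB_EV_*, NB_OK */

int example_leaf_modify(struct nb_cb_modify_args *args)
{
	switch (args->event) {
	case NB_EV_VALIDATE: /* reject bad input here; no side effects */
	case NB_EV_PREPARE:  /* allocate anything APPLY will need */
	case NB_EV_ABORT:    /* roll back whatever PREPARE did */
		break;
	case NB_EV_APPLY:
		/* the only phase allowed to mutate running state */
		break;
	}

	return NB_OK;
}
```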

View file

@@ -471,6 +471,12 @@ int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
		++writes;
	}

	/* IF ip/ipv6 igmp/mld immediate-leave */
	if (pim_ifp->gmp_immediate_leave) {
		vty_out(vty, " " PIM_AF_NAME " " GM_AF_DBG " immediate-leave\n");
		++writes;
	}

	/* IF ip pim drpriority */
	if (pim_ifp->pim_dr_priority != PIM_DEFAULT_DR_PRIORITY) {
		vty_out(vty, " " PIM_AF_NAME " pim drpriority %u\n",

View file

@@ -69,8 +69,10 @@ def build_topo(tgen):
    # R1 interface eth2
    switch = tgen.add_switch("s3")
    tgen.add_host("h1", "192.168.100.100/24", "via 192.168.100.1")
    tgen.add_host("h3", "192.168.100.101/24", "via 192.168.100.1")
    switch.add_link(tgen.gears["r1"])
    switch.add_link(tgen.gears["h1"])
    switch.add_link(tgen.gears["h3"])

    # R2 interface eth1
    switch = tgen.add_switch("s4")
@@ -170,13 +172,13 @@ def test_pim_convergence():
    # IPv6 part
    #
    out = tgen.gears["r1"].vtysh_cmd("show interface r1-eth0 json", True)
    r1_r2_link_address = out["r1-eth0"]["ipAddresses"][1]["address"].split("/")[0]
    out = tgen.gears["r1"].vtysh_cmd("show interface r1-eth1 json", True)
    r1_r3_link_address = out["r1-eth1"]["ipAddresses"][1]["address"].split("/")[0]
    out = tgen.gears["r2"].vtysh_cmd("show interface r2-eth0 json", True)
    r2_link_address = out["r2-eth0"]["ipAddresses"][1]["address"].split("/")[0]
    out = tgen.gears["r3"].vtysh_cmd("show interface r3-eth0 json", True)
    r3_link_address = out["r3-eth0"]["ipAddresses"][1]["address"].split("/")[0]

    expect_pim_peer("r1", "ipv6", "r1-eth0", r2_link_address)
    expect_pim_peer("r2", "ipv6", "r2-eth0", r1_r2_link_address)
@@ -189,11 +191,13 @@ def test_igmp_group_limit():
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
ip igmp max-groups 4
"""
    )
    app_helper.run("h1", ["224.0.100.1", "h1-eth0"])
    app_helper.run("h1", ["224.0.100.2", "h1-eth0"])
    app_helper.run("h1", ["224.0.100.3", "h1-eth0"])
@@ -202,7 +206,9 @@ def test_igmp_group_limit():
    app_helper.run("h1", ["224.0.100.6", "h1-eth0"])

    def expect_igmp_group_count():
        igmp_groups = tgen.gears["r1"].vtysh_cmd(
            "show ip igmp groups json", isjson=True
        )
        try:
            return len(igmp_groups["r1-eth2"]["groups"])
        except KeyError:
@@ -212,13 +218,15 @@ def test_igmp_group_limit():

    # Cleanup
    app_helper.stop_host("h1")
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
no ip igmp max-groups 4
exit
clear ip igmp interfaces
"""
    )


def test_igmp_group_source_limit():
@@ -227,12 +235,14 @@ def test_igmp_group_source_limit():
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
ip igmp max-sources 4
exit
"""
    )

    app_helper.run("h1", ["--source=192.168.100.10", "232.0.101.10", "h1-eth0"])
    app_helper.run("h1", ["--source=192.168.100.11", "232.0.101.10", "h1-eth0"])
@@ -243,7 +253,9 @@ def test_igmp_group_source_limit():
    app_helper.run("h1", ["--source=192.168.100.16", "232.0.101.10", "h1-eth0"])

    def expect_igmp_group_source_count():
        igmp_sources = tgen.gears["r1"].vtysh_cmd(
            "show ip igmp sources json", isjson=True
        )
        try:
            return len(igmp_sources["r1-eth2"]["232.0.101.10"]["sources"])
        except KeyError:
@@ -252,13 +264,15 @@ def test_igmp_group_source_limit():
    topotest.run_and_expect(expect_igmp_group_source_count, 4, count=10, wait=2)

    # Cleanup
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
no ip igmp max-sources 4
exit
clear ip igmp interfaces
"""
    )
    app_helper.stop_host("h1")
@@ -268,11 +282,13 @@ def test_mld_group_limit():
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
ipv6 mld max-groups 14
"""
    )
    app_helper.run("h1", ["FF05::100", "h1-eth0"])
    app_helper.run("h1", ["FF05::101", "h1-eth0"])
    app_helper.run("h1", ["FF05::102", "h1-eth0"])
@@ -291,25 +307,27 @@ def test_mld_group_limit():
    app_helper.run("h1", ["FF05::115", "h1-eth0"])

    def expect_mld_group_count():
        mld_groups = tgen.gears["r1"].vtysh_cmd(
            "show ipv6 mld groups json", isjson=True
        )
        try:
            return len(mld_groups["r1-eth2"]["groups"])
        except KeyError:
            return 0

    topotest.run_and_expect(expect_mld_group_count, 14, count=10, wait=2)

    # Cleanup
    app_helper.stop_host("h1")
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
no ipv6 mld max-groups 4
exit
clear ipv6 mld interfaces
"""
    )


def test_mld_group_source_limit():
@@ -318,12 +336,14 @@ def test_mld_group_source_limit():
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
ipv6 mld max-sources 4
exit
"""
    )

    app_helper.run("h1", ["--source=2001:db8:1::100", "FF35::100", "h1-eth0"])
    app_helper.run("h1", ["--source=2001:db8:1::101", "FF35::100", "h1-eth0"])
@@ -334,7 +354,9 @@ def test_mld_group_source_limit():
    app_helper.run("h1", ["--source=2001:db8:1::106", "FF35::100", "h1-eth0"])

    def expect_mld_source_group_count():
        mld_sources = tgen.gears["r1"].vtysh_cmd(
            "show ipv6 mld joins json", isjson=True
        )
        try:
            return len(mld_sources["default"]["r1-eth2"]["ff35::100"].keys())
        except KeyError:
@@ -343,16 +365,124 @@ def test_mld_group_source_limit():
    topotest.run_and_expect(expect_mld_source_group_count, 4, count=10, wait=2)

    # Cleanup
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
no ipv6 mld max-sources 4
exit
clear ipv6 mld interfaces
"""
    )
    app_helper.stop_host("h1")
def test_igmp_immediate_leave():
    "Test IGMPv2 immediate leave feature."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    topotest.sysctl_assure(
        tgen.gears["h1"], "net.ipv4.conf.h1-eth0.force_igmp_version", "2"
    )
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
ip igmp immediate-leave
"""
    )
    app_helper.run("h1", ["224.0.110.1", "h1-eth0"])
    app_helper.run("h3", ["224.0.110.1", "h3-eth0"])

    def expect_igmp_group():
        igmp_groups = tgen.gears["r1"].vtysh_cmd(
            "show ip igmp groups json", isjson=True
        )
        try:
            for group in igmp_groups["r1-eth2"]["groups"]:
                if group["group"] == "224.0.110.1":
                    return True
            return False
        except KeyError:
            return False

    topotest.run_and_expect(expect_igmp_group, True, count=10, wait=2)

    # Send leave and expect immediate leave
    app_helper.stop_host("h1")
    topotest.run_and_expect(expect_igmp_group, False, count=10, wait=2)

    # Clean up
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
no ip igmp immediate-leave
"""
    )
    topotest.sysctl_assure(
        tgen.gears["h1"], "net.ipv4.conf.h1-eth0.force_igmp_version", "0"
    )
    app_helper.stop_host("h3")
def test_mldv1_immediate_leave():
    "Test MLDv1 immediate leave feature."
    tgen = get_topogen()
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    topotest.sysctl_assure(
        tgen.gears["h1"], "net.ipv6.conf.h1-eth0.force_mld_version", "1"
    )
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
ipv6 mld immediate-leave
"""
    )
    app_helper.run("h1", ["ff05::2000", "h1-eth0"])
    app_helper.run("h3", ["ff05::2000", "h3-eth0"])

    def expect_mld_group():
        mld_groups = tgen.gears["r1"].vtysh_cmd(
            "show ipv6 mld groups json", isjson=True
        )
        try:
            for group in mld_groups["r1-eth2"]["groups"]:
                if group["group"] == "ff05::2000":
                    return True
            return False
        except KeyError:
            return False

    topotest.run_and_expect(expect_mld_group, True, count=10, wait=2)

    # Send MLDv1 Done and expect immediate leave
    app_helper.stop_host("h1")
    topotest.run_and_expect(expect_mld_group, False, count=10, wait=2)

    # Clean up
    tgen.gears["r1"].vtysh_cmd(
        """
configure terminal
interface r1-eth2
no ipv6 mld immediate-leave
"""
    )
    topotest.sysctl_assure(
        tgen.gears["h1"], "net.ipv6.conf.h1-eth0.force_mld_version", "0"
    )
    app_helper.stop_host("h3")
def test_memory_leak():
    "Run the memory leak test and report results."
    tgen = get_topogen()

View file

@@ -186,6 +186,14 @@ module frr-gmp {
      }
    }

    leaf immediate-leave {
      type boolean;
      default "false";
      description
        "Immediately drop group memberships on receiving IGMPv2/MLDv1 Leave.
         Has no effect when IGMPv3/MLDv2 is in use.";
    }

    list static-group {
      key "group-addr source-addr";
      description