Compare commits

5 commits

Author SHA1 Message Date
Christian Hopps b4000e0ad7 rustlibd: rust daemon template
Signed-off-by: Christian Hopps <chopps@labn.net>
2025-03-03 12:14:22 +00:00
Christian Hopps 38c8603ebe lib: add FRR utility functions for rust code
Signed-off-by: Christian Hopps <chopps@labn.net>
2025-03-03 11:44:00 +00:00
Christian Hopps 6f9fced057 lib: add extern available variadic zlog function
Signed-off-by: Christian Hopps <chopps@labn.net>
2025-03-03 11:44:00 +00:00
Christian Hopps edad74e5bc rustbind: remove: rust binary based daemon skeleton code
Signed-off-by: Christian Hopps <chopps@labn.net>
2025-03-03 11:42:39 +00:00
Christian Hopps fff9e3fbcb rustbind: capture rust binary based daemon skeleton code work
Signed-off-by: Christian Hopps <chopps@labn.net>
2025-03-03 11:42:07 +00:00
575 changed files with 5739 additions and 24362 deletions
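
One commit in this range, 6f9fced057 ("lib: add extern available variadic zlog function"), exists so that code outside of C — here the rustlibd daemon template — can reach FRR's variadic zlog logging path through a plain extern entry point. The sketch below is only an illustration of that binding pattern from the Rust side: the symbol name frr_zlog_string, the priority value, and the stand-in body are assumptions made for this example, not names taken from the patch.

```rust
// Sketch: how a Rust daemon might wrap a fixed-arity logging entry point
// exported from FRR's C library. `frr_zlog_string` and the priority value
// are assumptions for illustration, not identifiers from this change set.
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int};

// In a real rustlibd build this would be a foreign declaration,
//     extern "C" { fn frr_zlog_string(priority: c_int, msg: *const c_char); }
// resolved against libfrr. A local stand-in keeps the sketch self-contained.
extern "C" fn frr_zlog_string(priority: c_int, msg: *const c_char) {
    let text = unsafe { CStr::from_ptr(msg) }.to_string_lossy();
    println!("<{}> {}", priority, text);
}

const LOG_INFO: c_int = 6; // syslog(3) informational priority

// Safe wrapper: do the formatting in Rust and hand one already-formatted
// C string across the boundary, sidestepping the variadic zlog(...) API
// that Rust cannot call directly.
fn zlog_info(msg: &str) {
    let c = CString::new(msg).expect("log message must not contain NUL");
    // With a true extern declaration this call would be `unsafe`.
    frr_zlog_string(LOG_INFO, c.as_ptr());
}

fn main() {
    zlog_info("rustlibd: daemon template initialized");
}
```

Formatting on the Rust side and passing a single C string keeps the FFI surface non-variadic, which is the usual way to expose printf-style C APIs to another language.
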

View file

@ -1,5 +1,5 @@
[MASTER] [MASTER]
init-hook="import sys; sys.path.extend(['..', 'tests/topotests']);" init-hook="import sys; sys.path.insert(0, '..')"
signature-mutators=common_config.retry,retry signature-mutators=common_config.retry,retry
[FORMAT] [FORMAT]

View file

@ -185,6 +185,7 @@ include grpc/subdir.am
include tools/subdir.am include tools/subdir.am
include mgmtd/subdir.am include mgmtd/subdir.am
include rustlibd/subdir.am
include bgpd/subdir.am include bgpd/subdir.am
include bgpd/rfp-example/librfp/subdir.am include bgpd/rfp-example/librfp/subdir.am
@ -285,6 +286,7 @@ EXTRA_DIST += \
qpb/Makefile \ qpb/Makefile \
ripd/Makefile \ ripd/Makefile \
ripngd/Makefile \ ripngd/Makefile \
rustlibd/Makefile \
staticd/Makefile \ staticd/Makefile \
tests/Makefile \ tests/Makefile \
tools/Makefile \ tools/Makefile \

View file

@ -310,7 +310,6 @@ DEFPY (babel_set_wired,
babel_ifp = babel_get_if_nfo(ifp); babel_ifp = babel_get_if_nfo(ifp);
assert (babel_ifp != NULL); assert (babel_ifp != NULL);
if ((CHECK_FLAG(babel_ifp->flags, BABEL_IF_WIRED) ? 1 : 0) != (no ? 0 : 1))
babel_set_wired_internal(babel_ifp, no ? 0 : 1); babel_set_wired_internal(babel_ifp, no ? 0 : 1);
return CMD_SUCCESS; return CMD_SUCCESS;
} }
@ -329,7 +328,6 @@ DEFPY (babel_set_wireless,
babel_ifp = babel_get_if_nfo(ifp); babel_ifp = babel_get_if_nfo(ifp);
assert (babel_ifp != NULL); assert (babel_ifp != NULL);
if ((CHECK_FLAG(babel_ifp->flags, BABEL_IF_WIRED) ? 1 : 0) != (no ? 1 : 0))
babel_set_wired_internal(babel_ifp, no ? 1 : 0); babel_set_wired_internal(babel_ifp, no ? 1 : 0);
return CMD_SUCCESS; return CMD_SUCCESS;
} }
@ -366,19 +364,12 @@ DEFPY (babel_set_hello_interval,
{ {
VTY_DECLVAR_CONTEXT(interface, ifp); VTY_DECLVAR_CONTEXT(interface, ifp);
babel_interface_nfo *babel_ifp; babel_interface_nfo *babel_ifp;
unsigned int old_interval;
babel_ifp = babel_get_if_nfo(ifp); babel_ifp = babel_get_if_nfo(ifp);
assert (babel_ifp != NULL); assert (babel_ifp != NULL);
old_interval = babel_ifp->hello_interval;
babel_ifp->hello_interval = no ? babel_ifp->hello_interval = no ?
BABEL_DEFAULT_HELLO_INTERVAL : hello_interval; BABEL_DEFAULT_HELLO_INTERVAL : hello_interval;
if (old_interval != babel_ifp->hello_interval){
set_timeout(&babel_ifp->hello_timeout, babel_ifp->hello_interval);
send_hello(ifp);
}
return CMD_SUCCESS; return CMD_SUCCESS;
} }
@ -755,10 +746,8 @@ babel_interface_close_all(void)
} }
/* Disable babel redistribution */ /* Disable babel redistribution */
for (type = 0; type < ZEBRA_ROUTE_MAX; type++) { for (type = 0; type < ZEBRA_ROUTE_MAX; type++) {
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, babel_zclient, AFI_IP, type, 0, zclient_redistribute (ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP, type, 0, VRF_DEFAULT);
VRF_DEFAULT); zclient_redistribute (ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP6, type, 0, VRF_DEFAULT);
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, babel_zclient, AFI_IP6, type, 0,
VRF_DEFAULT);
} }
} }
@ -976,7 +965,6 @@ DEFUN (show_babel_route,
{ {
struct route_stream *routes = NULL; struct route_stream *routes = NULL;
struct xroute_stream *xroutes = NULL; struct xroute_stream *xroutes = NULL;
routes = route_stream(0); routes = route_stream(0);
if(routes) { if(routes) {
while(1) { while(1) {

View file

@ -19,7 +19,6 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
#include "memory.h" #include "memory.h"
#include "libfrr.h" #include "libfrr.h"
#include "lib_errors.h" #include "lib_errors.h"
#include "plist.h"
#include "babel_main.h" #include "babel_main.h"
#include "babeld.h" #include "babeld.h"
@ -314,7 +313,6 @@ babel_exit_properly(void)
debugf(BABEL_DEBUG_COMMON, "Done."); debugf(BABEL_DEBUG_COMMON, "Done.");
vrf_terminate(); vrf_terminate();
prefix_list_reset();
frr_fini(); frr_fini();
exit(0); exit(0);

View file

@ -19,7 +19,7 @@ void babelz_zebra_init(void);
/* we must use a pointer because of zclient.c's functions (new, free). */ /* we must use a pointer because of zclient.c's functions (new, free). */
struct zclient *babel_zclient; struct zclient *zclient;
/* Debug types */ /* Debug types */
static const struct { static const struct {
@ -94,10 +94,9 @@ DEFUN (babel_redistribute_type,
} }
if (!negate) if (!negate)
zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, babel_zclient, afi, type, 0, VRF_DEFAULT); zclient_redistribute (ZEBRA_REDISTRIBUTE_ADD, zclient, afi, type, 0, VRF_DEFAULT);
else { else {
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, babel_zclient, afi, type, 0, zclient_redistribute (ZEBRA_REDISTRIBUTE_DELETE, zclient, afi, type, 0, VRF_DEFAULT);
VRF_DEFAULT);
/* perhaps should we remove xroutes having the same type... */ /* perhaps should we remove xroutes having the same type... */
} }
return CMD_SUCCESS; return CMD_SUCCESS;
@ -231,11 +230,11 @@ static zclient_handler *const babel_handlers[] = {
void babelz_zebra_init(void) void babelz_zebra_init(void)
{ {
babel_zclient = zclient_new(master, &zclient_options_default, babel_handlers, zclient = zclient_new(master, &zclient_options_default, babel_handlers,
array_size(babel_handlers)); array_size(babel_handlers));
zclient_init(babel_zclient, ZEBRA_ROUTE_BABEL, 0, &babeld_privs); zclient_init(zclient, ZEBRA_ROUTE_BABEL, 0, &babeld_privs);
babel_zclient->zebra_connected = babel_zebra_connected; zclient->zebra_connected = babel_zebra_connected;
install_element(BABEL_NODE, &babel_redistribute_type_cmd); install_element(BABEL_NODE, &babel_redistribute_type_cmd);
install_element(ENABLE_NODE, &debug_babel_cmd); install_element(ENABLE_NODE, &debug_babel_cmd);
@ -249,6 +248,6 @@ void babelz_zebra_init(void)
void void
babel_zebra_close_connexion(void) babel_zebra_close_connexion(void)
{ {
zclient_stop(babel_zclient); zclient_stop(zclient);
zclient_free(babel_zclient); zclient_free(zclient);
} }

View file

@ -8,7 +8,7 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
#include "vty.h" #include "vty.h"
extern struct zclient *babel_zclient; extern struct zclient *zclient;
void babelz_zebra_init(void); void babelz_zebra_init(void);
void babel_zebra_close_connexion(void); void babel_zebra_close_connexion(void);

View file

@ -108,8 +108,8 @@ babel_config_write (struct vty *vty)
/* list redistributed protocols */ /* list redistributed protocols */
for (afi = AFI_IP; afi <= AFI_IP6; afi++) { for (afi = AFI_IP; afi <= AFI_IP6; afi++) {
for (i = 0; i < ZEBRA_ROUTE_MAX; i++) { for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
if (i != babel_zclient->redist_default && if (i != zclient->redist_default &&
vrf_bitmap_check(&babel_zclient->redist[afi][i], VRF_DEFAULT)) { vrf_bitmap_check(&zclient->redist[afi][i], VRF_DEFAULT)) {
vty_out(vty, " redistribute %s %s\n", vty_out(vty, " redistribute %s %s\n",
(afi == AFI_IP) ? "ipv4" : "ipv6", (afi == AFI_IP) ? "ipv4" : "ipv6",
zebra_route_string(i)); zebra_route_string(i));
@ -183,10 +183,6 @@ static void babel_read_protocol(struct event *thread)
flog_err_sys(EC_LIB_SOCKET, "recv: %s", safe_strerror(errno)); flog_err_sys(EC_LIB_SOCKET, "recv: %s", safe_strerror(errno));
} }
} else { } else {
if(ntohs(sin6.sin6_port) != BABEL_PORT) {
return;
}
FOR_ALL_INTERFACES(vrf, ifp) { FOR_ALL_INTERFACES(vrf, ifp) {
if(!if_up(ifp)) if(!if_up(ifp))
continue; continue;
@ -216,8 +212,7 @@ static void babel_init_routing_process(struct event *thread)
babel_main_loop(thread);/* this function self-add to the t_update thread */ babel_main_loop(thread);/* this function self-add to the t_update thread */
} }
/* fill "myid" with an unique id (only if myid != {0} and myid != {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* fill "myid" with an unique id (only if myid != {0}). */
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}). */
static void static void
babel_get_myid(void) babel_get_myid(void)
{ {
@ -227,7 +222,7 @@ babel_get_myid(void)
int i; int i;
/* if we already have an id (from state file), we return. */ /* if we already have an id (from state file), we return. */
if (memcmp(myid, zeroes, 8) != 0 && memcmp(myid, ones, 8) != 0) { if (memcmp(myid, zeroes, 8) != 0) {
return; return;
} }

View file

@ -21,8 +21,6 @@ Copyright 2011 by Matthieu Boutier and Juliusz Chroboczek
#undef MAX #undef MAX
#undef MIN #undef MIN
#define BABEL_PORT 6696
#define MAX(x,y) ((x)<=(y)?(y):(x)) #define MAX(x,y) ((x)<=(y)?(y):(x))
#define MIN(x,y) ((x)<=(y)?(x):(y)) #define MIN(x,y) ((x)<=(y)?(x):(y))

View file

@ -176,7 +176,8 @@ zebra_route(int add, int family, const unsigned char *pref, unsigned short plen,
debugf(BABEL_DEBUG_ROUTE, "%s route (%s) to zebra", debugf(BABEL_DEBUG_ROUTE, "%s route (%s) to zebra",
add ? "adding" : "removing", add ? "adding" : "removing",
(family == AF_INET) ? "ipv4" : "ipv6"); (family == AF_INET) ? "ipv4" : "ipv6");
return zclient_route_send(add ? ZEBRA_ROUTE_ADD : ZEBRA_ROUTE_DELETE, babel_zclient, &api); return zclient_route_send (add ? ZEBRA_ROUTE_ADD : ZEBRA_ROUTE_DELETE,
zclient, &api);
} }
int int

View file

@ -27,7 +27,6 @@ int split_horizon = 1;
unsigned short myseqno = 0; unsigned short myseqno = 0;
#define UNICAST_BUFSIZE 1024 #define UNICAST_BUFSIZE 1024
#define RESERVED 0
static int unicast_buffered = 0; static int unicast_buffered = 0;
static unsigned char *unicast_buffer = NULL; static unsigned char *unicast_buffer = NULL;
struct neighbour *unicast_neighbour = NULL; struct neighbour *unicast_neighbour = NULL;
@ -53,17 +52,7 @@ static const unsigned char tlv_min_length[MESSAGE_MAX + 1] =
static bool static bool
known_ae(int ae) known_ae(int ae)
{ {
return ae <= 3; return ae <= 4;
}
static inline bool
is_all_zero(const unsigned char *data, int len) {
for (int j = 0; j < len; j++) {
if (data[j] != 0) {
return false;
}
}
return true;
} }
/* Parse a network prefix, encoded in the somewhat baroque compressed /* Parse a network prefix, encoded in the somewhat baroque compressed
@ -162,11 +151,7 @@ static bool parse_update_subtlv(const unsigned char *a, int alen,
"Received Mandatory bit set but this FRR version is not prepared to handle it at this point"); "Received Mandatory bit set but this FRR version is not prepared to handle it at this point");
return true; return true;
} else if (type == SUBTLV_PADN) { } else if (type == SUBTLV_PADN) {
if (!is_all_zero(a + i + 2, len)) { /* Nothing. */
debugf(BABEL_DEBUG_COMMON,
"Received pad%d with non zero MBZ field.",
len);
}
} else if (type == SUBTLV_DIVERSITY) { } else if (type == SUBTLV_DIVERSITY) {
if (len > DIVERSITY_HOPS) { if (len > DIVERSITY_HOPS) {
flog_err( flog_err(
@ -229,11 +214,7 @@ parse_hello_subtlv(const unsigned char *a, int alen,
"Received subtlv with Mandatory bit, this version of FRR is not prepared to handle this currently"); "Received subtlv with Mandatory bit, this version of FRR is not prepared to handle this currently");
return -2; return -2;
} else if (type == SUBTLV_PADN) { } else if (type == SUBTLV_PADN) {
if (!is_all_zero(a + i + 2, len)) { /* Nothing to do. */
debugf(BABEL_DEBUG_COMMON,
"Received pad%d with non zero MBZ field.",
len);
}
} else if (type == SUBTLV_TIMESTAMP) { } else if (type == SUBTLV_TIMESTAMP) {
if (len >= 4) { if (len >= 4) {
DO_NTOHL(*hello_send_us, a + i + 2); DO_NTOHL(*hello_send_us, a + i + 2);
@ -280,11 +261,7 @@ parse_ihu_subtlv(const unsigned char *a, int alen,
} }
if(type == SUBTLV_PADN) { if(type == SUBTLV_PADN) {
if (!is_all_zero(a + i + 2, len)) { /* Nothing to do. */
debugf(BABEL_DEBUG_COMMON,
"Received pad%d with non zero MBZ field.",
len);
}
} else if(type == SUBTLV_TIMESTAMP) { } else if(type == SUBTLV_TIMESTAMP) {
if(len >= 8) { if(len >= 8) {
DO_NTOHL(*hello_send_us, a + i + 2); DO_NTOHL(*hello_send_us, a + i + 2);
@ -313,7 +290,7 @@ parse_request_subtlv(int ae, const unsigned char *a, int alen,
int have_src_prefix = 0; int have_src_prefix = 0;
while(i < alen) { while(i < alen) {
type = a[i]; type = a[0];
if(type == SUBTLV_PAD1) { if(type == SUBTLV_PAD1) {
i++; i++;
continue; continue;
@ -464,14 +441,6 @@ parse_packet(const unsigned char *from, struct interface *ifp,
return; return;
} }
if (v4mapped(from)) {
memcpy(v4_nh, from, 16);
have_v4_nh = 1;
} else {
memcpy(v6_nh, from, 16);
have_v6_nh = 1;
}
i = 0; i = 0;
while(i < bodylen) { while(i < bodylen) {
message = packet + 4 + i; message = packet + 4 + i;
@ -485,23 +454,12 @@ parse_packet(const unsigned char *from, struct interface *ifp,
len = message[1]; len = message[1];
if(type == MESSAGE_PADN) { if(type == MESSAGE_PADN) {
if (!is_all_zero(message + 2, len)) {
debugf(BABEL_DEBUG_COMMON,
"Received pad%d with non zero MBZ field.",
len);
}
debugf(BABEL_DEBUG_COMMON,"Received pad%d from %s on %s.", debugf(BABEL_DEBUG_COMMON,"Received pad%d from %s on %s.",
len, format_address(from), ifp->name); len, format_address(from), ifp->name);
} else if(type == MESSAGE_ACK_REQ) { } else if(type == MESSAGE_ACK_REQ) {
unsigned short nonce, interval, Reserved; unsigned short nonce, interval;
DO_NTOHS(Reserved, message + 2);
DO_NTOHS(nonce, message + 4); DO_NTOHS(nonce, message + 4);
DO_NTOHS(interval, message + 6); DO_NTOHS(interval, message + 6);
if (Reserved != RESERVED) {
debugf(BABEL_DEBUG_COMMON,"Received ack-req (%04X %d) with non zero Reserved from %s on %s.",
nonce, interval, format_address(from), ifp->name);
goto done;
}
debugf(BABEL_DEBUG_COMMON,"Received ack-req (%04X %d) from %s on %s.", debugf(BABEL_DEBUG_COMMON,"Received ack-req (%04X %d) from %s on %s.",
nonce, interval, format_address(from), ifp->name); nonce, interval, format_address(from), ifp->name);
send_ack(neigh, nonce, interval); send_ack(neigh, nonce, interval);
@ -562,15 +520,8 @@ parse_packet(const unsigned char *from, struct interface *ifp,
} }
} else if(type == MESSAGE_IHU) { } else if(type == MESSAGE_IHU) {
unsigned short txcost, interval; unsigned short txcost, interval;
unsigned char Reserved;
unsigned char address[16]; unsigned char address[16];
int rc; int rc;
Reserved = message[3];
if (Reserved != RESERVED) {
debugf(BABEL_DEBUG_COMMON,"Received ihu with non zero Reserved from %s on %s.",
format_address(from), ifp->name);
goto done;
}
DO_NTOHS(txcost, message + 4); DO_NTOHS(txcost, message + 4);
DO_NTOHS(interval, message + 6); DO_NTOHS(interval, message + 6);
rc = network_address(message[2], message + 8, len - 6, address); rc = network_address(message[2], message + 8, len - 6, address);
@ -601,13 +552,6 @@ parse_packet(const unsigned char *from, struct interface *ifp,
} else if(type == MESSAGE_NH) { } else if(type == MESSAGE_NH) {
unsigned char nh[16]; unsigned char nh[16];
int rc; int rc;
if(message[2] != 1 && message[2] != 3) {
debugf(BABEL_DEBUG_COMMON,"Received NH with incorrect AE %d.",
message[2]);
have_v4_nh = 0;
have_v6_nh = 0;
goto fail;
}
rc = network_address(message[2], message + 4, len - 2, nh); rc = network_address(message[2], message + 4, len - 2, nh);
if(rc <= 0) { if(rc <= 0) {
have_v4_nh = 0; have_v4_nh = 0;
@ -632,20 +576,6 @@ parse_packet(const unsigned char *from, struct interface *ifp,
int rc, parsed_len; int rc, parsed_len;
bool ignore_update = false; bool ignore_update = false;
// Basic sanity check on length
if (len < 10) {
if (len < 2 || (message[3] & 0x80)) {
have_v4_prefix = have_v6_prefix = 0;
}
goto fail;
}
if(!known_ae(message[2])) {
debugf(BABEL_DEBUG_COMMON,"Received update with unknown AE %d. Ignoring.",
message[2]);
goto done;
}
DO_NTOHS(interval, message + 6); DO_NTOHS(interval, message + 6);
DO_NTOHS(seqno, message + 8); DO_NTOHS(seqno, message + 8);
DO_NTOHS(metric, message + 10); DO_NTOHS(metric, message + 10);
@ -684,7 +614,7 @@ parse_packet(const unsigned char *from, struct interface *ifp,
} }
have_router_id = 1; have_router_id = 1;
} }
if(metric < INFINITY && !have_router_id && message[2] != 0) { if(!have_router_id && message[2] != 0) {
flog_err(EC_BABEL_PACKET, flog_err(EC_BABEL_PACKET,
"Received prefix with no router id."); "Received prefix with no router id.");
goto fail; goto fail;
@ -696,17 +626,11 @@ parse_packet(const unsigned char *from, struct interface *ifp,
format_address(from), ifp->name); format_address(from), ifp->name);
if(message[2] == 0) { if(message[2] == 0) {
if(metric < INFINITY) { if(metric < 0xFFFF) {
flog_err(EC_BABEL_PACKET, flog_err(EC_BABEL_PACKET,
"Received wildcard update with finite metric."); "Received wildcard update with finite metric.");
goto done; goto done;
} }
// Add check for Plen and Omitted
if(message[4] != 0 || message[5] != 0) {
flog_err(EC_BABEL_PACKET,
"Received wildcard retraction with non-zero Plen or Omitted.");
goto done;
}
retract_neighbour_routes(neigh); retract_neighbour_routes(neigh);
goto done; goto done;
} else if(message[2] == 1) { } else if(message[2] == 1) {
@ -769,10 +693,6 @@ parse_packet(const unsigned char *from, struct interface *ifp,
memcpy(src_prefix, zeroes, 16); memcpy(src_prefix, zeroes, 16);
src_plen = 0; src_plen = 0;
} }
if(message[6] == 0) {
debugf(BABEL_DEBUG_COMMON, "Received seqno request with invalid hop count 0");
goto done;
}
rc = parse_request_subtlv(message[2], message + 4 + rc, rc = parse_request_subtlv(message[2], message + 4 + rc,
len - 2 - rc, src_prefix, &src_plen); len - 2 - rc, src_prefix, &src_plen);
if(rc < 0) if(rc < 0)
@ -786,11 +706,6 @@ parse_packet(const unsigned char *from, struct interface *ifp,
"Received source-specific wildcard request."); "Received source-specific wildcard request.");
goto done; goto done;
} }
if(message[3] != 0) {
flog_err(EC_BABEL_PACKET,
"Ignoring request with AE=0 and non-zero Plen");
goto done;
}
/* If a neighbour is requesting a full route dump from us, /* If a neighbour is requesting a full route dump from us,
we might as well send it an IHU. */ we might as well send it an IHU. */
send_ihu(neigh, NULL); send_ihu(neigh, NULL);
@ -806,14 +721,8 @@ parse_packet(const unsigned char *from, struct interface *ifp,
send_update(neigh->ifp, 0, prefix, plen); send_update(neigh->ifp, 0, prefix, plen);
} }
} else if(type == MESSAGE_MH_REQUEST) { } else if(type == MESSAGE_MH_REQUEST) {
unsigned char prefix[16], plen, Reserved; unsigned char prefix[16], plen;
unsigned short seqno; unsigned short seqno;
Reserved = message[7];
if (Reserved != RESERVED) {
debugf(BABEL_DEBUG_COMMON,"Received request with non zero Reserved from %s on %s.",
format_address(from), ifp->name);
goto done;
}
int rc; int rc;
DO_NTOHS(seqno, message + 4); DO_NTOHS(seqno, message + 4);
rc = network_prefix(message[2], message[3], 0, rc = network_prefix(message[2], message[3], 0,
@ -825,10 +734,6 @@ parse_packet(const unsigned char *from, struct interface *ifp,
format_prefix(prefix, plen), format_prefix(prefix, plen),
format_address(from), ifp->name, format_address(from), ifp->name,
format_eui64(message + 8), seqno); format_eui64(message + 8), seqno);
if(message[6] == 0) {
debugf(BABEL_DEBUG_COMMON, "Received request with invalid hop count 0");
goto done;
}
handle_request(neigh, prefix, plen, message[6], seqno, message + 8); handle_request(neigh, prefix, plen, message[6], seqno, message + 8);
} else { } else {
debugf(BABEL_DEBUG_COMMON,"Received unknown packet type %d from %s on %s.", debugf(BABEL_DEBUG_COMMON,"Received unknown packet type %d from %s on %s.",
@ -2000,14 +1905,8 @@ handle_request(struct neighbour *neigh, const unsigned char *prefix,
/* We were about to forward a request to its requestor. Try to /* We were about to forward a request to its requestor. Try to
find a different neighbour to forward the request to. */ find a different neighbour to forward the request to. */
struct babel_route *other_route; struct babel_route *other_route;
/* First try feasible routes as required by RFC */
other_route = find_best_route(prefix, plen, 1, neigh);
if(!other_route || route_metric(other_route) >= INFINITY) {
/* If no feasible route found, try non-feasible routes */
other_route = find_best_route(prefix, plen, 0, neigh); other_route = find_best_route(prefix, plen, 0, neigh);
}
if(other_route && route_metric(other_route) < INFINITY) if(other_route && route_metric(other_route) < INFINITY)
successor = other_route->neigh; successor = other_route->neigh;
} }

View file

@ -1078,14 +1078,6 @@ route_lost(struct source *src, unsigned oldmetric)
new_route = find_best_route(src->prefix, src->plen, 1, NULL); new_route = find_best_route(src->prefix, src->plen, 1, NULL);
if(new_route) { if(new_route) {
consider_route(new_route); consider_route(new_route);
} else {
struct babel_route *unfeasible = find_best_route(src->prefix, src->plen, 0, NULL);
if(unfeasible && !route_expired(unfeasible)) {
/* MUST send seqno request when we have unexpired unfeasible routes */
send_request_resend(NULL, src->prefix, src->plen,
seqno_plus(src->seqno, 1),
src->id);
} else if(oldmetric < INFINITY) { } else if(oldmetric < INFINITY) {
/* Avoid creating a blackhole. */ /* Avoid creating a blackhole. */
send_update_resend(NULL, src->prefix, src->plen); send_update_resend(NULL, src->prefix, src->plen);
@ -1099,7 +1091,6 @@ route_lost(struct source *src, unsigned oldmetric)
src->id); src->id);
} }
} }
}
/* This is called periodically to flush old routes. It will also send /* This is called periodically to flush old routes. It will also send
requests for routes that are about to expire. */ requests for routes that are about to expire. */

View file

@ -38,6 +38,7 @@ struct babel_route {
struct route_stream; struct route_stream;
extern struct babel_route **routes;
extern int kernel_metric; extern int kernel_metric;
extern enum babel_diversity diversity_kind; extern enum babel_diversity diversity_kind;
extern int diversity_factor; extern int diversity_factor;

View file

@ -79,7 +79,6 @@ static void bfd_profile_set_default(struct bfd_profile *bp)
bp->detection_multiplier = BFD_DEFDETECTMULT; bp->detection_multiplier = BFD_DEFDETECTMULT;
bp->echo_mode = false; bp->echo_mode = false;
bp->passive = false; bp->passive = false;
bp->log_session_changes = false;
bp->minimum_ttl = BFD_DEF_MHOP_TTL; bp->minimum_ttl = BFD_DEF_MHOP_TTL;
bp->min_echo_rx = BFD_DEF_REQ_MIN_ECHO_RX; bp->min_echo_rx = BFD_DEF_REQ_MIN_ECHO_RX;
bp->min_echo_tx = BFD_DEF_DES_MIN_ECHO_TX; bp->min_echo_tx = BFD_DEF_DES_MIN_ECHO_TX;
@ -211,12 +210,6 @@ void bfd_session_apply(struct bfd_session *bs)
else else
bfd_set_shutdown(bs, bs->peer_profile.admin_shutdown); bfd_set_shutdown(bs, bs->peer_profile.admin_shutdown);
/* Toggle 'no log-session-changes' if default value. */
if (bs->peer_profile.log_session_changes == false)
bfd_set_log_session_changes(bs, bp->log_session_changes);
else
bfd_set_log_session_changes(bs, bs->peer_profile.log_session_changes);
/* If session interval changed negotiate new timers. */ /* If session interval changed negotiate new timers. */
if (bs->ses_state == PTM_BFD_UP if (bs->ses_state == PTM_BFD_UP
&& (bs->timers.desired_min_tx != min_tx && (bs->timers.desired_min_tx != min_tx
@ -581,9 +574,6 @@ void ptm_bfd_sess_up(struct bfd_session *bfd)
zlog_debug("state-change: [%s] %s -> %s", zlog_debug("state-change: [%s] %s -> %s",
bs_to_string(bfd), state_list[old_state].str, bs_to_string(bfd), state_list[old_state].str,
state_list[bfd->ses_state].str); state_list[bfd->ses_state].str);
if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES))
zlog_notice("Session-Change: [%s] %s -> %s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str);
} }
} }
@ -631,11 +621,6 @@ void ptm_bfd_sess_dn(struct bfd_session *bfd, uint8_t diag)
bs_to_string(bfd), state_list[old_state].str, bs_to_string(bfd), state_list[old_state].str,
state_list[bfd->ses_state].str, state_list[bfd->ses_state].str,
get_diag_str(bfd->local_diag)); get_diag_str(bfd->local_diag));
if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES) &&
old_state == PTM_BFD_UP)
zlog_notice("Session-Change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str,
get_diag_str(bfd->local_diag));
} }
/* clear peer's mac address */ /* clear peer's mac address */
@ -666,9 +651,6 @@ void ptm_sbfd_sess_up(struct bfd_session *bfd)
if (bglobal.debug_peer_event) if (bglobal.debug_peer_event)
zlog_info("state-change: [%s] %s -> %s", bs_to_string(bfd), zlog_info("state-change: [%s] %s -> %s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str); state_list[old_state].str, state_list[bfd->ses_state].str);
if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES))
zlog_notice("Session-Change: [%s] %s -> %s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str);
} }
} }
@ -711,11 +693,6 @@ void ptm_sbfd_init_sess_dn(struct bfd_session *bfd, uint8_t diag)
zlog_debug("state-change: [%s] %s -> %s reason:%s", bs_to_string(bfd), zlog_debug("state-change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str, state_list[old_state].str, state_list[bfd->ses_state].str,
get_diag_str(bfd->local_diag)); get_diag_str(bfd->local_diag));
if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES) &&
old_state == PTM_BFD_UP)
zlog_notice("Session-Change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str,
get_diag_str(bfd->local_diag));
} }
/* reset local address ,it might has been be changed after bfd is up*/ /* reset local address ,it might has been be changed after bfd is up*/
//memset(&bfd->local_address, 0, sizeof(bfd->local_address)); //memset(&bfd->local_address, 0, sizeof(bfd->local_address));
@ -744,18 +721,32 @@ void ptm_sbfd_echo_sess_dn(struct bfd_session *bfd, uint8_t diag)
zlog_warn("state-change: [%s] %s -> %s reason:%s", bs_to_string(bfd), zlog_warn("state-change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str, state_list[old_state].str, state_list[bfd->ses_state].str,
get_diag_str(bfd->local_diag)); get_diag_str(bfd->local_diag));
if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES) &&
old_state == PTM_BFD_UP)
zlog_notice("Session-Change: [%s] %s -> %s reason:%s", bs_to_string(bfd),
state_list[old_state].str, state_list[bfd->ses_state].str,
get_diag_str(bfd->local_diag));
} }
} }
static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa, static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa,
uint32_t ldisc) uint32_t ldisc)
{ {
return bfd_id_lookup(ldisc); struct bfd_session *bs;
bs = bfd_id_lookup(ldisc);
if (bs == NULL)
return NULL;
switch (bs->key.family) {
case AF_INET:
if (memcmp(&sa->sa_sin.sin_addr, &bs->key.peer,
sizeof(sa->sa_sin.sin_addr)))
return NULL;
break;
case AF_INET6:
if (memcmp(&sa->sa_sin6.sin6_addr, &bs->key.peer,
sizeof(sa->sa_sin6.sin6_addr)))
return NULL;
break;
}
return bs;
} }
struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp, struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp,
@ -953,11 +944,6 @@ static void _bfd_session_update(struct bfd_session *bs,
bs->peer_profile.echo_mode = bpc->bpc_echo; bs->peer_profile.echo_mode = bpc->bpc_echo;
bfd_set_echo(bs, bpc->bpc_echo); bfd_set_echo(bs, bpc->bpc_echo);
if (bpc->bpc_log_session_changes)
SET_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES);
else
UNSET_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES);
/* /*
* Shutdown needs to be the last in order to avoid timers enable when * Shutdown needs to be the last in order to avoid timers enable when
* the session is disabled. * the session is disabled.
@ -1545,7 +1531,6 @@ void bfd_set_shutdown(struct bfd_session *bs, bool shutdown)
return; return;
SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN); SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);
bs->local_diag = BD_ADMIN_DOWN;
/* Handle data plane shutdown case. */ /* Handle data plane shutdown case. */
if (bs->bdc) { if (bs->bdc) {
@ -1623,14 +1608,6 @@ void bfd_set_passive_mode(struct bfd_session *bs, bool passive)
} }
} }
void bfd_set_log_session_changes(struct bfd_session *bs, bool log_session_changes)
{
if (log_session_changes)
SET_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES);
else
UNSET_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES);
}
/* /*
* Helper functions. * Helper functions.
*/ */
@ -2518,7 +2495,7 @@ void sbfd_reflector_free(const uint32_t discr)
return; return;
} }
void sbfd_reflector_flush(void) void sbfd_reflector_flush()
{ {
sbfd_discr_iterate(_sbfd_reflector_free, NULL); sbfd_discr_iterate(_sbfd_reflector_free, NULL);
return; return;

View file

@ -84,7 +84,6 @@ struct bfd_peer_cfg {
bool bpc_cbit; bool bpc_cbit;
bool bpc_passive; bool bpc_passive;
bool bpc_log_session_changes;
bool bpc_has_profile; bool bpc_has_profile;
char bpc_profile[64]; char bpc_profile[64];
@ -240,7 +239,6 @@ enum bfd_session_flags {
BFD_SESS_FLAG_CBIT = 1 << 9, /* CBIT is set */ BFD_SESS_FLAG_CBIT = 1 << 9, /* CBIT is set */
BFD_SESS_FLAG_PASSIVE = 1 << 10, /* Passive mode */ BFD_SESS_FLAG_PASSIVE = 1 << 10, /* Passive mode */
BFD_SESS_FLAG_MAC_SET = 1 << 11, /* MAC of peer known */ BFD_SESS_FLAG_MAC_SET = 1 << 11, /* MAC of peer known */
BFD_SESS_FLAG_LOG_SESSION_CHANGES = 1 << 12, /* Log session changes */
}; };
enum bfd_mode_type { enum bfd_mode_type {
@ -299,8 +297,6 @@ struct bfd_profile {
bool admin_shutdown; bool admin_shutdown;
/** Passive mode. */ /** Passive mode. */
bool passive; bool passive;
/** Log session changes. */
bool log_session_changes;
/** Minimum expected TTL value. */ /** Minimum expected TTL value. */
uint8_t minimum_ttl; uint8_t minimum_ttl;
@ -686,14 +682,6 @@ void bfd_set_shutdown(struct bfd_session *bs, bool shutdown);
*/ */
void bfd_set_passive_mode(struct bfd_session *bs, bool passive); void bfd_set_passive_mode(struct bfd_session *bs, bool passive);
/**
* Set the BFD session to log or not log session changes.
*
* \param bs the BFD session.
* \param log_session indicates whether or not to log session changes.
*/
void bfd_set_log_session_changes(struct bfd_session *bs, bool log_session);
/** /**
* Picks the BFD session configuration from the appropriated source: * Picks the BFD session configuration from the appropriated source:
* if using the default peer configuration prefer profile (if it exists), * if using the default peer configuration prefer profile (if it exists),

View file

@ -754,21 +754,6 @@ void bfd_cli_show_passive(struct vty *vty, const struct lyd_node *dnode,
yang_dnode_get_bool(dnode, NULL) ? "" : "no "); yang_dnode_get_bool(dnode, NULL) ? "" : "no ");
} }
DEFPY_YANG(bfd_peer_log_session_changes, bfd_peer_log_session_changes_cmd,
"[no] log-session-changes",
NO_STR
"Log Up/Down changes for the session\n")
{
nb_cli_enqueue_change(vty, "./log-session-changes", NB_OP_MODIFY, no ? "false" : "true");
return nb_cli_apply_changes(vty, NULL);
}
void bfd_cli_show_log_session_changes(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults)
{
vty_out(vty, " %slog-session-changes\n", yang_dnode_get_bool(dnode, NULL) ? "" : "no ");
}
DEFPY_YANG( DEFPY_YANG(
bfd_peer_minimum_ttl, bfd_peer_minimum_ttl_cmd, bfd_peer_minimum_ttl, bfd_peer_minimum_ttl_cmd,
"[no] minimum-ttl (1-254)$ttl", "[no] minimum-ttl (1-254)$ttl",
@ -1078,9 +1063,6 @@ ALIAS_YANG(bfd_peer_passive, bfd_profile_passive_cmd,
NO_STR NO_STR
"Don't attempt to start sessions\n") "Don't attempt to start sessions\n")
ALIAS_YANG(bfd_peer_log_session_changes, bfd_profile_log_session_changes_cmd,
"[no] log-session-changes", NO_STR "Log Up/Down session changes in the profile\n")
ALIAS_YANG(bfd_peer_minimum_ttl, bfd_profile_minimum_ttl_cmd, ALIAS_YANG(bfd_peer_minimum_ttl, bfd_profile_minimum_ttl_cmd,
"[no] minimum-ttl (1-254)$ttl", "[no] minimum-ttl (1-254)$ttl",
NO_STR NO_STR
@ -1347,7 +1329,6 @@ bfdd_cli_init(void)
install_element(BFD_PEER_NODE, &bfd_peer_echo_receive_interval_cmd); install_element(BFD_PEER_NODE, &bfd_peer_echo_receive_interval_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_profile_cmd); install_element(BFD_PEER_NODE, &bfd_peer_profile_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_passive_cmd); install_element(BFD_PEER_NODE, &bfd_peer_passive_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_log_session_changes_cmd);
install_element(BFD_PEER_NODE, &bfd_peer_minimum_ttl_cmd); install_element(BFD_PEER_NODE, &bfd_peer_minimum_ttl_cmd);
install_element(BFD_PEER_NODE, &no_bfd_peer_minimum_ttl_cmd); install_element(BFD_PEER_NODE, &no_bfd_peer_minimum_ttl_cmd);
@ -1369,7 +1350,6 @@ bfdd_cli_init(void)
install_element(BFD_PROFILE_NODE, &bfd_profile_echo_transmit_interval_cmd); install_element(BFD_PROFILE_NODE, &bfd_profile_echo_transmit_interval_cmd);
install_element(BFD_PROFILE_NODE, &bfd_profile_echo_receive_interval_cmd); install_element(BFD_PROFILE_NODE, &bfd_profile_echo_receive_interval_cmd);
install_element(BFD_PROFILE_NODE, &bfd_profile_passive_cmd); install_element(BFD_PROFILE_NODE, &bfd_profile_passive_cmd);
install_element(BFD_PROFILE_NODE, &bfd_profile_log_session_changes_cmd);
install_element(BFD_PROFILE_NODE, &bfd_profile_minimum_ttl_cmd); install_element(BFD_PROFILE_NODE, &bfd_profile_minimum_ttl_cmd);
install_element(BFD_PROFILE_NODE, &no_bfd_profile_minimum_ttl_cmd); install_element(BFD_PROFILE_NODE, &no_bfd_profile_minimum_ttl_cmd);
} }

View file

@ -70,13 +70,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.cli_show = bfd_cli_show_passive, .cli_show = bfd_cli_show_passive,
} }
}, },
{
.xpath = "/frr-bfdd:bfdd/bfd/profile/log-session-changes",
.cbs = {
.modify = bfdd_bfd_profile_log_session_changes_modify,
.cli_show = bfd_cli_show_log_session_changes,
}
},
{ {
.xpath = "/frr-bfdd:bfdd/bfd/profile/minimum-ttl", .xpath = "/frr-bfdd:bfdd/bfd/profile/minimum-ttl",
.cbs = { .cbs = {
@ -167,13 +160,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.cli_show = bfd_cli_show_passive, .cli_show = bfd_cli_show_passive,
} }
}, },
{
.xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/log-session-changes",
.cbs = {
.modify = bfdd_bfd_sessions_single_hop_log_session_changes_modify,
.cli_show = bfd_cli_show_log_session_changes,
}
},
{ {
.xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/echo-mode", .xpath = "/frr-bfdd:bfdd/bfd/sessions/single-hop/echo-mode",
.cbs = { .cbs = {
@ -370,13 +356,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.cli_show = bfd_cli_show_passive, .cli_show = bfd_cli_show_passive,
} }
}, },
{
.xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/log-session-changes",
.cbs = {
.modify = bfdd_bfd_sessions_single_hop_log_session_changes_modify,
.cli_show = bfd_cli_show_log_session_changes,
}
},
{ {
.xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/minimum-ttl", .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/minimum-ttl",
.cbs = { .cbs = {
@ -593,13 +572,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.cli_show = bfd_cli_show_passive, .cli_show = bfd_cli_show_passive,
} }
}, },
{
.xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/log-session-changes",
.cbs = {
.modify = bfdd_bfd_sessions_single_hop_log_session_changes_modify,
.cli_show = bfd_cli_show_log_session_changes,
}
},
{ {
.xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/bfd-mode", .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-echo/bfd-mode",
.cbs = { .cbs = {
@ -816,13 +788,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.cli_show = bfd_cli_show_passive, .cli_show = bfd_cli_show_passive,
} }
}, },
{
.xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/log-session-changes",
.cbs = {
.modify = bfdd_bfd_sessions_single_hop_log_session_changes_modify,
.cli_show = bfd_cli_show_log_session_changes,
}
},
{ {
.xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/bfd-mode", .xpath = "/frr-bfdd:bfdd/bfd/sessions/sbfd-init/bfd-mode",
.cbs = { .cbs = {

View file

@ -24,7 +24,6 @@ int bfdd_bfd_profile_required_receive_interval_modify(
struct nb_cb_modify_args *args); struct nb_cb_modify_args *args);
int bfdd_bfd_profile_administrative_down_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_administrative_down_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_passive_mode_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_passive_mode_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_log_session_changes_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_echo_mode_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_echo_mode_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_desired_echo_transmission_interval_modify( int bfdd_bfd_profile_desired_echo_transmission_interval_modify(
@ -55,7 +54,6 @@ int bfdd_bfd_sessions_single_hop_administrative_down_modify(
struct nb_cb_modify_args *args); struct nb_cb_modify_args *args);
int bfdd_bfd_sessions_single_hop_passive_mode_modify( int bfdd_bfd_sessions_single_hop_passive_mode_modify(
struct nb_cb_modify_args *args); struct nb_cb_modify_args *args);
int bfdd_bfd_sessions_single_hop_log_session_changes_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_sessions_single_hop_echo_mode_modify( int bfdd_bfd_sessions_single_hop_echo_mode_modify(
struct nb_cb_modify_args *args); struct nb_cb_modify_args *args);
int bfdd_bfd_sessions_single_hop_desired_echo_transmission_interval_modify( int bfdd_bfd_sessions_single_hop_desired_echo_transmission_interval_modify(
@ -231,8 +229,6 @@ void bfd_cli_peer_profile_show(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults); bool show_defaults);
void bfd_cli_show_passive(struct vty *vty, const struct lyd_node *dnode, void bfd_cli_show_passive(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults); bool show_defaults);
void bfd_cli_show_log_session_changes(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
void bfd_cli_show_minimum_ttl(struct vty *vty, const struct lyd_node *dnode, void bfd_cli_show_minimum_ttl(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults); bool show_defaults);

View file

@ -595,23 +595,6 @@ int bfdd_bfd_profile_passive_mode_modify(struct nb_cb_modify_args *args)
return NB_OK; return NB_OK;
} }
/*
* XPath: /frr-bfdd:bfdd/bfd/profile/log-session-changes
*/
int bfdd_bfd_profile_log_session_changes_modify(struct nb_cb_modify_args *args)
{
struct bfd_profile *bp;
if (args->event != NB_EV_APPLY)
return NB_OK;
bp = nb_running_get_entry(args->dnode, NULL, true);
bp->log_session_changes = yang_dnode_get_bool(args->dnode, NULL);
bfd_profile_update(bp);
return NB_OK;
}
/* /*
* XPath: /frr-bfdd:bfdd/bfd/profile/minimum-ttl * XPath: /frr-bfdd:bfdd/bfd/profile/minimum-ttl
*/ */
@ -920,38 +903,6 @@ int bfdd_bfd_sessions_single_hop_passive_mode_modify(
return NB_OK; return NB_OK;
} }
/*
* XPath: /frr-bfdd:bfdd/bfd/sessions/single-hop/log-session-changes
* /frr-bfdd:bfdd/bfd/sessions/multi-hop/log-session-changes
* /frr-bfdd:bfdd/bfd/sessions/sbfd_echo/log-session-changes
* /frr-bfdd:bfdd/bfd/sessions/sbfd_init/log-session-changes
*/
int bfdd_bfd_sessions_single_hop_log_session_changes_modify(struct nb_cb_modify_args *args)
{
struct bfd_session *bs;
bool log_session_changes;
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
return NB_OK;
case NB_EV_APPLY:
break;
case NB_EV_ABORT:
return NB_OK;
}
log_session_changes = yang_dnode_get_bool(args->dnode, NULL);
bs = nb_running_get_entry(args->dnode, NULL, true);
bs->peer_profile.log_session_changes = log_session_changes;
bfd_session_apply(bs);
return NB_OK;
}
/* /*
* XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/bfd-mode * XPath: /frr-bfdd:bfdd/bfd/sessions/sbfd-init/bfd-mode
* /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/bfd-mode * /frr-bfdd:bfdd/bfd/sessions/sbfd-echo/bfd-mode

View file

@ -164,10 +164,9 @@ static void _display_peer(struct vty *vty, struct bfd_session *bs)
vty_out(vty, "\t\tPassive mode\n"); vty_out(vty, "\t\tPassive mode\n");
else else
vty_out(vty, "\t\tActive mode\n"); vty_out(vty, "\t\tActive mode\n");
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES))
vty_out(vty, "\t\tLog session changes\n");
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH))
vty_out(vty, "\t\tMinimum TTL: %d\n", bs->mh_ttl); vty_out(vty, "\t\tMinimum TTL: %d\n", bs->mh_ttl);
vty_out(vty, "\t\tStatus: "); vty_out(vty, "\t\tStatus: ");
switch (bs->ses_state) { switch (bs->ses_state) {
case PTM_BFD_ADM_DOWN: case PTM_BFD_ADM_DOWN:
@ -290,8 +289,6 @@ static struct json_object *__display_peer_json(struct bfd_session *bs)
json_object_int_add(jo, "remote-id", bs->discrs.remote_discr); json_object_int_add(jo, "remote-id", bs->discrs.remote_discr);
json_object_boolean_add(jo, "passive-mode", json_object_boolean_add(jo, "passive-mode",
CHECK_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE)); CHECK_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE));
json_object_boolean_add(jo, "log-session-changes",
CHECK_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES));
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH))
json_object_int_add(jo, "minimum-ttl", bs->mh_ttl); json_object_int_add(jo, "minimum-ttl", bs->mh_ttl);
@ -1197,7 +1194,6 @@ static int bfd_configure_peer(struct bfd_peer_cfg *bpc, bool mhop,
/* Defaults */ /* Defaults */
bpc->bpc_shutdown = false; bpc->bpc_shutdown = false;
bpc->bpc_log_session_changes = false;
bpc->bpc_detectmultiplier = BPC_DEF_DETECTMULTIPLIER; bpc->bpc_detectmultiplier = BPC_DEF_DETECTMULTIPLIER;
bpc->bpc_recvinterval = BPC_DEF_RECEIVEINTERVAL; bpc->bpc_recvinterval = BPC_DEF_RECEIVEINTERVAL;
bpc->bpc_txinterval = BPC_DEF_TRANSMITINTERVAL; bpc->bpc_txinterval = BPC_DEF_TRANSMITINTERVAL;

View file

@ -384,15 +384,10 @@ bfd_dplane_session_state_change(struct bfd_dplane_ctx *bdc,
break; break;
} }
if (bglobal.debug_peer_event) { if (bglobal.debug_peer_event)
zlog_debug("state-change: [data plane: %s] %s -> %s", zlog_debug("state-change: [data plane: %s] %s -> %s",
bs_to_string(bs), state_list[old_state].str, bs_to_string(bs), state_list[old_state].str,
state_list[bs->ses_state].str); state_list[bs->ses_state].str);
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_LOG_SESSION_CHANGES) &&
old_state != bs->ses_state)
zlog_notice("Session-Change: [data plane: %s] %s -> %s", bs_to_string(bs),
state_list[old_state].str, state_list[bs->ses_state].str);
}
} }
/** /**

View file

@ -36,7 +36,7 @@ struct ptm_client {
TAILQ_HEAD(pcqueue, ptm_client); TAILQ_HEAD(pcqueue, ptm_client);
static struct pcqueue pcqueue; static struct pcqueue pcqueue;
static struct zclient *bfd_zclient; static struct zclient *zclient;
/* /*
@ -209,7 +209,7 @@ int ptm_bfd_notify(struct bfd_session *bs, uint8_t notify_state)
* *
* q(64), l(32), w(16), c(8) * q(64), l(32), w(16), c(8)
*/ */
msg = bfd_zclient->obuf; msg = zclient->obuf;
stream_reset(msg); stream_reset(msg);
/* TODO: VRF handling */ /* TODO: VRF handling */
@ -264,7 +264,7 @@ int ptm_bfd_notify(struct bfd_session *bs, uint8_t notify_state)
/* Write packet size. */ /* Write packet size. */
stream_putw_at(msg, 0, stream_get_endp(msg)); stream_putw_at(msg, 0, stream_get_endp(msg));
return zclient_send_message(bfd_zclient); return zclient_send_message(zclient);
} }
static void _ptm_msg_read_address(struct stream *msg, struct sockaddr_any *sa) static void _ptm_msg_read_address(struct stream *msg, struct sockaddr_any *sa)
@ -600,7 +600,7 @@ stream_failure:
static int bfdd_replay(ZAPI_CALLBACK_ARGS) static int bfdd_replay(ZAPI_CALLBACK_ARGS)
{ {
struct stream *msg = bfd_zclient->ibuf; struct stream *msg = zclient->ibuf;
uint32_t rcmd; uint32_t rcmd;
STREAM_GETL(msg, rcmd); STREAM_GETL(msg, rcmd);
@ -653,7 +653,7 @@ static void bfdd_zebra_connected(struct zclient *zc)
zclient_create_header(msg, ZEBRA_INTERFACE_ADD, VRF_DEFAULT); zclient_create_header(msg, ZEBRA_INTERFACE_ADD, VRF_DEFAULT);
/* Send requests. */ /* Send requests. */
zclient_send_message(zc); zclient_send_message(zclient);
} }
static void bfdd_sessions_enable_interface(struct interface *ifp) static void bfdd_sessions_enable_interface(struct interface *ifp)
@ -837,32 +837,32 @@ void bfdd_zclient_init(struct zebra_privs_t *bfdd_priv)
{ {
hook_register_prio(if_real, 0, bfd_ifp_create); hook_register_prio(if_real, 0, bfd_ifp_create);
hook_register_prio(if_unreal, 0, bfd_ifp_destroy); hook_register_prio(if_unreal, 0, bfd_ifp_destroy);
bfd_zclient = zclient_new(master, &zclient_options_default, bfd_handlers, zclient = zclient_new(master, &zclient_options_default, bfd_handlers,
array_size(bfd_handlers)); array_size(bfd_handlers));
assert(bfd_zclient != NULL); assert(zclient != NULL);
zclient_init(bfd_zclient, ZEBRA_ROUTE_BFD, 0, bfdd_priv); zclient_init(zclient, ZEBRA_ROUTE_BFD, 0, bfdd_priv);
/* Send replay request on zebra connect. */ /* Send replay request on zebra connect. */
bfd_zclient->zebra_connected = bfdd_zebra_connected; zclient->zebra_connected = bfdd_zebra_connected;
} }
void bfdd_zclient_register(vrf_id_t vrf_id) void bfdd_zclient_register(vrf_id_t vrf_id)
{ {
if (!bfd_zclient || bfd_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return; return;
zclient_send_reg_requests(bfd_zclient, vrf_id); zclient_send_reg_requests(zclient, vrf_id);
} }
void bfdd_zclient_unregister(vrf_id_t vrf_id) void bfdd_zclient_unregister(vrf_id_t vrf_id)
{ {
if (!bfd_zclient || bfd_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return; return;
zclient_send_dereg_requests(bfd_zclient, vrf_id); zclient_send_dereg_requests(zclient, vrf_id);
} }
void bfdd_zclient_stop(void) void bfdd_zclient_stop(void)
{ {
zclient_stop(bfd_zclient); zclient_stop(zclient);
/* Clean-up and free ptm clients data memory. */ /* Clean-up and free ptm clients data memory. */
pc_free_all(); pc_free_all();
@ -870,7 +870,7 @@ void bfdd_zclient_stop(void)
void bfdd_zclient_terminate(void) void bfdd_zclient_terminate(void)
{ {
zclient_free(bfd_zclient); zclient_free(zclient);
} }

View file

@ -424,12 +424,8 @@ static unsigned int aspath_count_hops_internal(const struct aspath *aspath)
/* Check if aspath has AS_SET or AS_CONFED_SET */ /* Check if aspath has AS_SET or AS_CONFED_SET */
bool aspath_check_as_sets(struct aspath *aspath) bool aspath_check_as_sets(struct aspath *aspath)
{ {
struct assegment *seg; struct assegment *seg = aspath->segments;
if (!aspath || !aspath->segments)
return false;
seg = aspath->segments;
while (seg) { while (seg) {
if (seg->type == AS_SET || seg->type == AS_CONFED_SET) if (seg->type == AS_SET || seg->type == AS_CONFED_SET)
return true; return true;
@ -2516,39 +2512,3 @@ void bgp_remove_aspath_from_aggregate_hash(struct bgp_aggregate *aggregate,
} }
} }
struct aspath *aspath_delete_as_set_seq(struct aspath *aspath)
{
struct assegment *seg, *prev, *next;
bool removed = false;
if (!(aspath && aspath->segments))
return aspath;
seg = aspath->segments;
next = NULL;
prev = NULL;
while (seg) {
next = seg->next;
if (seg->type == AS_SET || seg->type == AS_CONFED_SET) {
if (aspath->segments == seg)
aspath->segments = seg->next;
else
prev->next = seg->next;
assegment_free(seg);
removed = true;
} else
prev = seg;
seg = next;
}
if (removed) {
aspath_str_update(aspath, false);
aspath->count = aspath_count_hops_internal(aspath);
}
return aspath;
}

View file

@ -168,6 +168,5 @@ extern void bgp_remove_aspath_from_aggregate_hash(
struct aspath *aspath); struct aspath *aspath);
extern void bgp_aggr_aspath_remove(void *arg); extern void bgp_aggr_aspath_remove(void *arg);
extern struct aspath *aspath_delete_as_set_seq(struct aspath *aspath);
#endif /* _QUAGGA_BGP_ASPATH_H */ #endif /* _QUAGGA_BGP_ASPATH_H */

View file

@ -1444,11 +1444,11 @@ bgp_attr_malformed(struct bgp_attr_parser_args *args, uint8_t subcode,
uint8_t *notify_datap = (length > 0 ? args->startp : NULL); uint8_t *notify_datap = (length > 0 ? args->startp : NULL);
if (bgp_debug_update(peer, NULL, NULL, 1)) { if (bgp_debug_update(peer, NULL, NULL, 1)) {
char str[BUFSIZ] = { 0 }; char attr_str[BUFSIZ] = {0};
bgp_dump_attr(attr, str, sizeof(str)); bgp_dump_attr(attr, attr_str, sizeof(attr_str));
zlog_debug("%s: attributes: %s", __func__, str); zlog_debug("%s: attributes: %s", __func__, attr_str);
} }
/* Only relax error handling for eBGP peers */ /* Only relax error handling for eBGP peers */
@ -2043,11 +2043,11 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args)
peer->host, aspath_print(attr->aspath)); peer->host, aspath_print(attr->aspath));
if (bgp_debug_update(peer, NULL, NULL, 1)) { if (bgp_debug_update(peer, NULL, NULL, 1)) {
char str[BUFSIZ] = { 0 }; char attr_str[BUFSIZ] = {0};
bgp_dump_attr(attr, str, sizeof(str)); bgp_dump_attr(attr, attr_str, sizeof(attr_str));
zlog_debug("%s: attributes: %s", __func__, str); zlog_debug("%s: attributes: %s", __func__, attr_str);
} }
} else { } else {
SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)); SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR));
@ -2094,11 +2094,11 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args,
peer->host, aspath_print(attr->aspath)); peer->host, aspath_print(attr->aspath));
if (bgp_debug_update(peer, NULL, NULL, 1)) { if (bgp_debug_update(peer, NULL, NULL, 1)) {
char str[BUFSIZ] = { 0 }; char attr_str[BUFSIZ] = {0};
bgp_dump_attr(attr, str, sizeof(str)); bgp_dump_attr(attr, attr_str, sizeof(attr_str));
zlog_debug("%s: attributes: %s", __func__, str); zlog_debug("%s: attributes: %s", __func__, attr_str);
} }
} else { } else {
SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR)); SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR));
@ -5028,13 +5028,7 @@ void bgp_packet_mpunreach_prefix(struct stream *s, const struct prefix *p,
{ {
uint8_t wlabel[4] = {0x80, 0x00, 0x00}; uint8_t wlabel[4] = {0x80, 0x00, 0x00};
/* [RFC3107] also made it possible to withdraw a binding without if (safi == SAFI_LABELED_UNICAST) {
* specifying the label explicitly, by setting the Compatibility field
* to 0x800000. However, some implementations set it to 0x000000. In
* order to ensure backwards compatibility, it is RECOMMENDED by this
* document that the Compatibility field be set to 0x800000.
*/
if (safi == SAFI_LABELED_UNICAST || safi == SAFI_MPLS_VPN) {
label = (mpls_label_t *)wlabel; label = (mpls_label_t *)wlabel;
num_labels = 1; num_labels = 1;
} }

View file

@ -30,7 +30,7 @@
DEFINE_MTYPE_STATIC(BGPD, BFD_CONFIG, "BFD configuration data"); DEFINE_MTYPE_STATIC(BGPD, BFD_CONFIG, "BFD configuration data");
extern struct zclient *bgp_zclient; extern struct zclient *zclient;
static void bfd_session_status_update(struct bfd_session_params *bsp, static void bfd_session_status_update(struct bfd_session_params *bsp,
const struct bfd_session_status *bss, const struct bfd_session_status *bss,
@ -651,7 +651,7 @@ DEFUN(no_neighbor_bfd_profile, no_neighbor_bfd_profile_cmd,
void bgp_bfd_init(struct event_loop *tm) void bgp_bfd_init(struct event_loop *tm)
{ {
/* Initialize BFD client functions */ /* Initialize BFD client functions */
bfd_protocol_integration_init(bgp_zclient, tm); bfd_protocol_integration_init(zclient, tm);
/* "neighbor bfd" commands. */ /* "neighbor bfd" commands. */
install_element(BGP_NODE, &neighbor_bfd_cmd); install_element(BGP_NODE, &neighbor_bfd_cmd);

View file

@ -3542,6 +3542,7 @@ static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw)
struct bmp_targets *bt; struct bmp_targets *bt;
struct listnode *node; struct listnode *node;
struct bmp_imported_bgp *bib; struct bmp_imported_bgp *bib;
int ret = 0;
struct stream *s = bmp_peerstate(bgp->peer_self, withdraw); struct stream *s = bmp_peerstate(bgp->peer_self, withdraw);
struct bmp *bmp; struct bmp *bmp;
afi_t afi; afi_t afi;
@ -3552,7 +3553,7 @@ static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw)
if (bmpbgp) { if (bmpbgp) {
frr_each (bmp_targets, &bmpbgp->targets, bt) { frr_each (bmp_targets, &bmpbgp->targets, bt) {
bmp_bgp_attribute_updated_instance(bt, &bmpbgp->vrf_state, bgp, ret = bmp_bgp_attribute_updated_instance(bt, &bmpbgp->vrf_state, bgp,
withdraw, s); withdraw, s);
if (withdraw) if (withdraw)
continue; continue;
@ -3574,7 +3575,7 @@ static int bmp_bgp_attribute_updated(struct bgp *bgp, bool withdraw)
frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) { frr_each (bmp_imported_bgps, &bt->imported_bgps, bib) {
if (bgp_lookup_by_name(bib->name) != bgp) if (bgp_lookup_by_name(bib->name) != bgp)
continue; continue;
bmp_bgp_attribute_updated_instance(bt, &bib->vrf_state, bgp, ret += bmp_bgp_attribute_updated_instance(bt, &bib->vrf_state, bgp,
withdraw, s); withdraw, s);
if (withdraw) if (withdraw)
continue; continue;

View file

@ -1441,14 +1441,14 @@ static char *_ecommunity_ecom2str(struct ecommunity *ecom, int format, int filte
snprintf(encbuf, sizeof(encbuf), "FS:action %s", snprintf(encbuf, sizeof(encbuf), "FS:action %s",
action); action);
} else if (sub_type == ECOMMUNITY_TRAFFIC_RATE) { } else if (sub_type == ECOMMUNITY_TRAFFIC_RATE) {
union traffic_rate rate; union traffic_rate data;
rate.rate_byte[3] = *(pnt + 2); data.rate_byte[3] = *(pnt+2);
rate.rate_byte[2] = *(pnt + 3); data.rate_byte[2] = *(pnt+3);
rate.rate_byte[1] = *(pnt + 4); data.rate_byte[1] = *(pnt+4);
rate.rate_byte[0] = *(pnt + 5); data.rate_byte[0] = *(pnt+5);
snprintf(encbuf, sizeof(encbuf), "FS:rate %f", snprintf(encbuf, sizeof(encbuf), "FS:rate %f",
rate.rate_float); data.rate_float);
} else if (sub_type == ECOMMUNITY_TRAFFIC_MARKING) { } else if (sub_type == ECOMMUNITY_TRAFFIC_MARKING) {
snprintf(encbuf, sizeof(encbuf), snprintf(encbuf, sizeof(encbuf),
"FS:marking %u", *(pnt + 5)); "FS:marking %u", *(pnt + 5));

View file

@@ -905,7 +905,7 @@ static enum zclient_send_status bgp_zebra_send_remote_macip(
 	bool esi_valid;
 
 	/* Check socket. */
-	if (!bgp_zclient || bgp_zclient->sock < 0) {
+	if (!zclient || zclient->sock < 0) {
 		if (BGP_DEBUG(zebra, ZEBRA))
 			zlog_debug("%s: No zclient or zclient->sock exists",
 				   __func__);
@@ -923,7 +923,7 @@ static enum zclient_send_status bgp_zebra_send_remote_macip(
 	if (!esi)
 		esi = zero_esi;
 
-	s = bgp_zclient->obuf;
+	s = zclient->obuf;
 	stream_reset(s);
 
 	zclient_create_header(
@@ -984,7 +984,7 @@ static enum zclient_send_status bgp_zebra_send_remote_macip(
 	frrtrace(5, frr_bgp, evpn_mac_ip_zsend, add, vpn, p, remote_vtep_ip,
 		 esi);
 
-	return zclient_send_message(bgp_zclient);
+	return zclient_send_message(zclient);
 }
 
 /*
@@ -998,7 +998,7 @@ bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn,
 	struct stream *s;
 
 	/* Check socket. */
-	if (!bgp_zclient || bgp_zclient->sock < 0) {
+	if (!zclient || zclient->sock < 0) {
 		if (BGP_DEBUG(zebra, ZEBRA))
 			zlog_debug("%s: No zclient or zclient->sock exists",
 				   __func__);
@@ -1014,7 +1014,7 @@ bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn,
 		return ZCLIENT_SEND_SUCCESS;
 	}
 
-	s = bgp_zclient->obuf;
+	s = zclient->obuf;
 	stream_reset(s);
 
 	zclient_create_header(
@@ -1041,7 +1041,7 @@ bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn,
 
 	frrtrace(3, frr_bgp, evpn_bum_vtep_zsend, add, vpn, p);
 
-	return zclient_send_message(bgp_zclient);
+	return zclient_send_message(zclient);
 }
 
 /*
@@ -2062,7 +2062,8 @@ static int update_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
 		bgp_path_info_add(dest, tmp_pi);
 	} else {
 		tmp_pi = local_pi;
-		if (!CHECK_FLAG(tmp_pi->flags, BGP_PATH_REMOVED) && attrhash_cmp(tmp_pi->attr, attr))
+		if (attrhash_cmp(tmp_pi->attr, attr)
+		    && !CHECK_FLAG(tmp_pi->flags, BGP_PATH_REMOVED))
 			route_change = 0;
 		else {
 			/*
@@ -3153,7 +3154,8 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
 		pi = bgp_create_evpn_bgp_path_info(parent_pi, dest, &attr);
 		new_pi = true;
 	} else {
-		if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED) && attrhash_cmp(pi->attr, &attr)) {
+		if (attrhash_cmp(pi->attr, &attr)
+		    && !CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)) {
 			bgp_dest_unlock_node(dest);
 			return 0;
 		}
@@ -3182,7 +3184,8 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
 
 	/* Gateway IP nexthop should be resolved */
 	if (bre && bre->type == OVERLAY_INDEX_GATEWAY_IP) {
-		if (bgp_find_or_add_nexthop(bgp_vrf, bgp_vrf, afi, safi, pi, NULL, 0, NULL, NULL))
+		if (bgp_find_or_add_nexthop(bgp_vrf, bgp_vrf, afi, safi, pi,
+					    NULL, 0, NULL))
 			bgp_path_info_set_flag(dest, pi, BGP_PATH_VALID);
 		else {
 			if (BGP_DEBUG(nht, NHT)) {
@@ -3275,8 +3278,8 @@ static int install_evpn_route_entry_in_vni_common(
 	 * install_evpn_route_entry_in_vni_mac() or
 	 * install_evpn_route_entry_in_vni_ip()
 	 */
-	if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED) &&
-	    attrhash_cmp(pi->attr, parent_pi->attr))
+	if (attrhash_cmp(pi->attr, parent_pi->attr) &&
+	    !CHECK_FLAG(pi->flags, BGP_PATH_REMOVED))
 		return 0;
 	/* The attribute has changed. */
 	/* Add (or update) attribute to hash. */
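Several hunks in this file differ only in the order of the two tests: the side that checks the cheap BGP_PATH_REMOVED flag first lets a removed path short-circuit before the comparatively expensive attrhash_cmp() call, which is presumably the point of the reordering. Reduced to a sketch with hypothetical types:

#include <stdbool.h>
#include <stdint.h>

#define BGP_PATH_REMOVED (1 << 0)
#define CHECK_FLAG(V, F) ((V) & (F))

struct path {
	uint32_t flags;
	const void *attr;
};

/* Stand-in for attrhash_cmp(): compares full attribute sets, so it is the
 * costly half of the test. */
static bool attr_equal(const void *a, const void *b)
{
	return a == b;
}

static bool path_unchanged(const struct path *pi, const void *new_attr)
{
	/* flag test first: removed paths never count as "unchanged", and the
	 * attribute comparison is skipped entirely for them */
	return !CHECK_FLAG(pi->flags, BGP_PATH_REMOVED) &&
	       attr_equal(pi->attr, new_attr);
}

int main(void)
{
	struct path pi = { .flags = BGP_PATH_REMOVED, .attr = 0 };

	return path_unchanged(&pi, 0) ? 1 : 0;
}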

View file

@ -212,8 +212,8 @@ static int bgp_evpn_es_route_install(struct bgp *bgp,
bgp_dest_lock_node((struct bgp_dest *)parent_pi->net); bgp_dest_lock_node((struct bgp_dest *)parent_pi->net);
bgp_path_info_add(dest, pi); bgp_path_info_add(dest, pi);
} else { } else {
if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED) && if (attrhash_cmp(pi->attr, parent_pi->attr)
attrhash_cmp(pi->attr, parent_pi->attr)) { && !CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)) {
bgp_dest_unlock_node(dest); bgp_dest_unlock_node(dest);
return 0; return 0;
} }
@ -421,7 +421,8 @@ int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
bgp_path_info_add(dest, tmp_pi); bgp_path_info_add(dest, tmp_pi);
} else { } else {
tmp_pi = local_pi; tmp_pi = local_pi;
if (!CHECK_FLAG(tmp_pi->flags, BGP_PATH_REMOVED) && attrhash_cmp(tmp_pi->attr, attr)) if (attrhash_cmp(tmp_pi->attr, attr)
&& !CHECK_FLAG(tmp_pi->flags, BGP_PATH_REMOVED))
*route_changed = 0; *route_changed = 0;
else { else {
/* The attribute has changed. /* The attribute has changed.
@ -1387,7 +1388,7 @@ bgp_zebra_send_remote_es_vtep(struct bgp *bgp, struct bgp_evpn_es_vtep *es_vtep,
uint32_t flags = 0; uint32_t flags = 0;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) { if (!zclient || zclient->sock < 0) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: No zclient or zclient->sock exists", zlog_debug("%s: No zclient or zclient->sock exists",
__func__); __func__);
@ -1405,7 +1406,7 @@ bgp_zebra_send_remote_es_vtep(struct bgp *bgp, struct bgp_evpn_es_vtep *es_vtep,
if (CHECK_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ESR)) if (CHECK_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ESR))
SET_FLAG(flags, ZAPI_ES_VTEP_FLAG_ESR_RXED); SET_FLAG(flags, ZAPI_ES_VTEP_FLAG_ESR_RXED);
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, zclient_create_header(s,
@ -1427,7 +1428,7 @@ bgp_zebra_send_remote_es_vtep(struct bgp *bgp, struct bgp_evpn_es_vtep *es_vtep,
frrtrace(3, frr_bgp, evpn_mh_vtep_zsend, add, es, es_vtep); frrtrace(3, frr_bgp, evpn_mh_vtep_zsend, add, es, es_vtep);
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
static enum zclient_send_status bgp_evpn_es_vtep_re_eval_active( static enum zclient_send_status bgp_evpn_es_vtep_re_eval_active(
@ -2876,7 +2877,7 @@ static void bgp_evpn_l3nhg_zebra_add_v4_or_v6(struct bgp_evpn_es_vrf *es_vrf,
if (!api_nhg.nexthop_num) if (!api_nhg.nexthop_num)
return; return;
zclient_nhg_send(bgp_zclient, ZEBRA_NHG_ADD, &api_nhg); zclient_nhg_send(zclient, ZEBRA_NHG_ADD, &api_nhg);
} }
static bool bgp_evpn_l3nhg_zebra_ok(struct bgp_evpn_es_vrf *es_vrf) static bool bgp_evpn_l3nhg_zebra_ok(struct bgp_evpn_es_vrf *es_vrf)
@ -2885,7 +2886,7 @@ static bool bgp_evpn_l3nhg_zebra_ok(struct bgp_evpn_es_vrf *es_vrf)
return false; return false;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return false; return false;
return true; return true;
@ -2920,7 +2921,7 @@ static void bgp_evpn_l3nhg_zebra_del_v4_or_v6(struct bgp_evpn_es_vrf *es_vrf,
frrtrace(4, frr_bgp, evpn_mh_nhg_zsend, false, v4_nhg, api_nhg.id, frrtrace(4, frr_bgp, evpn_mh_nhg_zsend, false, v4_nhg, api_nhg.id,
es_vrf); es_vrf);
zclient_nhg_send(bgp_zclient, ZEBRA_NHG_DEL, &api_nhg); zclient_nhg_send(zclient, ZEBRA_NHG_DEL, &api_nhg);
} }
static void bgp_evpn_l3nhg_zebra_del(struct bgp_evpn_es_vrf *es_vrf) static void bgp_evpn_l3nhg_zebra_del(struct bgp_evpn_es_vrf *es_vrf)
@ -4476,7 +4477,7 @@ static void bgp_evpn_nh_zebra_update_send(struct bgp_evpn_nh *nh, bool add)
struct bgp *bgp_vrf = nh->bgp_vrf; struct bgp *bgp_vrf = nh->bgp_vrf;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return; return;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
@ -4487,7 +4488,7 @@ static void bgp_evpn_nh_zebra_update_send(struct bgp_evpn_nh *nh, bool add)
return; return;
} }
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header( zclient_create_header(
@ -4512,7 +4513,7 @@ static void bgp_evpn_nh_zebra_update_send(struct bgp_evpn_nh *nh, bool add)
frrtrace(2, frr_bgp, evpn_mh_nh_rmac_zsend, add, nh); frrtrace(2, frr_bgp, evpn_mh_nh_rmac_zsend, add, nh);
zclient_send_message(bgp_zclient); zclient_send_message(zclient);
} }
static void bgp_evpn_nh_zebra_update(struct bgp_evpn_nh *nh, bool add) static void bgp_evpn_nh_zebra_update(struct bgp_evpn_nh *nh, bool add)

View file

@@ -673,6 +673,8 @@ static inline bool bgp_evpn_is_path_local(struct bgp *bgp,
 		&& pi->sub_type == BGP_ROUTE_STATIC);
 }
 
+extern struct zclient *zclient;
+
 extern void bgp_evpn_install_uninstall_default_route(struct bgp *bgp_vrf,
 						      afi_t afi, safi_t safi,
 						      bool add);

View file

@ -1462,22 +1462,22 @@ static int bgp_show_ethernet_vpn(struct vty *vty, struct prefix_rd *prd,
output_count++; output_count++;
if (use_json && json_array) { if (use_json && json_array) {
const struct prefix *pfx = const struct prefix *p =
bgp_dest_get_prefix(rm); bgp_dest_get_prefix(rm);
json_prefix_info = json_object_new_object(); json_prefix_info = json_object_new_object();
json_object_string_addf(json_prefix_info, json_object_string_addf(json_prefix_info,
"prefix", "%pFX", pfx); "prefix", "%pFX", p);
json_object_int_add(json_prefix_info, json_object_int_add(json_prefix_info,
"prefixLen", pfx->prefixlen); "prefixLen", p->prefixlen);
json_object_object_add(json_prefix_info, json_object_object_add(json_prefix_info,
"paths", json_array); "paths", json_array);
json_object_object_addf(json_nroute, json_object_object_addf(json_nroute,
json_prefix_info, json_prefix_info,
"%pFX", pfx); "%pFX", p);
json_array = NULL; json_array = NULL;
} }
} }
@ -6617,17 +6617,18 @@ static int add_rt(struct bgp *bgp, struct ecommunity *ecom, bool is_import,
{ {
/* Do nothing if we already have this route-target */ /* Do nothing if we already have this route-target */
if (is_import) { if (is_import) {
if (CHECK_FLAG(bgp->vrf_flags, BGP_VRF_IMPORT_RT_CFGD) && if (!bgp_evpn_vrf_rt_matches_existing(bgp->vrf_import_rtl,
bgp_evpn_vrf_rt_matches_existing(bgp->vrf_import_rtl, ecom)) ecom))
bgp_evpn_configure_import_rt_for_vrf(bgp, ecom,
is_wildcard);
else
return -1; return -1;
bgp_evpn_configure_import_rt_for_vrf(bgp, ecom, is_wildcard);
} else { } else {
if (CHECK_FLAG(bgp->vrf_flags, BGP_VRF_EXPORT_RT_CFGD) && if (!bgp_evpn_vrf_rt_matches_existing(bgp->vrf_export_rtl,
bgp_evpn_vrf_rt_matches_existing(bgp->vrf_export_rtl, ecom)) ecom))
return -1;
bgp_evpn_configure_export_rt_for_vrf(bgp, ecom); bgp_evpn_configure_export_rt_for_vrf(bgp, ecom);
else
return -1;
} }
return 0; return 0;
@ -7077,11 +7078,10 @@ DEFUN (bgp_evpn_vni_rt,
ecommunity_str(ecomadd); ecommunity_str(ecomadd);
/* Do nothing if we already have this import route-target */ /* Do nothing if we already have this import route-target */
if (CHECK_FLAG(vpn->flags, VNI_FLAG_IMPRT_CFGD) && if (!bgp_evpn_rt_matches_existing(vpn->import_rtl, ecomadd))
bgp_evpn_rt_matches_existing(vpn->import_rtl, ecomadd))
ecommunity_free(&ecomadd);
else
evpn_configure_import_rt(bgp, vpn, ecomadd); evpn_configure_import_rt(bgp, vpn, ecomadd);
else
ecommunity_free(&ecomadd);
} }
/* Add/update the export route-target */ /* Add/update the export route-target */
@ -7096,11 +7096,10 @@ DEFUN (bgp_evpn_vni_rt,
ecommunity_str(ecomadd); ecommunity_str(ecomadd);
/* Do nothing if we already have this export route-target */ /* Do nothing if we already have this export route-target */
if (CHECK_FLAG(vpn->flags, VNI_FLAG_EXPRT_CFGD) && if (!bgp_evpn_rt_matches_existing(vpn->export_rtl, ecomadd))
bgp_evpn_rt_matches_existing(vpn->export_rtl, ecomadd))
ecommunity_free(&ecomadd);
else
evpn_configure_export_rt(bgp, vpn, ecomadd); evpn_configure_export_rt(bgp, vpn, ecomadd);
else
ecommunity_free(&ecomadd);
} }
return CMD_SUCCESS; return CMD_SUCCESS;

View file

@@ -105,6 +105,13 @@ int bgp_nlri_parse_flowspec(struct peer *peer, struct attr *attr,
 	if (!attr)
 		withdraw = true;
 
+	if (packet->length >= FLOWSPEC_NLRI_SIZELIMIT_EXTENDED) {
+		flog_err(EC_BGP_FLOWSPEC_PACKET,
+			 "BGP flowspec nlri length maximum reached (%u)",
+			 packet->length);
+		return BGP_NLRI_PARSE_ERROR_FLOWSPEC_NLRI_SIZELIMIT;
+	}
+
 	for (; pnt < lim; pnt += psize) {
 		/* Clear prefix structure. */
 		memset(&p, 0, sizeof(p));

View file

@@ -7,6 +7,7 @@
 #define _FRR_BGP_FLOWSPEC_PRIVATE_H
 
 #define FLOWSPEC_NLRI_SIZELIMIT			240
+#define FLOWSPEC_NLRI_SIZELIMIT_EXTENDED	4095
 
 /* Flowspec raffic action bit*/
 #define FLOWSPEC_TRAFFIC_ACTION_TERMINAL	1
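The two limits above line up with the flowspec NLRI length encoding from RFC 8955: a length below 240 fits in a single octet, and anything up to 4095 uses two octets with the high nibble of the first octet set to 0xF. A small sketch of that decode, with hypothetical helper names:

#include <stdint.h>
#include <stddef.h>

#define FLOWSPEC_NLRI_SIZELIMIT		 240
#define FLOWSPEC_NLRI_SIZELIMIT_EXTENDED 4095

/* Return the NLRI length and advance *offset past the length field.
 * Lengths < 240 are one octet; 240..4095 are encoded as 0xFn LL. */
static uint16_t fs_nlri_length(const uint8_t *buf, size_t *offset)
{
	uint8_t first = buf[(*offset)++];

	if (first < FLOWSPEC_NLRI_SIZELIMIT)
		return first;

	/* extended form: low nibble of the first octet is the high 4 bits */
	return (uint16_t)(((first & 0x0f) << 8) | buf[(*offset)++]);
}

int main(void)
{
	const uint8_t nlri[] = { 0xf1, 0x2c };	/* extended form: length 300 */
	size_t off = 0;

	return fs_nlri_length(nlri, &off) == 300 ? 0 : 1;
}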

View file

@ -94,8 +94,10 @@ int bgp_peer_reg_with_nht(struct peer *peer)
connected = 1; connected = 1;
return bgp_find_or_add_nexthop(peer->bgp, peer->bgp, return bgp_find_or_add_nexthop(peer->bgp, peer->bgp,
family2afi(peer->connection->su.sa.sa_family), SAFI_UNICAST, family2afi(
NULL, peer, connected, NULL, NULL); peer->connection->su.sa.sa_family),
SAFI_UNICAST, NULL, peer, connected,
NULL);
} }
static void peer_xfer_stats(struct peer *peer_dst, struct peer *peer_src) static void peer_xfer_stats(struct peer *peer_dst, struct peer *peer_src)
@ -182,11 +184,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
EVENT_OFF(keeper->t_delayopen); EVENT_OFF(keeper->t_delayopen);
EVENT_OFF(keeper->t_connect_check_r); EVENT_OFF(keeper->t_connect_check_r);
EVENT_OFF(keeper->t_connect_check_w); EVENT_OFF(keeper->t_connect_check_w);
EVENT_OFF(keeper->t_process_packet);
frr_with_mutex (&bm->peer_connection_mtx) {
if (peer_connection_fifo_member(&bm->connection_fifo, keeper))
peer_connection_fifo_del(&bm->connection_fifo, keeper);
}
/* /*
* At this point in time, it is possible that there are packets pending * At this point in time, it is possible that there are packets pending
@ -307,13 +305,8 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
bgp_reads_on(keeper); bgp_reads_on(keeper);
bgp_writes_on(keeper); bgp_writes_on(keeper);
event_add_event(bm->master, bgp_process_packet, keeper, 0,
frr_with_mutex (&bm->peer_connection_mtx) { &keeper->t_process_packet);
if (!peer_connection_fifo_member(&bm->connection_fifo, keeper)) {
peer_connection_fifo_add_tail(&bm->connection_fifo, keeper);
}
}
event_add_event(bm->master, bgp_process_packet, NULL, 0, &bm->e_process_packet);
return (peer); return (peer);
} }
@ -332,7 +325,7 @@ void bgp_timer_set(struct peer_connection *connection)
/* First entry point of peer's finite state machine. In Idle /* First entry point of peer's finite state machine. In Idle
status start timer is on unless peer is shutdown or peer is status start timer is on unless peer is shutdown or peer is
inactive. All other timer must be turned off */ inactive. All other timer must be turned off */
if (BGP_PEER_START_SUPPRESSED(peer) || peer_active(connection) != BGP_PEER_ACTIVE || if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(connection) ||
peer->bgp->vrf_id == VRF_UNKNOWN) { peer->bgp->vrf_id == VRF_UNKNOWN) {
EVENT_OFF(connection->t_start); EVENT_OFF(connection->t_start);
} else { } else {
@ -479,8 +472,7 @@ static void bgp_start_timer(struct event *thread)
struct peer *peer = connection->peer; struct peer *peer = connection->peer;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (start timer expire for %s).", peer->host, zlog_debug("%s [FSM] Timer (start timer expire).", peer->host);
bgp_peer_get_connection_direction(connection));
EVENT_VAL(thread) = BGP_Start; EVENT_VAL(thread) = BGP_Start;
bgp_event(thread); /* bgp_event unlocks peer */ bgp_event(thread); /* bgp_event unlocks peer */
@ -499,8 +491,8 @@ static void bgp_connect_timer(struct event *thread)
assert(!connection->t_read); assert(!connection->t_read);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (connect timer (%us) expire for %s)", peer->host, zlog_debug("%s [FSM] Timer (connect timer (%us) expire)", peer->host,
peer->v_connect, bgp_peer_get_connection_direction(connection)); peer->v_connect);
if (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) if (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
bgp_stop(connection); bgp_stop(connection);
@ -520,8 +512,8 @@ static void bgp_holdtime_timer(struct event *thread)
struct peer *peer = connection->peer; struct peer *peer = connection->peer;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (holdtime timer expire for %s)", peer->host, zlog_debug("%s [FSM] Timer (holdtime timer expire)",
bgp_peer_get_connection_direction(connection)); peer->host);
/* /*
* Given that we do not have any expectation of ordering * Given that we do not have any expectation of ordering
@ -536,11 +528,9 @@ static void bgp_holdtime_timer(struct event *thread)
frr_with_mutex (&connection->io_mtx) { frr_with_mutex (&connection->io_mtx) {
inq_count = atomic_load_explicit(&connection->ibuf->count, memory_order_relaxed); inq_count = atomic_load_explicit(&connection->ibuf->count, memory_order_relaxed);
} }
if (inq_count) { if (inq_count)
BGP_TIMER_ON(connection->t_holdtime, bgp_holdtime_timer, BGP_TIMER_ON(connection->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime); peer->v_holdtime);
return;
}
EVENT_VAL(thread) = Hold_Timer_expired; EVENT_VAL(thread) = Hold_Timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */ bgp_event(thread); /* bgp_event unlocks peer */
@ -552,8 +542,7 @@ void bgp_routeadv_timer(struct event *thread)
struct peer *peer = connection->peer; struct peer *peer = connection->peer;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (routeadv timer expire for %s)", peer->host, zlog_debug("%s [FSM] Timer (routeadv timer expire)", peer->host);
bgp_peer_get_connection_direction(connection));
peer->synctime = monotime(NULL); peer->synctime = monotime(NULL);
@ -572,8 +561,8 @@ void bgp_delayopen_timer(struct event *thread)
struct peer *peer = connection->peer; struct peer *peer = connection->peer;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Timer (DelayOpentimer expire for %s)", peer->host, zlog_debug("%s [FSM] Timer (DelayOpentimer expire)",
bgp_peer_get_connection_direction(connection)); peer->host);
EVENT_VAL(thread) = DelayOpen_timer_expired; EVENT_VAL(thread) = DelayOpen_timer_expired;
bgp_event(thread); /* bgp_event unlocks peer */ bgp_event(thread); /* bgp_event unlocks peer */
@ -639,8 +628,8 @@ static void bgp_graceful_restart_timer_off(struct peer_connection *connection,
if (peer_dynamic_neighbor(peer) && if (peer_dynamic_neighbor(peer) &&
!(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) { !(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s (dynamic neighbor) deleted (%s) for %s", __func__, zlog_debug("%s (dynamic neighbor) deleted (%s)",
peer->host, bgp_peer_get_connection_direction(connection)); peer->host, __func__);
peer_delete(peer); peer_delete(peer);
} }
@ -665,9 +654,8 @@ static void bgp_llgr_stale_timer_expire(struct event *thread)
* stale routes from the neighbor that it is retaining. * stale routes from the neighbor that it is retaining.
*/ */
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP Long-lived stale timer (%s) expired for %s", peer, zlog_debug("%pBP Long-lived stale timer (%s) expired", peer,
get_afi_safi_str(afi, safi, false), get_afi_safi_str(afi, safi, false));
bgp_peer_get_connection_direction(peer->connection));
UNSET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_LLGR_WAIT); UNSET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_LLGR_WAIT);
@ -765,9 +753,11 @@ static void bgp_graceful_restart_timer_expire(struct event *thread)
afi_t afi; afi_t afi;
safi_t safi; safi_t safi;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer)) {
zlog_debug("%pBP graceful restart timer expired and graceful restart stalepath timer stopped for %s", zlog_debug("%pBP graceful restart timer expired", peer);
peer, bgp_peer_get_connection_direction(connection)); zlog_debug("%pBP graceful restart stalepath timer stopped",
peer);
}
FOREACH_AFI_SAFI (afi, safi) { FOREACH_AFI_SAFI (afi, safi) {
if (!peer->nsf[afi][safi]) if (!peer->nsf[afi][safi])
@ -791,10 +781,11 @@ static void bgp_graceful_restart_timer_expire(struct event *thread)
continue; continue;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP Long-lived stale timer (%s) started for %d sec for %s", zlog_debug(
peer, get_afi_safi_str(afi, safi, false), "%pBP Long-lived stale timer (%s) started for %d sec",
peer->llgr[afi][safi].stale_time, peer,
bgp_peer_get_connection_direction(connection)); get_afi_safi_str(afi, safi, false),
peer->llgr[afi][safi].stale_time);
SET_FLAG(peer->af_sflags[afi][safi], SET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_LLGR_WAIT); PEER_STATUS_LLGR_WAIT);
@ -825,8 +816,8 @@ static void bgp_graceful_stale_timer_expire(struct event *thread)
safi_t safi; safi_t safi;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart stalepath timer expired for %s", peer, zlog_debug("%pBP graceful restart stalepath timer expired",
bgp_peer_get_connection_direction(connection)); peer);
/* NSF delete stale route */ /* NSF delete stale route */
FOREACH_AFI_SAFI_NSF (afi, safi) FOREACH_AFI_SAFI_NSF (afi, safi)
@ -1251,10 +1242,10 @@ void bgp_fsm_change_status(struct peer_connection *connection,
if (bgp_debug_neighbor_events(peer)) { if (bgp_debug_neighbor_events(peer)) {
struct vrf *vrf = vrf_lookup_by_id(bgp->vrf_id); struct vrf *vrf = vrf_lookup_by_id(bgp->vrf_id);
zlog_debug("%s : vrf %s(%u), Status: %s established_peers %u for %s", __func__, zlog_debug("%s : vrf %s(%u), Status: %s established_peers %u", __func__,
vrf ? vrf->name : "Unknown", bgp->vrf_id, vrf ? vrf->name : "Unknown", bgp->vrf_id,
lookup_msg(bgp_status_msg, status, NULL), bgp->established_peers, lookup_msg(bgp_status_msg, status, NULL),
bgp_peer_get_connection_direction(connection)); bgp->established_peers);
} }
/* Set to router ID to the value provided by RIB if there are no peers /* Set to router ID to the value provided by RIB if there are no peers
@ -1267,7 +1258,7 @@ void bgp_fsm_change_status(struct peer_connection *connection,
/* Transition into Clearing or Deleted must /always/ clear all routes.. /* Transition into Clearing or Deleted must /always/ clear all routes..
* (and must do so before actually changing into Deleted.. * (and must do so before actually changing into Deleted..
*/ */
if (status >= Clearing && (peer->established || peer != bgp->peer_self)) { if (status >= Clearing && (peer->established || peer == bgp->peer_self)) {
bgp_clear_route_all(peer); bgp_clear_route_all(peer);
/* If no route was queued for the clear-node processing, /* If no route was queued for the clear-node processing,
@ -1290,8 +1281,7 @@ void bgp_fsm_change_status(struct peer_connection *connection,
* Clearing * Clearing
* (or Deleted). * (or Deleted).
*/ */
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH) && if (!work_queue_is_scheduled(peer->clear_node_queue) &&
!work_queue_is_scheduled(peer->clear_node_queue) &&
status != Deleted) status != Deleted)
BGP_EVENT_ADD(connection, Clearing_Completed); BGP_EVENT_ADD(connection, Clearing_Completed);
} }
@ -1332,10 +1322,10 @@ void bgp_fsm_change_status(struct peer_connection *connection,
bgp_update_delay_process_status_change(peer); bgp_update_delay_process_status_change(peer);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s fd %d went from %s to %s for %s", peer->host, connection->fd, zlog_debug("%s fd %d went from %s to %s", peer->host,
connection->fd,
lookup_msg(bgp_status_msg, connection->ostatus, NULL), lookup_msg(bgp_status_msg, connection->ostatus, NULL),
lookup_msg(bgp_status_msg, connection->status, NULL), lookup_msg(bgp_status_msg, connection->status, NULL));
bgp_peer_get_connection_direction(connection));
} }
/* Flush the event queue and ensure the peer is shut down */ /* Flush the event queue and ensure the peer is shut down */
@ -1367,8 +1357,8 @@ enum bgp_fsm_state_progress bgp_stop(struct peer_connection *connection)
if (peer_dynamic_neighbor_no_nsf(peer) && if (peer_dynamic_neighbor_no_nsf(peer) &&
!(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) { !(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s (dynamic neighbor) deleted (%s) for %s", __func__, zlog_debug("%s (dynamic neighbor) deleted (%s)",
peer->host, bgp_peer_get_connection_direction(connection)); peer->host, __func__);
peer_delete(peer); peer_delete(peer);
return BGP_FSM_FAILURE_AND_DELETE; return BGP_FSM_FAILURE_AND_DELETE;
} }
@ -1409,17 +1399,18 @@ enum bgp_fsm_state_progress bgp_stop(struct peer_connection *connection)
if (connection->t_gr_stale) { if (connection->t_gr_stale) {
EVENT_OFF(connection->t_gr_stale); EVENT_OFF(connection->t_gr_stale);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart stalepath timer stopped for %s", zlog_debug(
peer, bgp_peer_get_connection_direction(connection)); "%pBP graceful restart stalepath timer stopped",
peer);
} }
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) { if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {
if (bgp_debug_neighbor_events(peer)) { if (bgp_debug_neighbor_events(peer)) {
zlog_debug("%pBP graceful restart timer started for %d sec for %s", zlog_debug(
peer, peer->v_gr_restart, "%pBP graceful restart timer started for %d sec",
bgp_peer_get_connection_direction(connection)); peer, peer->v_gr_restart);
zlog_debug("%pBP graceful restart stalepath timer started for %d sec for %s", zlog_debug(
peer, peer->bgp->stalepath_time, "%pBP graceful restart stalepath timer started for %d sec",
bgp_peer_get_connection_direction(connection)); peer, peer->bgp->stalepath_time);
} }
BGP_TIMER_ON(connection->t_gr_restart, BGP_TIMER_ON(connection->t_gr_restart,
bgp_graceful_restart_timer_expire, bgp_graceful_restart_timer_expire,
@ -1439,8 +1430,9 @@ enum bgp_fsm_state_progress bgp_stop(struct peer_connection *connection)
EVENT_OFF(peer->t_refresh_stalepath); EVENT_OFF(peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP route-refresh restart stalepath timer stopped for %s", zlog_debug(
peer, bgp_peer_get_connection_direction(connection)); "%pBP route-refresh restart stalepath timer stopped",
peer);
} }
/* If peer reset before receiving EOR, decrement EOR count and /* If peer reset before receiving EOR, decrement EOR count and
@ -1462,9 +1454,9 @@ enum bgp_fsm_state_progress bgp_stop(struct peer_connection *connection)
gr_info->eor_required--; gr_info->eor_required--;
if (BGP_DEBUG(update, UPDATE_OUT)) if (BGP_DEBUG(update, UPDATE_OUT))
zlog_debug("peer %s, EOR_required %d for %s", peer->host, zlog_debug("peer %s, EOR_required %d",
gr_info->eor_required, peer->host,
bgp_peer_get_connection_direction(connection)); gr_info->eor_required);
/* There is no pending EOR message */ /* There is no pending EOR message */
if (gr_info->eor_required == 0) { if (gr_info->eor_required == 0) {
@ -1483,8 +1475,8 @@ enum bgp_fsm_state_progress bgp_stop(struct peer_connection *connection)
peer->resettime = peer->uptime = monotime(NULL); peer->resettime = peer->uptime = monotime(NULL);
if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
zlog_debug("%s remove from all update group for %s", peer->host, zlog_debug("%s remove from all update group",
bgp_peer_get_connection_direction(connection)); peer->host);
update_group_remove_peer_afs(peer); update_group_remove_peer_afs(peer);
/* Reset peer synctime */ /* Reset peer synctime */
@ -1530,7 +1522,6 @@ enum bgp_fsm_state_progress bgp_stop(struct peer_connection *connection)
if (connection->fd >= 0) { if (connection->fd >= 0) {
close(connection->fd); close(connection->fd);
connection->fd = -1; connection->fd = -1;
connection->dir = UNKNOWN;
} }
/* Reset capabilities. */ /* Reset capabilities. */
@ -1605,8 +1596,8 @@ bgp_stop_with_error(struct peer_connection *connection)
if (peer_dynamic_neighbor_no_nsf(peer)) { if (peer_dynamic_neighbor_no_nsf(peer)) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s (dynamic neighbor) deleted (%s) for %s", __func__, zlog_debug("%s (dynamic neighbor) deleted (%s)",
peer->host, bgp_peer_get_connection_direction(connection)); peer->host, __func__);
peer_delete(peer); peer_delete(peer);
return BGP_FSM_FAILURE; return BGP_FSM_FAILURE;
} }
@ -1627,8 +1618,8 @@ bgp_stop_with_notify(struct peer_connection *connection, uint8_t code,
if (peer_dynamic_neighbor_no_nsf(peer)) { if (peer_dynamic_neighbor_no_nsf(peer)) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s (dynamic neighbor) deleted (%s) for %s", __func__, zlog_debug("%s (dynamic neighbor) deleted (%s)",
peer->host, bgp_peer_get_connection_direction(connection)); peer->host, __func__);
peer_delete(peer); peer_delete(peer);
return BGP_FSM_FAILURE; return BGP_FSM_FAILURE;
} }
@ -1693,9 +1684,8 @@ static void bgp_connect_check(struct event *thread)
return; return;
} else { } else {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [Event] Connect failed %d(%s) for connection %s", peer->host, zlog_debug("%s [Event] Connect failed %d(%s)",
status, safe_strerror(status), peer->host, status, safe_strerror(status));
bgp_peer_get_connection_direction(connection));
BGP_EVENT_ADD(connection, TCP_connection_open_failed); BGP_EVENT_ADD(connection, TCP_connection_open_failed);
return; return;
} }
@ -1734,12 +1724,10 @@ bgp_connect_success(struct peer_connection *connection)
if (bgp_debug_neighbor_events(peer)) { if (bgp_debug_neighbor_events(peer)) {
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
zlog_debug("%s open active, local address %pSU for %s", peer->host, zlog_debug("%s open active, local address %pSU", peer->host,
connection->su_local, connection->su_local);
bgp_peer_get_connection_direction(connection));
else else
zlog_debug("%s passive open for %s", peer->host, zlog_debug("%s passive open", peer->host);
bgp_peer_get_connection_direction(connection));
} }
/* Send an open message */ /* Send an open message */
@ -1782,12 +1770,10 @@ bgp_connect_success_w_delayopen(struct peer_connection *connection)
if (bgp_debug_neighbor_events(peer)) { if (bgp_debug_neighbor_events(peer)) {
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER)) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
zlog_debug("%s open active, local address %pSU for %s", peer->host, zlog_debug("%s open active, local address %pSU", peer->host,
connection->su_local, connection->su_local);
bgp_peer_get_connection_direction(connection));
else else
zlog_debug("%s passive open for %s", peer->host, zlog_debug("%s passive open", peer->host);
bgp_peer_get_connection_direction(connection));
} }
/* set the DelayOpenTime to the inital value */ /* set the DelayOpenTime to the inital value */
@ -1799,9 +1785,8 @@ bgp_connect_success_w_delayopen(struct peer_connection *connection)
peer->v_delayopen); peer->v_delayopen);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] BGP OPEN message delayed for %d seconds for connection %s", zlog_debug("%s [FSM] BGP OPEN message delayed for %d seconds",
peer->host, peer->delayopen, peer->host, peer->delayopen);
bgp_peer_get_connection_direction(connection));
return BGP_FSM_SUCCESS; return BGP_FSM_SUCCESS;
} }
@ -1814,8 +1799,8 @@ bgp_connect_fail(struct peer_connection *connection)
if (peer_dynamic_neighbor_no_nsf(peer)) { if (peer_dynamic_neighbor_no_nsf(peer)) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s (dynamic neighbor) deleted (%s) for %s", __func__, zlog_debug("%s (dynamic neighbor) deleted (%s)",
peer->host, bgp_peer_get_connection_direction(connection)); peer->host, __func__);
peer_delete(peer); peer_delete(peer);
return BGP_FSM_FAILURE_AND_DELETE; return BGP_FSM_FAILURE_AND_DELETE;
} }
@ -1858,8 +1843,9 @@ static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection)
if (connection->su.sa.sa_family == AF_UNSPEC) { if (connection->su.sa.sa_family == AF_UNSPEC) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Unable to get neighbor's IP address, waiting... for %s", zlog_debug(
peer->host, bgp_peer_get_connection_direction(connection)); "%s [FSM] Unable to get neighbor's IP address, waiting...",
peer->host);
peer->last_reset = PEER_DOWN_NBR_ADDR; peer->last_reset = PEER_DOWN_NBR_ADDR;
return BGP_FSM_FAILURE; return BGP_FSM_FAILURE;
} }
@ -1902,9 +1888,9 @@ static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection)
if (!bgp_peer_reg_with_nht(peer)) { if (!bgp_peer_reg_with_nht(peer)) {
if (bgp_zebra_num_connects()) { if (bgp_zebra_num_connects()) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Waiting for NHT, no path to neighbor present for %s", zlog_debug(
peer->host, "%s [FSM] Waiting for NHT, no path to neighbor present",
bgp_peer_get_connection_direction(connection)); peer->host);
peer->last_reset = PEER_DOWN_WAITING_NHT; peer->last_reset = PEER_DOWN_WAITING_NHT;
BGP_EVENT_ADD(connection, TCP_connection_open_failed); BGP_EVENT_ADD(connection, TCP_connection_open_failed);
return BGP_FSM_SUCCESS; return BGP_FSM_SUCCESS;
@ -1920,14 +1906,13 @@ static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection)
switch (status) { switch (status) {
case connect_error: case connect_error:
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Connect error for %s", peer->host, zlog_debug("%s [FSM] Connect error", peer->host);
bgp_peer_get_connection_direction(connection));
BGP_EVENT_ADD(connection, TCP_connection_open_failed); BGP_EVENT_ADD(connection, TCP_connection_open_failed);
break; break;
case connect_success: case connect_success:
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Connect immediately success, fd %d for %s", peer->host, zlog_debug("%s [FSM] Connect immediately success, fd %d",
connection->fd, bgp_peer_get_connection_direction(connection)); peer->host, connection->fd);
BGP_EVENT_ADD(connection, TCP_connection_open); BGP_EVENT_ADD(connection, TCP_connection_open);
break; break;
@ -1935,9 +1920,8 @@ static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection)
/* To check nonblocking connect, we wait until socket is /* To check nonblocking connect, we wait until socket is
readable or writable. */ readable or writable. */
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Non blocking connect waiting result, fd %d for %s", zlog_debug("%s [FSM] Non blocking connect waiting result, fd %d",
peer->host, connection->fd, peer->host, connection->fd);
bgp_peer_get_connection_direction(connection));
if (connection->fd < 0) { if (connection->fd < 0) {
flog_err(EC_BGP_FSM, "%s peer's fd is negative value %d", flog_err(EC_BGP_FSM, "%s peer's fd is negative value %d",
__func__, peer->connection->fd); __func__, peer->connection->fd);
@ -1984,12 +1968,14 @@ bgp_reconnect(struct peer_connection *connection)
static enum bgp_fsm_state_progress static enum bgp_fsm_state_progress
bgp_fsm_open(struct peer_connection *connection) bgp_fsm_open(struct peer_connection *connection)
{ {
struct peer *peer = connection->peer;
/* If DelayOpen is active, we may still need to send an open message */ /* If DelayOpen is active, we may still need to send an open message */
if ((connection->status == Connect) || (connection->status == Active)) if ((connection->status == Connect) || (connection->status == Active))
bgp_open_send(connection); bgp_open_send(connection);
/* Send keepalive and make keepalive timer */ /* Send keepalive and make keepalive timer */
bgp_keepalive_send(connection); bgp_keepalive_send(peer);
return BGP_FSM_SUCCESS; return BGP_FSM_SUCCESS;
} }
@ -2017,8 +2003,7 @@ bgp_fsm_holdtime_expire(struct peer_connection *connection)
struct peer *peer = connection->peer; struct peer *peer = connection->peer;
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Hold timer expire for %s", peer->host, zlog_debug("%s [FSM] Hold timer expire", peer->host);
bgp_peer_get_connection_direction(connection));
/* RFC8538 updates RFC 4724 by defining an extension that permits /* RFC8538 updates RFC 4724 by defining an extension that permits
* the Graceful Restart procedures to be performed when the BGP * the Graceful Restart procedures to be performed when the BGP
@ -2199,11 +2184,9 @@ bgp_establish(struct peer_connection *connection)
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT); UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
if (bgp_debug_neighbor_events(peer)) { if (bgp_debug_neighbor_events(peer)) {
if (BGP_PEER_RESTARTING_MODE(peer)) if (BGP_PEER_RESTARTING_MODE(peer))
zlog_debug("%pBP BGP_RESTARTING_MODE %s", peer, zlog_debug("%pBP BGP_RESTARTING_MODE", peer);
bgp_peer_get_connection_direction(connection));
else if (BGP_PEER_HELPER_MODE(peer)) else if (BGP_PEER_HELPER_MODE(peer))
zlog_debug("%pBP BGP_HELPER_MODE %s", peer, zlog_debug("%pBP BGP_HELPER_MODE", peer);
bgp_peer_get_connection_direction(connection));
} }
FOREACH_AFI_SAFI_NSF (afi, safi) { FOREACH_AFI_SAFI_NSF (afi, safi) {
@ -2276,16 +2259,16 @@ bgp_establish(struct peer_connection *connection)
if (connection->t_gr_stale) { if (connection->t_gr_stale) {
EVENT_OFF(connection->t_gr_stale); EVENT_OFF(connection->t_gr_stale);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart stalepath timer stopped for %s", zlog_debug(
peer, bgp_peer_get_connection_direction(connection)); "%pBP graceful restart stalepath timer stopped",
peer);
} }
} }
if (connection->t_gr_restart) { if (connection->t_gr_restart) {
EVENT_OFF(connection->t_gr_restart); EVENT_OFF(connection->t_gr_restart);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped for %s", peer, zlog_debug("%pBP graceful restart timer stopped", peer);
bgp_peer_get_connection_direction(connection));
} }
/* Reset uptime, turn on keepalives, send current table. */ /* Reset uptime, turn on keepalives, send current table. */
@ -2301,9 +2284,9 @@ bgp_establish(struct peer_connection *connection)
if (peer->t_llgr_stale[afi][safi]) { if (peer->t_llgr_stale[afi][safi]) {
EVENT_OFF(peer->t_llgr_stale[afi][safi]); EVENT_OFF(peer->t_llgr_stale[afi][safi]);
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP Long-lived stale timer stopped for afi/safi: %d/%d for %s", zlog_debug(
peer, afi, safi, "%pBP Long-lived stale timer stopped for afi/safi: %d/%d",
bgp_peer_get_connection_direction(connection)); peer, afi, safi);
} }
if (CHECK_FLAG(peer->af_cap[afi][safi], if (CHECK_FLAG(peer->af_cap[afi][safi],
@ -2344,8 +2327,9 @@ bgp_establish(struct peer_connection *connection)
if (peer->doppelganger && if (peer->doppelganger &&
(peer->doppelganger->connection->status != Deleted)) { (peer->doppelganger->connection->status != Deleted)) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("[Event] Deleting stub connection for peer %s for %s", peer->host, zlog_debug(
bgp_peer_get_connection_direction(peer->doppelganger->connection)); "[Event] Deleting stub connection for peer %s",
peer->host);
if (peer->doppelganger->connection->status > Active) if (peer->doppelganger->connection->status > Active)
bgp_notify_send(peer->doppelganger->connection, bgp_notify_send(peer->doppelganger->connection,
@ -2652,10 +2636,11 @@ int bgp_event_update(struct peer_connection *connection,
next = FSM[connection->status - 1][event - 1].next_state; next = FSM[connection->status - 1][event - 1].next_state;
if (bgp_debug_neighbor_events(peer) && connection->status != next) if (bgp_debug_neighbor_events(peer) && connection->status != next)
zlog_debug("%s [FSM] %s (%s->%s), fd %d for %s", peer->host, bgp_event_str[event], zlog_debug("%s [FSM] %s (%s->%s), fd %d", peer->host,
bgp_event_str[event],
lookup_msg(bgp_status_msg, connection->status, NULL), lookup_msg(bgp_status_msg, connection->status, NULL),
lookup_msg(bgp_status_msg, next, NULL), connection->fd, lookup_msg(bgp_status_msg, next, NULL),
bgp_peer_get_connection_direction(connection)); connection->fd);
peer->last_event = peer->cur_event; peer->last_event = peer->cur_event;
peer->cur_event = event; peer->cur_event = event;

View file

@ -99,11 +99,8 @@ void bgp_reads_off(struct peer_connection *connection)
assert(fpt->running); assert(fpt->running);
event_cancel_async(fpt->master, &connection->t_read, NULL); event_cancel_async(fpt->master, &connection->t_read, NULL);
EVENT_OFF(connection->t_process_packet);
frr_with_mutex (&bm->peer_connection_mtx) { EVENT_OFF(connection->t_process_packet_error);
if (peer_connection_fifo_member(&bm->connection_fifo, connection))
peer_connection_fifo_del(&bm->connection_fifo, connection);
}
UNSET_FLAG(connection->thread_flags, PEER_THREAD_READS_ON); UNSET_FLAG(connection->thread_flags, PEER_THREAD_READS_ON);
} }
@ -255,7 +252,8 @@ static void bgp_process_reads(struct event *thread)
/* Handle the error in the main pthread, include the /* Handle the error in the main pthread, include the
* specific state change from 'bgp_read'. * specific state change from 'bgp_read'.
*/ */
bgp_enqueue_conn_err(peer->bgp, connection, code); event_add_event(bm->master, bgp_packet_process_error, connection,
code, &connection->t_process_packet_error);
goto done; goto done;
} }
@ -296,13 +294,9 @@ done:
event_add_read(fpt->master, bgp_process_reads, connection, event_add_read(fpt->master, bgp_process_reads, connection,
connection->fd, &connection->t_read); connection->fd, &connection->t_read);
if (added_pkt) { if (added_pkt)
frr_with_mutex (&bm->peer_connection_mtx) { event_add_event(bm->master, bgp_process_packet, connection, 0,
if (!peer_connection_fifo_member(&bm->connection_fifo, connection)) &connection->t_process_packet);
peer_connection_fifo_add_tail(&bm->connection_fifo, connection);
}
event_add_event(bm->master, bgp_process_packet, NULL, 0, &bm->e_process_packet);
}
} }
/* /*

View file

@@ -10,7 +10,6 @@
 
 #define BGP_WRITE_PACKET_MAX 64U
 #define BGP_READ_PACKET_MAX 10U
-#define BGP_PACKET_PROCESS_LIMIT 100
 
 #include "bgpd/bgpd.h"
 #include "frr_pthread.h"

View file

@@ -108,7 +108,7 @@ static void peer_process(struct hash_bucket *hb, void *arg)
 			zlog_debug("%s [FSM] Timer (keepalive timer expire)",
 				   pkat->peer->host);
 
-		bgp_keepalive_send(pkat->peer->connection);
+		bgp_keepalive_send(pkat->peer);
 		monotime(&pkat->last);
 		memset(&elapsed, 0, sizeof(elapsed));
 		diff = ka;

View file

@ -26,7 +26,7 @@
#include "bgpd/bgp_debug.h" #include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h" #include "bgpd/bgp_errors.h"
extern struct zclient *bgp_zclient; extern struct zclient *zclient;
/* MPLS Labels hash routines. */ /* MPLS Labels hash routines. */
@ -157,7 +157,7 @@ int bgp_parse_fec_update(void)
afi_t afi; afi_t afi;
safi_t safi; safi_t safi;
s = bgp_zclient->ibuf; s = zclient->ibuf;
memset(&p, 0, sizeof(p)); memset(&p, 0, sizeof(p));
p.family = stream_getw(s); p.family = stream_getw(s);
@ -249,7 +249,7 @@ static void bgp_send_fec_register_label_msg(struct bgp_dest *dest, bool reg,
p = bgp_dest_get_prefix(dest); p = bgp_dest_get_prefix(dest);
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return; return;
if (BGP_DEBUG(labelpool, LABELPOOL)) if (BGP_DEBUG(labelpool, LABELPOOL))
@ -258,7 +258,7 @@ static void bgp_send_fec_register_label_msg(struct bgp_dest *dest, bool reg,
/* If the route node has a local_label assigned or the /* If the route node has a local_label assigned or the
* path node has an MPLS SR label index allowing zebra to * path node has an MPLS SR label index allowing zebra to
* derive the label, proceed with registration. */ * derive the label, proceed with registration. */
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
command = (reg) ? ZEBRA_FEC_REGISTER : ZEBRA_FEC_UNREGISTER; command = (reg) ? ZEBRA_FEC_REGISTER : ZEBRA_FEC_UNREGISTER;
zclient_create_header(s, command, VRF_DEFAULT); zclient_create_header(s, command, VRF_DEFAULT);
@ -288,7 +288,7 @@ static void bgp_send_fec_register_label_msg(struct bgp_dest *dest, bool reg,
if (reg) if (reg)
stream_putw_at(s, flags_pos, flags); stream_putw_at(s, flags_pos, flags);
zclient_send_message(bgp_zclient); zclient_send_message(zclient);
} }
/** /**

View file

@@ -7,6 +7,8 @@
 #define _BGP_LABEL_H
 
 #define BGP_LABEL_BYTES 3
+#define BGP_LABEL_BITS 24
+#define BGP_WITHDRAW_LABEL 0x800000
 #define BGP_PREVENT_VRF_2_VRF_LEAK 0xFFFFFFFE
 
 struct bgp_dest;
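The two extra defines in this hunk describe how labels travel in the NLRI for labeled address families: the field is BGP_LABEL_BYTES (3) octets, i.e. BGP_LABEL_BITS (24) bits, of which the top 20 carry the MPLS label and the bottom 4 are TC plus bottom-of-stack, while 0x800000 is the compatibility "withdraw" value from RFC 3107. A small illustrative decoder (helper name hypothetical):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define BGP_LABEL_BYTES    3
#define BGP_LABEL_BITS     24
#define BGP_WITHDRAW_LABEL 0x800000

/* Pull the 24-bit label field out of a wire buffer and report whether it is
 * the RFC 3107 "withdraw" value. The top 20 bits are the MPLS label, the low
 * 4 bits are TC + bottom-of-stack. */
static bool decode_label(const uint8_t *pnt, uint32_t *label)
{
	uint32_t field = ((uint32_t)pnt[0] << 16) | ((uint32_t)pnt[1] << 8) | pnt[2];

	*label = field >> 4;	/* strip TC + S bits, keep the 20-bit label */
	return field == BGP_WITHDRAW_LABEL;
}

int main(void)
{
	const uint8_t wire[BGP_LABEL_BYTES] = { 0x80, 0x00, 0x00 };
	uint32_t label;

	if (decode_label(wire, &label))
		printf("withdraw label\n");
	else
		printf("label %u\n", label);
	return 0;
}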

View file

@@ -219,9 +219,6 @@ static void bgp_mac_rescan_evpn_table(struct bgp *bgp, struct ethaddr *macaddr)
 		if (!peer_established(peer->connection))
 			continue;
 
-		if (!peer->afc[afi][safi])
-			continue;
-
 		if (bgp_debug_update(peer, NULL, NULL, 1))
 			zlog_debug(
 				"Processing EVPN MAC interface change on peer %s %s",

View file

@@ -161,14 +161,6 @@ __attribute__((__noreturn__)) void sigint(void)
 
 	bgp_exit(0);
 
-	/*
-	 * This is being done after bgp_exit because items may be removed
-	 * from the connection_fifo
-	 */
-	peer_connection_fifo_fini(&bm->connection_fifo);
-	EVENT_OFF(bm->e_process_packet);
-	pthread_mutex_destroy(&bm->peer_connection_mtx);
-
 	exit(0);
 }

View file

@@ -136,6 +136,4 @@ DECLARE_MTYPE(BGP_SOFT_VERSION);
 
 DECLARE_MTYPE(BGP_EVPN_OVERLAY);
 
-DECLARE_MTYPE(CLEARING_BATCH);
-
 #endif /* _QUAGGA_BGP_MEMORY_H */

View file

@ -46,7 +46,7 @@ DEFINE_MTYPE_STATIC(BGPD, MPLSVPN_NH_LABEL_BIND_CACHE,
/* /*
* Definitions and external declarations. * Definitions and external declarations.
*/ */
extern struct zclient *bgp_zclient; extern struct zclient *zclient;
extern int argv_find_and_parse_vpnvx(struct cmd_token **argv, int argc, extern int argv_find_and_parse_vpnvx(struct cmd_token **argv, int argc,
int *index, afi_t *afi) int *index, afi_t *afi)
@ -317,7 +317,7 @@ void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi)
if (label == BGP_PREVENT_VRF_2_VRF_LEAK) if (label == BGP_PREVENT_VRF_2_VRF_LEAK)
label = MPLS_LABEL_NONE; label = MPLS_LABEL_NONE;
zclient_send_vrf_label(bgp_zclient, bgp->vrf_id, afi, label, ZEBRA_LSP_BGP); zclient_send_vrf_label(zclient, bgp->vrf_id, afi, label, ZEBRA_LSP_BGP);
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label; bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label;
} }
@ -344,7 +344,7 @@ void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi)
bgp->name_pretty, bgp->vrf_id); bgp->name_pretty, bgp->vrf_id);
} }
zclient_send_vrf_label(bgp_zclient, bgp->vrf_id, afi, label, ZEBRA_LSP_BGP); zclient_send_vrf_label(zclient, bgp->vrf_id, afi, label, ZEBRA_LSP_BGP);
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label; bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label;
} }
@ -397,13 +397,11 @@ void vpn_leak_zebra_vrf_sid_update_per_af(struct bgp *bgp, afi_t afi)
ctx.argument_len = ctx.argument_len =
bgp->vpn_policy[afi] bgp->vpn_policy[afi]
.tovpn_sid_locator->argument_bits_length; .tovpn_sid_locator->argument_bits_length;
if (CHECK_FLAG(bgp->vpn_policy[afi].tovpn_sid_locator->flags, SRV6_LOCATOR_USID))
SET_SRV6_FLV_OP(ctx.flv.flv_ops, ZEBRA_SEG6_LOCAL_FLV_OP_NEXT_CSID);
} }
ctx.table = vrf->data.l.table_id; ctx.table = vrf->data.l.table_id;
act = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4 act = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4
: ZEBRA_SEG6_LOCAL_ACTION_END_DT6; : ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
zclient_send_localsid(bgp_zclient, tovpn_sid, bgp->vrf_id, act, &ctx); zclient_send_localsid(zclient, tovpn_sid, bgp->vrf_id, act, &ctx);
tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
*tovpn_sid_ls = *tovpn_sid; *tovpn_sid_ls = *tovpn_sid;
@ -456,12 +454,10 @@ void vpn_leak_zebra_vrf_sid_update_per_vrf(struct bgp *bgp)
ctx.node_len = bgp->tovpn_sid_locator->node_bits_length; ctx.node_len = bgp->tovpn_sid_locator->node_bits_length;
ctx.function_len = bgp->tovpn_sid_locator->function_bits_length; ctx.function_len = bgp->tovpn_sid_locator->function_bits_length;
ctx.argument_len = bgp->tovpn_sid_locator->argument_bits_length; ctx.argument_len = bgp->tovpn_sid_locator->argument_bits_length;
if (CHECK_FLAG(bgp->tovpn_sid_locator->flags, SRV6_LOCATOR_USID))
SET_SRV6_FLV_OP(ctx.flv.flv_ops, ZEBRA_SEG6_LOCAL_FLV_OP_NEXT_CSID);
} }
ctx.table = vrf->data.l.table_id; ctx.table = vrf->data.l.table_id;
act = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; act = ZEBRA_SEG6_LOCAL_ACTION_END_DT46;
zclient_send_localsid(bgp_zclient, tovpn_sid, bgp->vrf_id, act, &ctx); zclient_send_localsid(zclient, tovpn_sid, bgp->vrf_id, act, &ctx);
tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
*tovpn_sid_ls = *tovpn_sid; *tovpn_sid_ls = *tovpn_sid;
@ -523,7 +519,7 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi)
bgp->vpn_policy[afi] bgp->vpn_policy[afi]
.tovpn_sid_locator->argument_bits_length; .tovpn_sid_locator->argument_bits_length;
} }
zclient_send_localsid(bgp_zclient, zclient_send_localsid(zclient,
bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent, bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent,
bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC,
&seg6localctx); &seg6localctx);
@ -568,7 +564,7 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_vrf(struct bgp *bgp)
seg6localctx.argument_len = seg6localctx.argument_len =
bgp->tovpn_sid_locator->argument_bits_length; bgp->tovpn_sid_locator->argument_bits_length;
} }
zclient_send_localsid(bgp_zclient, bgp->tovpn_zebra_vrf_sid_last_sent, zclient_send_localsid(zclient, bgp->tovpn_zebra_vrf_sid_last_sent,
bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC,
&seg6localctx); &seg6localctx);
XFREE(MTYPE_BGP_SRV6_SID, bgp->tovpn_zebra_vrf_sid_last_sent); XFREE(MTYPE_BGP_SRV6_SID, bgp->tovpn_zebra_vrf_sid_last_sent);
@ -1092,37 +1088,32 @@ static bool leak_update_nexthop_valid(struct bgp *to_bgp, struct bgp_dest *bn,
/* the route is defined with the "network <prefix>" command */ /* the route is defined with the "network <prefix>" command */
if (CHECK_FLAG(bgp_nexthop->flags, BGP_FLAG_IMPORT_CHECK)) if (CHECK_FLAG(bgp_nexthop->flags, BGP_FLAG_IMPORT_CHECK))
nh_valid = bgp_find_or_add_nexthop(to_bgp, bgp_nexthop, afi, SAFI_UNICAST, nh_valid = bgp_find_or_add_nexthop(to_bgp, bgp_nexthop,
bpi_ultimate, NULL, 0, p, bpi_ultimate); afi, SAFI_UNICAST,
bpi_ultimate, NULL,
0, p);
else else
/* if "no bgp network import-check" is set, /* if "no bgp network import-check" is set,
* then mark the nexthop as valid. * then mark the nexthop as valid.
*/ */
nh_valid = true; nh_valid = true;
} else if (bpi_ultimate->type == ZEBRA_ROUTE_BGP &&
bpi_ultimate->sub_type == BGP_ROUTE_AGGREGATE) {
nh_valid = true;
} else } else
/* /*
* TBD do we need to do anything about the * TBD do we need to do anything about the
* 'connected' parameter? * 'connected' parameter?
*/ */
/* VPN paths: the new bpi may be altered like nh_valid = bgp_find_or_add_nexthop(to_bgp, bgp_nexthop, afi,
* with 'nexthop vpn export' command. Use the bpi_ultimate safi, bpi, NULL, 0, p);
* to find the original nexthop
*/
nh_valid = bgp_find_or_add_nexthop(to_bgp, bgp_nexthop, afi, safi, bpi, NULL, 0, p,
bpi_ultimate);
/* /*
* If you are using SRv6 VPN instead of MPLS, it need to check * If you are using SRv6 VPN instead of MPLS, it need to check
* the SID allocation. If the sid is not allocated, the rib * the SID allocation. If the sid is not allocated, the rib
* will be invalid. * will be invalid.
* If the SID per VRF is not available, also consider the rib as
* invalid.
*/ */
if (to_bgp->srv6_enabled && nh_valid) if (to_bgp->srv6_enabled &&
nh_valid = is_pi_srv6_valid(bpi, bgp_nexthop, afi, safi); (!new_attr->srv6_l3vpn && !new_attr->srv6_vpn)) {
nh_valid = false;
}
if (debug) if (debug)
zlog_debug("%s: %pFX nexthop is %svalid (in %s)", __func__, p, zlog_debug("%s: %pFX nexthop is %svalid (in %s)", __func__, p,
@ -1213,8 +1204,8 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn,
return NULL; return NULL;
} }
if (labelssame && !CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED) && if (attrhash_cmp(bpi->attr, new_attr) && labelssame &&
attrhash_cmp(bpi->attr, new_attr) && !CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED) &&
leak_update_nexthop_valid(to_bgp, bn, new_attr, afi, safi, source_bpi, bpi, leak_update_nexthop_valid(to_bgp, bn, new_attr, afi, safi, source_bpi, bpi,
bgp_orig, p, bgp_orig, p,
debug) == !!CHECK_FLAG(bpi->flags, BGP_PATH_VALID)) { debug) == !!CHECK_FLAG(bpi->flags, BGP_PATH_VALID)) {
@ -1600,8 +1591,8 @@ vpn_leak_from_vrf_get_per_nexthop_label(afi_t afi, struct bgp_path_info *pi,
bgp_nexthop = from_bgp; bgp_nexthop = from_bgp;
nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr); nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi, SAFI_UNICAST, pi, NULL, 0, nh_valid = bgp_find_or_add_nexthop(from_bgp, bgp_nexthop, nh_afi,
NULL, NULL); SAFI_UNICAST, pi, NULL, 0, NULL);
if (!nh_valid && is_bgp_static_route && if (!nh_valid && is_bgp_static_route &&
!CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) { !CHECK_FLAG(from_bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
@ -1702,14 +1693,6 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */
return; return;
} }
/* Aggregate-address suppress check. */
if (bgp_path_suppressed(path_vrf)) {
if (debug)
zlog_debug("%s: %s skipping: suppressed path will not be exported",
__func__, from_bgp->name);
return;
}
/* shallow copy */ /* shallow copy */
static_attr = *path_vrf->attr; static_attr = *path_vrf->attr;
@ -2343,8 +2326,8 @@ static void vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
break; break;
} }
if (bpi && leak_update_nexthop_valid(to_bgp, bn, &static_attr, afi, safi, path_vpn, bpi, if (bpi && leak_update_nexthop_valid(to_bgp, bn, &static_attr, afi, safi,
src_vrf, p, debug)) path_vpn, bpi, src_vrf, p, debug))
SET_FLAG(static_attr.nh_flags, BGP_ATTR_NH_VALID); SET_FLAG(static_attr.nh_flags, BGP_ATTR_NH_VALID);
else else
UNSET_FLAG(static_attr.nh_flags, BGP_ATTR_NH_VALID); UNSET_FLAG(static_attr.nh_flags, BGP_ATTR_NH_VALID);

View file

@@ -342,37 +342,6 @@ static inline bool is_pi_family_vpn(struct bgp_path_info *pi)
 		is_pi_family_matching(pi, AFI_IP6, SAFI_MPLS_VPN));
 }
 
-/*
- * If you are using SRv6 VPN instead of MPLS, it need to check
- * the SID allocation. If the sid is not allocated, the rib
- * will be invalid.
- * If the SID per VRF is not available, also consider the rib as
- * invalid.
- */
-static inline bool is_pi_srv6_valid(struct bgp_path_info *pi, struct bgp *bgp_nexthop, afi_t afi,
-				    safi_t safi)
-{
-	if (!pi->attr->srv6_l3vpn && !pi->attr->srv6_vpn)
-		return false;
-
-	/* imported paths from VPN: srv6 enabled and nht reachability
-	 * are enough to know if that path is valid
-	 */
-	if (safi == SAFI_UNICAST)
-		return true;
-
-	if (bgp_nexthop->vpn_policy[afi].tovpn_sid == NULL && bgp_nexthop->tovpn_sid == NULL)
-		return false;
-
-	if (bgp_nexthop->tovpn_sid_index == 0 &&
-	    !CHECK_FLAG(bgp_nexthop->vrf_flags, BGP_VRF_TOVPN_SID_AUTO) &&
-	    bgp_nexthop->vpn_policy[afi].tovpn_sid_index == 0 &&
-	    !CHECK_FLAG(bgp_nexthop->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_SID_AUTO))
-		return false;
-
-	return true;
-}
-
 extern void vpn_policy_routemap_event(const char *rmap_name);
 
 extern vrf_id_t get_first_vrf_for_redirect_with_rt(struct ecommunity *eckey);
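On the removed side of this compare, leak_update_nexthop_valid() chains this helper after the ordinary nexthop check, so an SRv6-enabled VRF only treats a leaked path as usable when a SID is actually available. A reduced sketch of that gating, with hypothetical types standing in for the FRR structures:

#include <stdbool.h>

struct attr_info {
	bool has_srv6_sid;	/* stand-in for srv6_l3vpn / srv6_vpn */
};

struct vrf_info {
	bool srv6_enabled;
	bool sid_allocated;	/* per-AF or per-VRF SID present */
};

/* Nexthop counts as valid only if the base check passes and, when SRv6 is in
 * use, a SID exists to actually forward the traffic. */
static bool leaked_nexthop_valid(bool base_nh_valid, const struct vrf_info *vrf,
				 const struct attr_info *attr)
{
	if (!base_nh_valid)
		return false;
	if (!vrf->srv6_enabled)
		return true;
	return attr->has_srv6_sid && vrf->sid_allocated;
}

int main(void)
{
	struct vrf_info vrf = { .srv6_enabled = true, .sid_allocated = false };
	struct attr_info attr = { .has_srv6_sid = true };

	return leaked_nexthop_valid(true, &vrf, &attr) ? 1 : 0;
}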

View file

@@ -1460,6 +1460,8 @@ static struct bgp_path_info *bgpL3vpnRte_lookup(struct variable *v, oid name[],
 		pi = bgp_lookup_route_next(l3vpn_bgp, dest, &prefix, policy,
 					   &nexthop);
 		if (pi) {
+			uint8_t vrf_name_len =
+				strnlen((*l3vpn_bgp)->name, VRF_NAMSIZ);
 			const struct prefix *p = bgp_dest_get_prefix(*dest);
 			uint8_t oid_index;
 			bool v4 = (p->family == AF_INET);
@@ -1467,8 +1469,6 @@ static struct bgp_path_info *bgpL3vpnRte_lookup(struct variable *v, oid name[],
 				    : sizeof(struct in6_addr);
 			struct attr *attr = pi->attr;
 
-			vrf_name_len = strnlen((*l3vpn_bgp)->name, VRF_NAMSIZ);
-
 			/* copy the index parameters */
 			oid_copy_str(&name[namelen], (*l3vpn_bgp)->name,
 				     vrf_name_len);

View file

@ -389,23 +389,6 @@ static void bgp_socket_set_buffer_size(const int fd)
setsockopt_so_recvbuf(fd, bm->socket_buffer); setsockopt_so_recvbuf(fd, bm->socket_buffer);
} }
static const char *bgp_peer_active2str(enum bgp_peer_active active)
{
switch (active) {
case BGP_PEER_ACTIVE:
return "active";
case BGP_PEER_CONNECTION_UNSPECIFIED:
return "unspecified connection";
case BGP_PEER_BFD_DOWN:
return "BFD down";
case BGP_PEER_AF_UNCONFIGURED:
return "no AF activated";
}
assert(!"We should never get here this is a dev escape");
return "ERROR";
}
/* Accept bgp connection. */ /* Accept bgp connection. */
static void bgp_accept(struct event *thread) static void bgp_accept(struct event *thread)
{ {
@ -413,11 +396,10 @@ static void bgp_accept(struct event *thread)
int accept_sock; int accept_sock;
union sockunion su; union sockunion su;
struct bgp_listener *listener = EVENT_ARG(thread); struct bgp_listener *listener = EVENT_ARG(thread);
struct peer *doppelganger, *peer; struct peer *peer, *peer1;
struct peer_connection *connection, *incoming; struct peer_connection *connection, *connection1;
char buf[SU_ADDRSTRLEN]; char buf[SU_ADDRSTRLEN];
struct bgp *bgp = NULL; struct bgp *bgp = NULL;
enum bgp_peer_active active;
sockunion_init(&su); sockunion_init(&su);
@ -493,51 +475,53 @@ static void bgp_accept(struct event *thread)
bgp_update_setsockopt_tcp_keepalive(bgp, bgp_sock); bgp_update_setsockopt_tcp_keepalive(bgp, bgp_sock);
/* Check remote IP address */ /* Check remote IP address */
peer = peer_lookup(bgp, &su); peer1 = peer_lookup(bgp, &su);
if (!peer) { if (!peer1) {
struct peer *dynamic_peer = peer_lookup_dynamic_neighbor(bgp, &su); peer1 = peer_lookup_dynamic_neighbor(bgp, &su);
if (peer1) {
if (dynamic_peer) { connection1 = peer1->connection;
incoming = dynamic_peer->connection;
/* Dynamic neighbor has been created, let it proceed */ /* Dynamic neighbor has been created, let it proceed */
incoming->fd = bgp_sock; connection1->fd = bgp_sock;
incoming->dir = CONNECTION_INCOMING;
incoming->su_local = sockunion_getsockname(incoming->fd); connection1->su_local = sockunion_getsockname(connection1->fd);
incoming->su_remote = sockunion_dup(&su); connection1->su_remote = sockunion_dup(&su);
if (bgp_set_socket_ttl(incoming) < 0) { if (bgp_set_socket_ttl(connection1) < 0) {
dynamic_peer->last_reset = PEER_DOWN_SOCKET_ERROR; peer1->last_reset = PEER_DOWN_SOCKET_ERROR;
zlog_err("%s: Unable to set min/max TTL on peer %s (dynamic), error received: %s(%d)", zlog_err("%s: Unable to set min/max TTL on peer %s (dynamic), error received: %s(%d)",
__func__, dynamic_peer->host, safe_strerror(errno), errno); __func__, peer1->host,
safe_strerror(errno), errno);
return; return;
} }
/* Set the user configured MSS to TCP socket */ /* Set the user configured MSS to TCP socket */
if (CHECK_FLAG(dynamic_peer->flags, PEER_FLAG_TCP_MSS)) if (CHECK_FLAG(peer1->flags, PEER_FLAG_TCP_MSS))
sockopt_tcp_mss_set(bgp_sock, dynamic_peer->tcp_mss); sockopt_tcp_mss_set(bgp_sock, peer1->tcp_mss);
frr_with_privs (&bgpd_privs) { frr_with_privs (&bgpd_privs) {
vrf_bind(dynamic_peer->bgp->vrf_id, bgp_sock, vrf_bind(peer1->bgp->vrf_id, bgp_sock,
bgp_get_bound_name(incoming)); bgp_get_bound_name(connection1));
} }
bgp_peer_reg_with_nht(dynamic_peer); bgp_peer_reg_with_nht(peer1);
bgp_fsm_change_status(incoming, Active); bgp_fsm_change_status(connection1, Active);
EVENT_OFF(incoming->t_start); EVENT_OFF(connection1->t_start);
if (peer_active(incoming) == BGP_PEER_ACTIVE) { if (peer_active(peer1->connection)) {
if (CHECK_FLAG(dynamic_peer->flags, PEER_FLAG_TIMER_DELAYOPEN)) if (CHECK_FLAG(peer1->flags,
BGP_EVENT_ADD(incoming, TCP_connection_open_w_delay); PEER_FLAG_TIMER_DELAYOPEN))
BGP_EVENT_ADD(connection1,
TCP_connection_open_w_delay);
else else
BGP_EVENT_ADD(incoming, TCP_connection_open); BGP_EVENT_ADD(connection1,
TCP_connection_open);
} }
return; return;
} }
} }
if (!peer) { if (!peer1) {
if (bgp_debug_neighbor_events(NULL)) { if (bgp_debug_neighbor_events(NULL)) {
zlog_debug( zlog_debug(
"[Event] %s connection rejected(%s:%u:%s) - not configured and not valid for dynamic", "[Event] %s connection rejected(%s:%u:%s) - not configured and not valid for dynamic",
@ -548,12 +532,10 @@ static void bgp_accept(struct event *thread)
return; return;
} }
/* bgp pointer may be null, but since we have a peer data structure we know we have it */ connection1 = peer1->connection;
bgp = peer->bgp; if (CHECK_FLAG(peer1->flags, PEER_FLAG_SHUTDOWN)
connection = peer->connection; || CHECK_FLAG(peer1->bgp->flags, BGP_FLAG_SHUTDOWN)) {
if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN) || if (bgp_debug_neighbor_events(peer1))
CHECK_FLAG(peer->bgp->flags, BGP_FLAG_SHUTDOWN)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug( zlog_debug(
"[Event] connection from %s rejected(%s:%u:%s) due to admin shutdown", "[Event] connection from %s rejected(%s:%u:%s) due to admin shutdown",
inet_sutop(&su, buf), bgp->name_pretty, bgp->as, inet_sutop(&su, buf), bgp->name_pretty, bgp->as,
@ -568,20 +550,21 @@ static void bgp_accept(struct event *thread)
* Established and then the Clearing_Completed event is generated. Also, * Established and then the Clearing_Completed event is generated. Also,
* block incoming connection in Deleted state. * block incoming connection in Deleted state.
*/ */
if (connection->status == Clearing || connection->status == Deleted) { if (connection1->status == Clearing || connection1->status == Deleted) {
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer1))
zlog_debug("[Event] Closing incoming conn for %s (%p) state %d", peer->host, zlog_debug("[Event] Closing incoming conn for %s (%p) state %d",
peer, connection->status); peer1->host, peer1,
peer1->connection->status);
close(bgp_sock); close(bgp_sock);
return; return;
} }
/* Check that at least one AF is activated for the peer. */ /* Check that at least one AF is activated for the peer. */
active = peer_active(connection); if (!peer_active(connection1)) {
if (active != BGP_PEER_ACTIVE) { if (bgp_debug_neighbor_events(peer1))
if (bgp_debug_neighbor_events(peer)) zlog_debug(
zlog_debug("%s - incoming conn rejected - %s", peer->host, "%s - incoming conn rejected - no AF activated for peer",
bgp_peer_active2str(active)); peer1->host);
close(bgp_sock); close(bgp_sock);
return; return;
} }
@ -590,109 +573,117 @@ static void bgp_accept(struct event *thread)
* prefixes, restart timer is still running or the peer * prefixes, restart timer is still running or the peer
* is shutdown, or BGP identifier is not set (0.0.0.0). * is shutdown, or BGP identifier is not set (0.0.0.0).
*/ */
if (BGP_PEER_START_SUPPRESSED(peer)) { if (BGP_PEER_START_SUPPRESSED(peer1)) {
if (bgp_debug_neighbor_events(peer)) { if (bgp_debug_neighbor_events(peer1)) {
if (peer->shut_during_cfg) if (peer1->shut_during_cfg)
zlog_debug("[Event] Incoming BGP connection rejected from %s due to configuration being currently read in", zlog_debug(
peer->host); "[Event] Incoming BGP connection rejected from %s due to configuration being currently read in",
peer1->host);
else else
zlog_debug("[Event] Incoming BGP connection rejected from %s due to maximum-prefix or shutdown", zlog_debug(
peer->host); "[Event] Incoming BGP connection rejected from %s due to maximum-prefix or shutdown",
peer1->host);
} }
close(bgp_sock); close(bgp_sock);
return; return;
} }
if (peer->bgp->router_id.s_addr == INADDR_ANY) { if (peer1->bgp->router_id.s_addr == INADDR_ANY) {
zlog_warn("[Event] Incoming BGP connection rejected from %s due missing BGP identifier, set it with `bgp router-id`", zlog_warn("[Event] Incoming BGP connection rejected from %s due missing BGP identifier, set it with `bgp router-id`",
peer->host); peer1->host);
peer->last_reset = PEER_DOWN_ROUTER_ID_ZERO; peer1->last_reset = PEER_DOWN_ROUTER_ID_ZERO;
close(bgp_sock); close(bgp_sock);
return; return;
} }
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer1))
zlog_debug("[Event] connection from %s fd %d, active peer status %d fd %d", zlog_debug("[Event] connection from %s fd %d, active peer status %d fd %d",
inet_sutop(&su, buf), bgp_sock, connection->status, connection->fd); inet_sutop(&su, buf), bgp_sock, connection1->status,
connection1->fd);
if (peer->doppelganger) { if (peer1->doppelganger) {
/* We have an existing connection. Kill the existing one and run /* We have an existing connection. Kill the existing one and run
with this one. with this one.
*/ */
if (bgp_debug_neighbor_events(peer1))
zlog_debug(
"[Event] New active connection from peer %s, Killing previous active connection",
peer1->host);
peer_delete(peer1->doppelganger);
}
peer = peer_create(&su, peer1->conf_if, peer1->bgp, peer1->local_as,
peer1->as, peer1->as_type, NULL, false, NULL);
connection = peer->connection;
peer_xfer_config(peer, peer1);
bgp_peer_gr_flags_update(peer);
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(peer->bgp,
peer->bgp->peer);
if (bgp_peer_gr_mode_get(peer) == PEER_DISABLE) {
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_MODE);
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {
peer_nsf_stop(peer);
}
}
peer->doppelganger = peer1;
peer1->doppelganger = peer;
connection->fd = bgp_sock;
connection->su_local = sockunion_getsockname(connection->fd);
connection->su_remote = sockunion_dup(&su);
if (bgp_set_socket_ttl(connection) < 0)
if (bgp_debug_neighbor_events(peer)) if (bgp_debug_neighbor_events(peer))
zlog_debug("[Event] New active connection from peer %s, Killing previous active connection",
peer->host);
peer_delete(peer->doppelganger);
}
doppelganger = peer_create(&su, peer->conf_if, bgp, peer->local_as, peer->as, peer->as_type,
NULL, false, NULL);
incoming = doppelganger->connection;
peer_xfer_config(doppelganger, peer);
bgp_peer_gr_flags_update(doppelganger);
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(bgp, bgp->peer);
if (bgp_peer_gr_mode_get(doppelganger) == PEER_DISABLE) {
UNSET_FLAG(doppelganger->sflags, PEER_STATUS_NSF_MODE);
if (CHECK_FLAG(doppelganger->sflags, PEER_STATUS_NSF_WAIT)) {
peer_nsf_stop(doppelganger);
}
}
doppelganger->doppelganger = peer;
peer->doppelganger = doppelganger;
incoming->fd = bgp_sock;
incoming->dir = CONNECTION_INCOMING;
incoming->su_local = sockunion_getsockname(incoming->fd);
incoming->su_remote = sockunion_dup(&su);
if (bgp_set_socket_ttl(incoming) < 0)
if (bgp_debug_neighbor_events(doppelganger))
zlog_debug("[Event] Unable to set min/max TTL on peer %s, Continuing", zlog_debug("[Event] Unable to set min/max TTL on peer %s, Continuing",
doppelganger->host); peer->host);
frr_with_privs(&bgpd_privs) { frr_with_privs(&bgpd_privs) {
vrf_bind(bgp->vrf_id, bgp_sock, bgp_get_bound_name(incoming)); vrf_bind(peer->bgp->vrf_id, bgp_sock,
bgp_get_bound_name(peer->connection));
} }
bgp_peer_reg_with_nht(doppelganger); bgp_peer_reg_with_nht(peer);
bgp_fsm_change_status(incoming, Active); bgp_fsm_change_status(connection, Active);
EVENT_OFF(incoming->t_start); /* created in peer_create() */ EVENT_OFF(connection->t_start); /* created in peer_create() */
SET_FLAG(doppelganger->sflags, PEER_STATUS_ACCEPT_PEER); SET_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
/* Make dummy peer until read Open packet. */ /* Make dummy peer until read Open packet. */
if (peer_established(connection) && CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) { if (peer_established(connection1) &&
CHECK_FLAG(peer1->sflags, PEER_STATUS_NSF_MODE)) {
/* If we have an existing established connection with graceful /* If we have an existing established connection with graceful
* restart * restart
* capability announced with one or more address families, then * capability announced with one or more address families, then
* drop * drop
* existing established connection and move state to connect. * existing established connection and move state to connect.
*/ */
peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION; peer1->last_reset = PEER_DOWN_NSF_CLOSE_SESSION;
if (CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) || if (CHECK_FLAG(peer1->flags, PEER_FLAG_GRACEFUL_RESTART)
CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART_HELPER)) || CHECK_FLAG(peer1->flags,
SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT); PEER_FLAG_GRACEFUL_RESTART_HELPER))
SET_FLAG(peer1->sflags, PEER_STATUS_NSF_WAIT);
bgp_event_update(connection, TCP_connection_closed); bgp_event_update(connection1, TCP_connection_closed);
} }
if (peer_active(incoming) == BGP_PEER_ACTIVE) { if (peer_active(peer->connection)) {
if (CHECK_FLAG(doppelganger->flags, PEER_FLAG_TIMER_DELAYOPEN)) if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_EVENT_ADD(incoming, TCP_connection_open_w_delay); BGP_EVENT_ADD(connection, TCP_connection_open_w_delay);
else else
BGP_EVENT_ADD(incoming, TCP_connection_open); BGP_EVENT_ADD(connection, TCP_connection_open);
} }
/* /*
* If we are doing nht for a peer that is v6 LL based * If we are doing nht for a peer that is v6 LL based
* massage the event system to make things happy * massage the event system to make things happy
*/ */
bgp_nht_interface_events(doppelganger); bgp_nht_interface_events(peer);
} }
/* BGP socket bind. */ /* BGP socket bind. */
@ -810,7 +801,6 @@ enum connect_result bgp_connect(struct peer_connection *connection)
connection->fd = connection->fd =
vrf_sockunion_socket(&connection->su, peer->bgp->vrf_id, vrf_sockunion_socket(&connection->su, peer->bgp->vrf_id,
bgp_get_bound_name(connection)); bgp_get_bound_name(connection));
connection->dir = CONNECTION_OUTGOING;
} }
if (connection->fd < 0) { if (connection->fd < 0) {
peer->last_reset = PEER_DOWN_SOCKET_ERROR; peer->last_reset = PEER_DOWN_SOCKET_ERROR;
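
In both variants of the bgp_accept() hunk above (the doppelganger/incoming naming on one side, peer/peer1/connection1 on the other) the logic is the same: when a TCP connection arrives for an already-configured peer, a second transient peer is created, the configuration is copied onto it, and the two are cross-linked so that the connection collision can be resolved later (one of the two is eventually deleted). A stand-alone sketch of just that cross-linking step, with invented toy types rather than FRR's peer_create()/peer_xfer_config():

#include <stdlib.h>
#include <stdio.h>

/* Invented, minimal stand-in for a peer: a name and one config value. */
struct toy_peer {
        char name[32];
        int  hold_time;                 /* stands in for the copied config */
        struct toy_peer *doppelganger;
};

/* Create the transient peer for an incoming connection and cross-link it
 * with the configured peer, copying configuration across. */
static struct toy_peer *toy_accept(struct toy_peer *configured)
{
        struct toy_peer *incoming = calloc(1, sizeof(*incoming));

        if (!incoming)
                return NULL;
        snprintf(incoming->name, sizeof(incoming->name), "%s (incoming)",
                 configured->name);
        incoming->hold_time = configured->hold_time;   /* config transfer */
        incoming->doppelganger = configured;
        configured->doppelganger = incoming;
        return incoming;
}

int main(void)
{
        struct toy_peer cfg = { .name = "192.0.2.1", .hold_time = 180 };
        struct toy_peer *in = toy_accept(&cfg);

        if (in)
                printf("%s hold=%d doppel=%s\n", in->name, in->hold_time,
                       in->doppelganger->name);
        free(in);
        return 0;
}
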


@ -444,7 +444,7 @@ void bgp_connected_add(struct bgp *bgp, struct connected *ifc)
!peer_established(peer->connection) && !peer_established(peer->connection) &&
!CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) { !CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) {
connection = peer->connection; connection = peer->connection;
if (peer_active(connection) == BGP_PEER_ACTIVE) if (peer_active(connection))
BGP_EVENT_ADD(connection, BGP_Stop); BGP_EVENT_ADD(connection, BGP_Stop);
BGP_EVENT_ADD(connection, BGP_Start); BGP_EVENT_ADD(connection, BGP_Start);
} }


@ -34,12 +34,11 @@
#include "bgpd/bgp_mplsvpn.h" #include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_ecommunity.h" #include "bgpd/bgp_ecommunity.h"
extern struct zclient *bgp_zclient; extern struct zclient *zclient;
static void register_zebra_rnh(struct bgp_nexthop_cache *bnc); static void register_zebra_rnh(struct bgp_nexthop_cache *bnc);
static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc); static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc);
static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p, static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
struct bgp *bgp_nexthop, struct bgp_path_info *pi_source);
static void bgp_nht_ifp_initial(struct event *thread); static void bgp_nht_ifp_initial(struct event *thread);
DEFINE_HOOK(bgp_nht_path_update, (struct bgp *bgp, struct bgp_path_info *pi, bool valid), DEFINE_HOOK(bgp_nht_path_update, (struct bgp *bgp, struct bgp_path_info *pi, bool valid),
@ -298,9 +297,10 @@ void bgp_unlink_nexthop_by_peer(struct peer *peer)
* A route and its nexthop might belong to different VRFs. Therefore, * A route and its nexthop might belong to different VRFs. Therefore,
* we need both the bgp_route and bgp_nexthop pointers. * we need both the bgp_route and bgp_nexthop pointers.
*/ */
int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, afi_t afi, safi_t safi, int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
struct bgp_path_info *pi, struct peer *peer, int connected, afi_t afi, safi_t safi, struct bgp_path_info *pi,
const struct prefix *orig_prefix, struct bgp_path_info *source_pi) struct peer *peer, int connected,
const struct prefix *orig_prefix)
{ {
struct bgp_nexthop_cache_head *tree = NULL; struct bgp_nexthop_cache_head *tree = NULL;
struct bgp_nexthop_cache *bnc; struct bgp_nexthop_cache *bnc;
@ -330,7 +330,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, afi_
/* This will return true if the global IPv6 NH is a link local /* This will return true if the global IPv6 NH is a link local
* addr */ * addr */
if (!make_prefix(afi, pi, &p, bgp_nexthop, source_pi)) if (!make_prefix(afi, pi, &p))
return 1; return 1;
/* /*
@ -667,7 +667,7 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
nexthop->vrf_id); nexthop->vrf_id);
if (ifp) if (ifp)
zclient_send_interface_radv_req( zclient_send_interface_radv_req(
bgp_zclient, nexthop->vrf_id, ifp, zclient, nexthop->vrf_id, ifp,
true, true,
BGP_UNNUM_DEFAULT_RA_INTERVAL); BGP_UNNUM_DEFAULT_RA_INTERVAL);
} }
@ -763,6 +763,10 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
struct interface *ifp, bool up) struct interface *ifp, bool up)
{ {
struct bgp_nexthop_cache *bnc; struct bgp_nexthop_cache *bnc;
struct nexthop *nhop;
uint16_t other_nh_count;
bool nhop_ll_found = false;
bool nhop_found = false;
if (ifp->ifindex == IFINDEX_INTERNAL) { if (ifp->ifindex == IFINDEX_INTERNAL) {
zlog_warn("%s: The interface %s ignored", __func__, ifp->name); zlog_warn("%s: The interface %s ignored", __func__, ifp->name);
@ -770,9 +774,42 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
} }
frr_each (bgp_nexthop_cache, table, bnc) { frr_each (bgp_nexthop_cache, table, bnc) {
if (bnc->ifindex_ipv6_ll != ifp->ifindex) other_nh_count = 0;
nhop_ll_found = bnc->ifindex_ipv6_ll == ifp->ifindex;
for (nhop = bnc->nexthop; nhop; nhop = nhop->next) {
if (nhop->ifindex == bnc->ifindex_ipv6_ll)
continue; continue;
if (nhop->ifindex != ifp->ifindex) {
other_nh_count++;
continue;
}
if (nhop->vrf_id != ifp->vrf->vrf_id) {
other_nh_count++;
continue;
}
nhop_found = true;
}
if (!nhop_found && !nhop_ll_found)
/* The event interface does not match the nexthop cache
* entry */
continue;
if (!up && other_nh_count > 0)
/* Down event ignored in case of multiple next-hop
* interfaces. The other might interfaces might be still
* up. The cases where all interfaces are down or a bnc
* is invalid are processed by a separate zebra rnh
* messages.
*/
continue;
if (!nhop_ll_found) {
evaluate_paths(bnc);
continue;
}
bnc->last_update = monotime(NULL); bnc->last_update = monotime(NULL);
bnc->change_flags = 0; bnc->change_flags = 0;
@ -785,6 +822,7 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp,
if (up) { if (up) {
SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID); SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
SET_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED); SET_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED);
/* change nexthop number only for ll */
bnc->nexthop_num = 1; bnc->nexthop_num = 1;
} else { } else {
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED); UNSET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
@ -804,9 +842,6 @@ static void bgp_nht_ifp_handle(struct interface *ifp, bool up)
if (!bgp) if (!bgp)
return; return;
if (!up)
bgp_clearing_batch_begin(bgp);
bgp_nht_ifp_table_handle(bgp, &bgp->nexthop_cache_table[AFI_IP], ifp, bgp_nht_ifp_table_handle(bgp, &bgp->nexthop_cache_table[AFI_IP], ifp,
up); up);
bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP], ifp, bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP], ifp,
@ -815,9 +850,6 @@ static void bgp_nht_ifp_handle(struct interface *ifp, bool up)
up); up);
bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP6], ifp, bgp_nht_ifp_table_handle(bgp, &bgp->import_check_table[AFI_IP6], ifp,
up); up);
if (!up)
bgp_clearing_batch_end_event_start(bgp);
} }
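
One side of this hunk brackets the per-table interface-down handling with bgp_clearing_batch_begin() and bgp_clearing_batch_end_event_start(), so that peer clearing triggered by the walk is collected and flushed once rather than per table. The generic begin/accumulate/end pattern, reduced to a toy counter (invented names, illustrative only):

#include <stdio.h>

/* Invented toy batch: count work items while a batch is open and
 * flush them once when the batch is closed. */
static int batch_depth, pending;

static void batch_begin(void) { batch_depth++; }

static void note_work(void)
{
        pending++;
        if (batch_depth == 0) {         /* no batch open: act immediately */
                printf("flush %d item(s)\n", pending);
                pending = 0;
        }
}

static void batch_end(void)
{
        if (--batch_depth == 0 && pending) {
                printf("flush %d item(s)\n", pending);
                pending = 0;
        }
}

int main(void)
{
        batch_begin();          /* like the begin call on interface down */
        note_work();
        note_work();            /* both deferred ... */
        batch_end();            /* ... and flushed once here */
        return 0;
}
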
void bgp_nht_ifp_up(struct interface *ifp) void bgp_nht_ifp_up(struct interface *ifp)
@ -994,8 +1026,7 @@ void bgp_cleanup_nexthops(struct bgp *bgp)
* make_prefix - make a prefix structure from the path (essentially * make_prefix - make a prefix structure from the path (essentially
* path's node. * path's node.
*/ */
static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p, static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
struct bgp *bgp_nexthop, struct bgp_path_info *source_pi)
{ {
int is_bgp_static = ((pi->type == ZEBRA_ROUTE_BGP) int is_bgp_static = ((pi->type == ZEBRA_ROUTE_BGP)
@ -1005,19 +1036,8 @@ static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p,
struct bgp_dest *net = pi->net; struct bgp_dest *net = pi->net;
const struct prefix *p_orig = bgp_dest_get_prefix(net); const struct prefix *p_orig = bgp_dest_get_prefix(net);
struct in_addr ipv4; struct in_addr ipv4;
struct peer *peer; struct peer *peer = pi->peer;
struct attr *attr; struct attr *attr = pi->attr;
bool local_sid = false;
struct bgp *bgp = bgp_get_default();
struct prefix_ipv6 tmp_prefix;
if (source_pi) {
attr = source_pi->attr;
peer = source_pi->peer;
} else {
peer = pi->peer;
attr = pi->attr;
}
if (p_orig->family == AF_FLOWSPEC) { if (p_orig->family == AF_FLOWSPEC) {
if (!peer) if (!peer)
@ -1047,50 +1067,37 @@ static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p,
break; break;
case AFI_IP6: case AFI_IP6:
p->family = AF_INET6; p->family = AF_INET6;
if (bgp && bgp->srv6_locator && bgp->srv6_enabled && pi->attr->srv6_l3vpn) { if (attr->srv6_l3vpn) {
tmp_prefix.family = AF_INET6;
tmp_prefix.prefixlen = IPV6_MAX_BITLEN;
tmp_prefix.prefix = pi->attr->srv6_l3vpn->sid;
if (bgp_nexthop->vpn_policy[afi].tovpn_sid_locator &&
bgp_nexthop->vpn_policy[afi].tovpn_sid)
local_sid = prefix_match(&bgp_nexthop->vpn_policy[afi]
.tovpn_sid_locator->prefix,
&tmp_prefix);
else if (bgp_nexthop->tovpn_sid_locator && bgp_nexthop->tovpn_sid)
local_sid = prefix_match(&bgp_nexthop->tovpn_sid_locator->prefix,
&tmp_prefix);
}
if (local_sid == false && pi->attr->srv6_l3vpn) {
p->prefixlen = IPV6_MAX_BITLEN; p->prefixlen = IPV6_MAX_BITLEN;
if (pi->attr->srv6_l3vpn->transposition_len != 0 && if (attr->srv6_l3vpn->transposition_len != 0 &&
BGP_PATH_INFO_NUM_LABELS(pi)) { BGP_PATH_INFO_NUM_LABELS(pi)) {
IPV6_ADDR_COPY(&p->u.prefix6, &pi->attr->srv6_l3vpn->sid); IPV6_ADDR_COPY(&p->u.prefix6, &attr->srv6_l3vpn->sid);
transpose_sid(&p->u.prefix6, transpose_sid(&p->u.prefix6,
decode_label(&pi->extra->labels->label[0]), decode_label(&pi->extra->labels->label[0]),
pi->attr->srv6_l3vpn->transposition_offset, attr->srv6_l3vpn->transposition_offset,
pi->attr->srv6_l3vpn->transposition_len); attr->srv6_l3vpn->transposition_len);
} else } else
IPV6_ADDR_COPY(&(p->u.prefix6), &(pi->attr->srv6_l3vpn->sid)); IPV6_ADDR_COPY(&(p->u.prefix6), &(attr->srv6_l3vpn->sid));
} else if (is_bgp_static) { } else if (is_bgp_static) {
p->u.prefix6 = p_orig->u.prefix6; p->u.prefix6 = p_orig->u.prefix6;
p->prefixlen = p_orig->prefixlen; p->prefixlen = p_orig->prefixlen;
} else if (attr) { } else {
/* If we receive MP_REACH nexthop with ::(LL) /* If we receive MP_REACH nexthop with ::(LL)
* or LL(LL), use LL address as nexthop cache. * or LL(LL), use LL address as nexthop cache.
*/ */
if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL && if (attr && attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL &&
(IN6_IS_ADDR_UNSPECIFIED(&attr->mp_nexthop_global) || (IN6_IS_ADDR_UNSPECIFIED(&attr->mp_nexthop_global) ||
IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global))) IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)))
p->u.prefix6 = attr->mp_nexthop_local; p->u.prefix6 = attr->mp_nexthop_local;
/* If we receive MR_REACH with (GA)::(LL) /* If we receive MR_REACH with (GA)::(LL)
* then check for route-map to choose GA or LL * then check for route-map to choose GA or LL
*/ */
else if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) { else if (attr && attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
if (CHECK_FLAG(attr->nh_flags, BGP_ATTR_NH_MP_PREFER_GLOBAL)) if (CHECK_FLAG(attr->nh_flags, BGP_ATTR_NH_MP_PREFER_GLOBAL))
p->u.prefix6 = attr->mp_nexthop_global; p->u.prefix6 = attr->mp_nexthop_global;
else else
p->u.prefix6 = attr->mp_nexthop_local; p->u.prefix6 = attr->mp_nexthop_local;
} else if (attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL && } else if (attr && attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL &&
IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) { IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) {
/* If we receive MP_REACH with GUA as LL, we should /* If we receive MP_REACH with GUA as LL, we should
* check if we have Link-Local Next Hop capability also. * check if we have Link-Local Next Hop capability also.
@ -1131,11 +1138,11 @@ static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p,
*/ */
static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command) static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command)
{ {
bool match_p = false; bool exact_match = false;
bool resolve_via_default = false; bool resolve_via_default = false;
int ret; int ret;
if (!bgp_zclient) if (!zclient)
return; return;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
@ -1155,7 +1162,7 @@ static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command)
} }
if (command == ZEBRA_NEXTHOP_REGISTER) { if (command == ZEBRA_NEXTHOP_REGISTER) {
if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED))
match_p = true; exact_match = true;
if (CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH)) if (CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH))
resolve_via_default = true; resolve_via_default = true;
} }
@ -1165,8 +1172,8 @@ static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command)
zserv_command_string(command), &bnc->prefix, zserv_command_string(command), &bnc->prefix,
bnc->bgp->name_pretty); bnc->bgp->name_pretty);
ret = zclient_send_rnh(bgp_zclient, command, &bnc->prefix, SAFI_UNICAST, ret = zclient_send_rnh(zclient, command, &bnc->prefix, SAFI_UNICAST,
match_p, resolve_via_default, exact_match, resolve_via_default,
bnc->bgp->vrf_id); bnc->bgp->vrf_id);
if (ret == ZCLIENT_SEND_FAILURE) { if (ret == ZCLIENT_SEND_FAILURE) {
flog_warn(EC_BGP_ZEBRA_SEND, flog_warn(EC_BGP_ZEBRA_SEND,
@ -1593,7 +1600,7 @@ void bgp_nht_reg_enhe_cap_intfs(struct peer *peer)
if (!ifp) if (!ifp)
continue; continue;
zclient_send_interface_radv_req(bgp_zclient, zclient_send_interface_radv_req(zclient,
nhop->vrf_id, nhop->vrf_id,
ifp, true, ifp, true,
BGP_UNNUM_DEFAULT_RA_INTERVAL); BGP_UNNUM_DEFAULT_RA_INTERVAL);
@ -1643,7 +1650,7 @@ void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
if (!ifp) if (!ifp)
continue; continue;
zclient_send_interface_radv_req(bgp_zclient, nhop->vrf_id, ifp, 0, zclient_send_interface_radv_req(zclient, nhop->vrf_id, ifp, 0,
0); 0);
} }
} }


@ -25,10 +25,11 @@ extern void bgp_nexthop_update(struct vrf *vrf, struct prefix *match,
* peer - The BGP peer associated with this NHT * peer - The BGP peer associated with this NHT
* connected - True if NH MUST be a connected route * connected - True if NH MUST be a connected route
*/ */
extern int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, afi_t a, extern int bgp_find_or_add_nexthop(struct bgp *bgp_route,
safi_t safi, struct bgp_path_info *p, struct peer *peer, struct bgp *bgp_nexthop, afi_t a,
int connected, const struct prefix *orig_prefix, safi_t safi, struct bgp_path_info *p,
struct bgp_path_info *source_pi); struct peer *peer, int connected,
const struct prefix *orig_prefix);
/** /**
* bgp_unlink_nexthop() - Unlink the nexthop object from the path structure. * bgp_unlink_nexthop() - Unlink the nexthop object from the path structure.


@ -613,7 +613,7 @@ void bgp_generate_updgrp_packets(struct event *thread)
/* /*
* Creates a BGP Keepalive packet and appends it to the peer's output queue. * Creates a BGP Keepalive packet and appends it to the peer's output queue.
*/ */
void bgp_keepalive_send(struct peer_connection *connection) void bgp_keepalive_send(struct peer *peer)
{ {
struct stream *s; struct stream *s;
@ -628,13 +628,13 @@ void bgp_keepalive_send(struct peer_connection *connection)
/* Dump packet if debug option is set. */ /* Dump packet if debug option is set. */
/* bgp_packet_dump (s); */ /* bgp_packet_dump (s); */
if (bgp_debug_keepalive(connection->peer)) if (bgp_debug_keepalive(peer))
zlog_debug("%s sending KEEPALIVE", connection->peer->host); zlog_debug("%s sending KEEPALIVE", peer->host);
/* Add packet to the peer. */ /* Add packet to the peer. */
bgp_packet_add(connection, connection->peer, s); bgp_packet_add(peer->connection, peer, s);
bgp_writes_on(connection); bgp_writes_on(peer->connection);
} }
struct stream *bgp_open_make(struct peer *peer, uint16_t send_holdtime, as_t local_as, struct stream *bgp_open_make(struct peer *peer, uint16_t send_holdtime, as_t local_as,
@ -658,12 +658,17 @@ struct stream *bgp_open_make(struct peer *peer, uint16_t send_holdtime, as_t loc
ext_opt_params = true; ext_opt_params = true;
(void)bgp_open_capability(s, peer, ext_opt_params); (void)bgp_open_capability(s, peer, ext_opt_params);
} else { } else {
size_t endp = stream_get_endp(s); struct stream *tmp = stream_new(STREAM_SIZE(s));
if (bgp_open_capability(s, peer, ext_opt_params) > BGP_OPEN_NON_EXT_OPT_LEN) { stream_copy(tmp, s);
stream_set_endp(s, endp); if (bgp_open_capability(tmp, peer, ext_opt_params) >
BGP_OPEN_NON_EXT_OPT_LEN) {
stream_free(tmp);
ext_opt_params = true; ext_opt_params = true;
(void)bgp_open_capability(s, peer, ext_opt_params); (void)bgp_open_capability(s, peer, ext_opt_params);
} else {
stream_copy(s, tmp);
stream_free(tmp);
} }
} }
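
Both sides of this bgp_open_make() hunk solve the same problem: encode the optional parameters once and, if the result exceeds the classic one-byte Optional Parameters Length limit, re-encode them in extended format. One side rolls back by restoring the stream end pointer, the other encodes into a scratch copy first. An illustrative, self-contained sketch of the end-pointer rollback idea (toy_stream and encode_caps are invented stand-ins, not FRR's stream API):

#include <stdio.h>
#include <string.h>

#define CLASSIC_LIMIT 255       /* one-byte Optional Parameters Length */

struct toy_stream { unsigned char buf[4096]; size_t endp; };

/* Pretend capability encoder: writes n bytes and reports how many. */
static size_t encode_caps(struct toy_stream *s, size_t n)
{
        memset(s->buf + s->endp, 0xca, n);
        s->endp += n;
        return n;
}

int main(void)
{
        struct toy_stream s = { .endp = 0 };
        size_t mark = s.endp;           /* remember where the options start */

        if (encode_caps(&s, 300) > CLASSIC_LIMIT) {
                s.endp = mark;          /* roll back the oversized encoding */
                encode_caps(&s, 300);   /* re-encode (extended format assumed) */
                puts("re-encoded as extended optional parameters");
        }
        printf("endp=%zu\n", s.endp);
        return 0;
}
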
@ -1038,13 +1043,6 @@ static void bgp_notify_send_internal(struct peer_connection *connection,
/* Add packet to peer's output queue */ /* Add packet to peer's output queue */
stream_fifo_push(connection->obuf, s); stream_fifo_push(connection->obuf, s);
/* If Graceful-Restart N-bit (Notification) is exchanged,
* and it's not a Hard Reset, let's retain the routes.
*/
if (bgp_has_graceful_restart_notification(peer) && !hard_reset &&
CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE))
SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
bgp_peer_gr_flags_update(peer); bgp_peer_gr_flags_update(peer);
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(peer->bgp, BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(peer->bgp,
peer->bgp->peer); peer->bgp->peer);
@ -3148,6 +3146,8 @@ static void bgp_dynamic_capability_paths_limit(uint8_t *pnt, int action,
SET_FLAG(peer->cap, PEER_CAP_PATHS_LIMIT_RCV); SET_FLAG(peer->cap, PEER_CAP_PATHS_LIMIT_RCV);
while (data + CAPABILITY_CODE_PATHS_LIMIT_LEN <= end) { while (data + CAPABILITY_CODE_PATHS_LIMIT_LEN <= end) {
afi_t afi;
safi_t safi;
iana_afi_t pkt_afi; iana_afi_t pkt_afi;
iana_safi_t pkt_safi; iana_safi_t pkt_safi;
uint16_t paths_limit = 0; uint16_t paths_limit = 0;
@ -3506,6 +3506,8 @@ static void bgp_dynamic_capability_llgr(uint8_t *pnt, int action,
SET_FLAG(peer->cap, PEER_CAP_LLGR_RCV); SET_FLAG(peer->cap, PEER_CAP_LLGR_RCV);
while (data + BGP_CAP_LLGR_MIN_PACKET_LEN <= end) { while (data + BGP_CAP_LLGR_MIN_PACKET_LEN <= end) {
afi_t afi;
safi_t safi;
iana_afi_t pkt_afi; iana_afi_t pkt_afi;
iana_safi_t pkt_safi; iana_safi_t pkt_safi;
struct graceful_restart_af graf; struct graceful_restart_af graf;
@ -3612,6 +3614,8 @@ static void bgp_dynamic_capability_graceful_restart(uint8_t *pnt, int action,
while (data + GRACEFUL_RESTART_CAPABILITY_PER_AFI_SAFI_SIZE <= while (data + GRACEFUL_RESTART_CAPABILITY_PER_AFI_SAFI_SIZE <=
end) { end) {
afi_t afi;
safi_t safi;
iana_afi_t pkt_afi; iana_afi_t pkt_afi;
iana_safi_t pkt_safi; iana_safi_t pkt_safi;
struct graceful_restart_af graf; struct graceful_restart_af graf;
@ -3968,18 +3972,6 @@ int bgp_capability_receive(struct peer_connection *connection,
* would not, making event flow difficult to understand. Please think twice * would not, making event flow difficult to understand. Please think twice
* before hacking this. * before hacking this.
* *
* packet_processing is now a FIFO of connections that need to be handled
* This loop has a maximum run of 100(BGP_PACKET_PROCESS_LIMIT) packets,
* but each individual connection can only handle the quanta value as
* specified in bgp_vty.c. If the connection still has work to do, place it
* back on the back of the queue for more work. Do note that event_should_yield
* is also being called to figure out if processing should stop and work
* picked up after other items can run. This was added *After* withdrawals
* started being processed at scale and this function was taking cpu for 40+ seconds
* On my machine we are getting 2-3 packets before a yield should happen in the
* update case. Withdrawal is 1 packet being processed(note this is a very very
* fast computer) before other items should be run.
*
* Thread type: EVENT_EVENT * Thread type: EVENT_EVENT
* @param thread * @param thread
* @return 0 * @return 0
@ -3992,54 +3984,31 @@ void bgp_process_packet(struct event *thread)
uint32_t rpkt_quanta_old; // how many packets to read uint32_t rpkt_quanta_old; // how many packets to read
int fsm_update_result; // return code of bgp_event_update() int fsm_update_result; // return code of bgp_event_update()
int mprc; // message processing return code int mprc; // message processing return code
uint32_t processed = 0, curr_connection_processed = 0;
bool more_work = false;
size_t count;
uint32_t total_packets_to_process;
frr_with_mutex (&bm->peer_connection_mtx) connection = EVENT_ARG(thread);
connection = peer_connection_fifo_pop(&bm->connection_fifo);
if (!connection)
goto done;
total_packets_to_process = BGP_PACKET_PROCESS_LIMIT;
peer = connection->peer; peer = connection->peer;
rpkt_quanta_old = atomic_load_explicit(&peer->bgp->rpkt_quanta, rpkt_quanta_old = atomic_load_explicit(&peer->bgp->rpkt_quanta,
memory_order_relaxed); memory_order_relaxed);
fsm_update_result = 0; fsm_update_result = 0;
while ((processed < total_packets_to_process) && connection) {
/* Guard against scheduled events that occur after peer deletion. */ /* Guard against scheduled events that occur after peer deletion. */
if (connection->status == Deleted || connection->status == Clearing) { if (connection->status == Deleted || connection->status == Clearing)
frr_with_mutex (&bm->peer_connection_mtx) return;
connection = peer_connection_fifo_pop(&bm->connection_fifo);
if (connection) unsigned int processed = 0;
peer = connection->peer;
continue;
}
while (processed < rpkt_quanta_old) {
uint8_t type = 0; uint8_t type = 0;
bgp_size_t size; bgp_size_t size;
char notify_data_length[2]; char notify_data_length[2];
frr_with_mutex (&connection->io_mtx) frr_with_mutex (&connection->io_mtx) {
peer->curr = stream_fifo_pop(connection->ibuf); peer->curr = stream_fifo_pop(connection->ibuf);
if (peer->curr == NULL) {
frr_with_mutex (&bm->peer_connection_mtx)
connection = peer_connection_fifo_pop(&bm->connection_fifo);
if (connection)
peer = connection->peer;
continue;
} }
if (peer->curr == NULL) // no packets to process, hmm...
return;
/* skip the marker and copy the packet length */ /* skip the marker and copy the packet length */
stream_forward_getp(peer->curr, BGP_MARKER_SIZE); stream_forward_getp(peer->curr, BGP_MARKER_SIZE);
memcpy(notify_data_length, stream_pnt(peer->curr), 2); memcpy(notify_data_length, stream_pnt(peer->curr), 2);
@ -4142,81 +4111,32 @@ void bgp_process_packet(struct event *thread)
stream_free(peer->curr); stream_free(peer->curr);
peer->curr = NULL; peer->curr = NULL;
processed++; processed++;
curr_connection_processed++;
/* Update FSM */ /* Update FSM */
if (mprc != BGP_PACKET_NOOP) if (mprc != BGP_PACKET_NOOP)
fsm_update_result = bgp_event_update(connection, mprc); fsm_update_result = bgp_event_update(connection, mprc);
else
continue;
/* /*
* If peer was deleted, do not process any more packets. This * If peer was deleted, do not process any more packets. This
* is usually due to executing BGP_Stop or a stub deletion. * is usually due to executing BGP_Stop or a stub deletion.
*/ */
if (fsm_update_result == FSM_PEER_TRANSFERRED || if (fsm_update_result == FSM_PEER_TRANSFERRED
fsm_update_result == FSM_PEER_STOPPED) { || fsm_update_result == FSM_PEER_STOPPED)
frr_with_mutex (&bm->peer_connection_mtx) break;
connection = peer_connection_fifo_pop(&bm->connection_fifo);
if (connection)
peer = connection->peer;
continue;
}
bool yield = event_should_yield(thread);
if (curr_connection_processed >= rpkt_quanta_old || yield) {
curr_connection_processed = 0;
frr_with_mutex (&bm->peer_connection_mtx) {
if (!peer_connection_fifo_member(&bm->connection_fifo, connection))
peer_connection_fifo_add_tail(&bm->connection_fifo,
connection);
if (!yield)
connection = peer_connection_fifo_pop(&bm->connection_fifo);
else
connection = NULL;
}
if (connection)
peer = connection->peer;
continue;
} }
if (fsm_update_result != FSM_PEER_TRANSFERRED
&& fsm_update_result != FSM_PEER_STOPPED) {
frr_with_mutex (&connection->io_mtx) { frr_with_mutex (&connection->io_mtx) {
// more work to do, come back later
if (connection->ibuf->count > 0) if (connection->ibuf->count > 0)
more_work = true; event_add_event(bm->master, bgp_process_packet,
else connection, 0,
more_work = false; &connection->t_process_packet);
}
if (!more_work) {
frr_with_mutex (&bm->peer_connection_mtx)
connection = peer_connection_fifo_pop(&bm->connection_fifo);
if (connection)
peer = connection->peer;
} }
} }
if (connection) {
frr_with_mutex (&connection->io_mtx) {
if (connection->ibuf->count > 0)
more_work = true;
else
more_work = false;
}
frr_with_mutex (&bm->peer_connection_mtx) {
if (more_work &&
!peer_connection_fifo_member(&bm->connection_fifo, connection))
peer_connection_fifo_add_tail(&bm->connection_fifo, connection);
}
}
done:
frr_with_mutex (&bm->peer_connection_mtx)
count = peer_connection_fifo_count(&bm->connection_fifo);
if (count)
event_add_event(bm->master, bgp_process_packet, NULL, 0, &bm->e_process_packet);
} }
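
The comment block and loop on one side of this hunk describe the scheme in prose: connections needing input processing sit on a FIFO, each gets at most rpkt_quanta packets per turn (with event_should_yield() consulted as well), and a connection that still has buffered input is pushed back onto the tail so others are not starved. Stripped of the BGP specifics, the scheduling pattern looks roughly like this (toy arrays, invented names, illustrative only):

#include <stdio.h>

#define NCONN   3
#define QUANTA  2               /* packets per connection per turn */
#define LIMIT   10              /* total packets per scheduler run  */

/* Toy connections: just a count of queued packets each. */
static int ibuf[NCONN] = { 5, 1, 3 };

int main(void)
{
        int fifo[16], head = 0, tail = 0, processed = 0;

        for (int i = 0; i < NCONN; i++) /* everyone starts on the FIFO */
                fifo[tail++ % 16] = i;

        while (head != tail && processed < LIMIT) {
                int c = fifo[head++ % 16];
                int burst = 0;

                while (ibuf[c] && burst < QUANTA) {     /* per-turn quanta */
                        ibuf[c]--;
                        burst++;
                        processed++;
                }
                if (ibuf[c])            /* more work: back of the queue */
                        fifo[tail++ % 16] = c;
                printf("conn %d: handled %d, %d left\n", c, burst, ibuf[c]);
        }
        return 0;
}
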
/* Send EOR when routes are processed by selection deferral timer */ /* Send EOR when routes are processed by selection deferral timer */
@ -4229,3 +4149,37 @@ void bgp_send_delayed_eor(struct bgp *bgp)
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer))
bgp_write_proceed_actions(peer); bgp_write_proceed_actions(peer);
} }
/*
* Task callback to handle socket error encountered in the io pthread. We avoid
* having the io pthread try to enqueue fsm events or mess with the peer
* struct.
*/
void bgp_packet_process_error(struct event *thread)
{
struct peer_connection *connection;
struct peer *peer;
int code;
connection = EVENT_ARG(thread);
peer = connection->peer;
code = EVENT_VAL(thread);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [Event] BGP error %d on fd %d", peer->host, code,
connection->fd);
/* Closed connection or error on the socket */
if (peer_established(connection)) {
if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART)
|| CHECK_FLAG(peer->flags,
PEER_FLAG_GRACEFUL_RESTART_HELPER))
&& CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) {
peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION;
SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
} else
peer->last_reset = PEER_DOWN_CLOSE_SESSION;
}
bgp_event_update(connection, code);
}
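
bgp_packet_process_error() above is deliberately only the main-thread half of the work: the io pthread records nothing more than an error code as the event value, and the peer and FSM handling happen later in the main event loop. A generic sketch of that handoff, queueing (handler, connection, code) triples instead of using FRR's event library (all names invented):

#include <stdio.h>

/* Invented miniature event queue: (handler, connection, value) triples. */
struct toy_event { void (*fn)(int conn, int val); int conn; int val; };

static struct toy_event queue[8];
static int nqueued;

static void schedule(void (*fn)(int, int), int conn, int val)
{
        queue[nqueued++] = (struct toy_event){ fn, conn, val };
}

/* Main-loop handler, analogous to bgp_packet_process_error(): only here
 * is peer state touched and the state machine driven. */
static void handle_socket_error(int conn, int code)
{
        printf("conn %d: socket error %d handled on main loop\n", conn, code);
}

int main(void)
{
        /* The io thread would do no more than this on error: */
        schedule(handle_socket_error, 7, -1);

        /* ...and the main loop drains the queue later. */
        for (int i = 0; i < nqueued; i++)
                queue[i].fn(queue[i].conn, queue[i].val);
        return 0;
}
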


@ -48,7 +48,7 @@ DECLARE_HOOK(bgp_packet_send,
} while (0) } while (0)
/* Packet send and receive function prototypes. */ /* Packet send and receive function prototypes. */
extern void bgp_keepalive_send(struct peer_connection *connection); extern void bgp_keepalive_send(struct peer *peer);
extern struct stream *bgp_open_make(struct peer *peer, uint16_t send_holdtime, as_t local_as, extern struct stream *bgp_open_make(struct peer *peer, uint16_t send_holdtime, as_t local_as,
struct in_addr *id); struct in_addr *id);
extern void bgp_open_send(struct peer_connection *connection); extern void bgp_open_send(struct peer_connection *connection);
@ -82,6 +82,8 @@ extern void bgp_process_packet(struct event *event);
extern void bgp_send_delayed_eor(struct bgp *bgp); extern void bgp_send_delayed_eor(struct bgp *bgp);
/* Task callback to handle socket error encountered in the io pthread */
void bgp_packet_process_error(struct event *thread);
extern struct bgp_notify extern struct bgp_notify
bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify); bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify);
extern bool bgp_has_graceful_restart_notification(struct peer *peer); extern bool bgp_has_graceful_restart_notification(struct peer *peer);


@ -279,13 +279,6 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp,
static void bgp_pbr_dump_entry(struct bgp_pbr_filter *bpf, bool add); static void bgp_pbr_dump_entry(struct bgp_pbr_filter *bpf, bool add);
static void bgp_pbr_val_mask_free(void *arg)
{
struct bgp_pbr_val_mask *pbr_val_mask = arg;
XFREE(MTYPE_PBR_VALMASK, pbr_val_mask);
}
static bool bgp_pbr_extract_enumerate_unary_opposite( static bool bgp_pbr_extract_enumerate_unary_opposite(
uint8_t unary_operator, uint8_t unary_operator,
struct bgp_pbr_val_mask *and_valmask, struct bgp_pbr_val_mask *and_valmask,
@ -449,7 +442,7 @@ static bool bgp_pbr_extract(struct bgp_pbr_match_val list[],
struct bgp_pbr_range_port *range) struct bgp_pbr_range_port *range)
{ {
int i = 0; int i = 0;
bool match_p = false; bool exact_match = false;
if (range) if (range)
memset(range, 0, sizeof(struct bgp_pbr_range_port)); memset(range, 0, sizeof(struct bgp_pbr_range_port));
@ -464,9 +457,9 @@ static bool bgp_pbr_extract(struct bgp_pbr_match_val list[],
OPERATOR_COMPARE_EQUAL_TO)) { OPERATOR_COMPARE_EQUAL_TO)) {
if (range) if (range)
range->min_port = list[i].value; range->min_port = list[i].value;
match_p = true; exact_match = true;
} }
if (match_p && i > 0) if (exact_match && i > 0)
return false; return false;
if (list[i].compare_operator == if (list[i].compare_operator ==
(OPERATOR_COMPARE_GREATER_THAN + (OPERATOR_COMPARE_GREATER_THAN +
@ -972,12 +965,7 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p,
return 0; return 0;
} }
static void bgp_pbr_match_entry_free(struct bgp_pbr_match_entry *bpme) static void bgp_pbr_match_entry_free(void *arg)
{
XFREE(MTYPE_PBR_MATCH_ENTRY, bpme);
}
static void bgp_pbr_match_entry_hash_free(void *arg)
{ {
struct bgp_pbr_match_entry *bpme; struct bgp_pbr_match_entry *bpme;
@ -988,21 +976,16 @@ static void bgp_pbr_match_entry_hash_free(void *arg)
bpme->installed = false; bpme->installed = false;
bpme->backpointer = NULL; bpme->backpointer = NULL;
} }
bgp_pbr_match_entry_free(bpme); XFREE(MTYPE_PBR_MATCH_ENTRY, bpme);
} }
static void bgp_pbr_match_free(struct bgp_pbr_match *bpm) static void bgp_pbr_match_free(void *arg)
{
XFREE(MTYPE_PBR_MATCH, bpm);
}
static void bgp_pbr_match_hash_free(void *arg)
{ {
struct bgp_pbr_match *bpm; struct bgp_pbr_match *bpm;
bpm = (struct bgp_pbr_match *)arg; bpm = (struct bgp_pbr_match *)arg;
hash_clean(bpm->entry_hash, bgp_pbr_match_entry_hash_free); hash_clean(bpm->entry_hash, bgp_pbr_match_entry_free);
if (hashcount(bpm->entry_hash) == 0) { if (hashcount(bpm->entry_hash) == 0) {
/* delete iptable entry first */ /* delete iptable entry first */
@ -1021,7 +1004,7 @@ static void bgp_pbr_match_hash_free(void *arg)
} }
hash_clean_and_free(&bpm->entry_hash, NULL); hash_clean_and_free(&bpm->entry_hash, NULL);
bgp_pbr_match_free(bpm); XFREE(MTYPE_PBR_MATCH, bpm);
} }
static void *bgp_pbr_match_alloc_intern(void *arg) static void *bgp_pbr_match_alloc_intern(void *arg)
@ -1036,12 +1019,7 @@ static void *bgp_pbr_match_alloc_intern(void *arg)
return new; return new;
} }
static void bgp_pbr_rule_free(struct bgp_pbr_rule *pbr) static void bgp_pbr_rule_free(void *arg)
{
XFREE(MTYPE_PBR_RULE, pbr);
}
static void bgp_pbr_rule_hash_free(void *arg)
{ {
struct bgp_pbr_rule *bpr; struct bgp_pbr_rule *bpr;
@ -1054,7 +1032,7 @@ static void bgp_pbr_rule_hash_free(void *arg)
bpr->action->refcnt--; bpr->action->refcnt--;
bpr->action = NULL; bpr->action = NULL;
} }
bgp_pbr_rule_free(bpr); XFREE(MTYPE_PBR_RULE, bpr);
} }
static void *bgp_pbr_rule_alloc_intern(void *arg) static void *bgp_pbr_rule_alloc_intern(void *arg)
@ -1394,8 +1372,8 @@ struct bgp_pbr_match *bgp_pbr_match_iptable_lookup(vrf_id_t vrf_id,
void bgp_pbr_cleanup(struct bgp *bgp) void bgp_pbr_cleanup(struct bgp *bgp)
{ {
hash_clean_and_free(&bgp->pbr_match_hash, bgp_pbr_match_hash_free); hash_clean_and_free(&bgp->pbr_match_hash, bgp_pbr_match_free);
hash_clean_and_free(&bgp->pbr_rule_hash, bgp_pbr_rule_hash_free); hash_clean_and_free(&bgp->pbr_rule_hash, bgp_pbr_rule_free);
hash_clean_and_free(&bgp->pbr_action_hash, bgp_pbr_action_free); hash_clean_and_free(&bgp->pbr_action_hash, bgp_pbr_action_free);
if (bgp->bgp_pbr_cfg == NULL) if (bgp->bgp_pbr_cfg == NULL)
@ -1678,8 +1656,6 @@ static void bgp_pbr_flush_iprule(struct bgp *bgp, struct bgp_pbr_action *bpa,
} }
} }
hash_release(bgp->pbr_rule_hash, bpr); hash_release(bgp->pbr_rule_hash, bpr);
bgp_pbr_rule_free(bpr);
bgp_pbr_bpa_remove(bpa); bgp_pbr_bpa_remove(bpa);
} }
@ -1709,7 +1685,6 @@ static void bgp_pbr_flush_entry(struct bgp *bgp, struct bgp_pbr_action *bpa,
} }
} }
hash_release(bpm->entry_hash, bpme); hash_release(bpm->entry_hash, bpme);
bgp_pbr_match_entry_free(bpme);
if (hashcount(bpm->entry_hash) == 0) { if (hashcount(bpm->entry_hash) == 0) {
/* delete iptable entry first */ /* delete iptable entry first */
/* then delete ipset match */ /* then delete ipset match */
@ -1725,7 +1700,6 @@ static void bgp_pbr_flush_entry(struct bgp *bgp, struct bgp_pbr_action *bpa,
bpm->action = NULL; bpm->action = NULL;
} }
hash_release(bgp->pbr_match_hash, bpm); hash_release(bgp->pbr_match_hash, bpm);
bgp_pbr_match_free(bpm);
/* XXX release pbr_match_action if not used /* XXX release pbr_match_action if not used
* note that drop does not need to call send_pbr_action * note that drop does not need to call send_pbr_action
*/ */
@ -2137,6 +2111,17 @@ static void bgp_pbr_policyroute_remove_from_zebra(
bgp, path, bpf, bpof, FLOWSPEC_ICMP_TYPE); bgp, path, bpf, bpof, FLOWSPEC_ICMP_TYPE);
else else
bgp_pbr_policyroute_remove_from_zebra_unit(bgp, path, bpf); bgp_pbr_policyroute_remove_from_zebra_unit(bgp, path, bpf);
/* flush bpof */
if (bpof->tcpflags)
list_delete_all_node(bpof->tcpflags);
if (bpof->dscp)
list_delete_all_node(bpof->dscp);
if (bpof->flowlabel)
list_delete_all_node(bpof->flowlabel);
if (bpof->pkt_len)
list_delete_all_node(bpof->pkt_len);
if (bpof->fragment)
list_delete_all_node(bpof->fragment);
} }
static void bgp_pbr_dump_entry(struct bgp_pbr_filter *bpf, bool add) static void bgp_pbr_dump_entry(struct bgp_pbr_filter *bpf, bool add)
@ -2621,6 +2606,19 @@ static void bgp_pbr_policyroute_add_to_zebra(struct bgp *bgp,
bgp, path, bpf, bpof, nh, rate, FLOWSPEC_ICMP_TYPE); bgp, path, bpf, bpof, nh, rate, FLOWSPEC_ICMP_TYPE);
else else
bgp_pbr_policyroute_add_to_zebra_unit(bgp, path, bpf, nh, rate); bgp_pbr_policyroute_add_to_zebra_unit(bgp, path, bpf, nh, rate);
/* flush bpof */
if (bpof->tcpflags)
list_delete_all_node(bpof->tcpflags);
if (bpof->dscp)
list_delete_all_node(bpof->dscp);
if (bpof->pkt_len)
list_delete_all_node(bpof->pkt_len);
if (bpof->fragment)
list_delete_all_node(bpof->fragment);
if (bpof->icmp_type)
list_delete_all_node(bpof->icmp_type);
if (bpof->icmp_code)
list_delete_all_node(bpof->icmp_code);
} }
static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path, static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
@ -2686,7 +2684,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
srcp = &range; srcp = &range;
else { else {
bpof.icmp_type = list_new(); bpof.icmp_type = list_new();
bpof.icmp_type->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->icmp_type, bgp_pbr_extract_enumerate(api->icmp_type,
api->match_icmp_type_num, api->match_icmp_type_num,
OPERATOR_UNARY_OR, OPERATOR_UNARY_OR,
@ -2702,7 +2699,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
dstp = &range_icmp_code; dstp = &range_icmp_code;
else { else {
bpof.icmp_code = list_new(); bpof.icmp_code = list_new();
bpof.icmp_code->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->icmp_code, bgp_pbr_extract_enumerate(api->icmp_code,
api->match_icmp_code_num, api->match_icmp_code_num,
OPERATOR_UNARY_OR, OPERATOR_UNARY_OR,
@ -2723,7 +2719,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
FLOWSPEC_TCP_FLAGS); FLOWSPEC_TCP_FLAGS);
} else if (kind_enum == OPERATOR_UNARY_OR) { } else if (kind_enum == OPERATOR_UNARY_OR) {
bpof.tcpflags = list_new(); bpof.tcpflags = list_new();
bpof.tcpflags->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->tcpflags, bgp_pbr_extract_enumerate(api->tcpflags,
api->match_tcpflags_num, api->match_tcpflags_num,
OPERATOR_UNARY_OR, OPERATOR_UNARY_OR,
@ -2741,7 +2736,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
bpf.pkt_len = &pkt_len; bpf.pkt_len = &pkt_len;
else { else {
bpof.pkt_len = list_new(); bpof.pkt_len = list_new();
bpof.pkt_len->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->packet_length, bgp_pbr_extract_enumerate(api->packet_length,
api->match_packet_length_num, api->match_packet_length_num,
OPERATOR_UNARY_OR, OPERATOR_UNARY_OR,
@ -2751,14 +2745,12 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
} }
if (api->match_dscp_num >= 1) { if (api->match_dscp_num >= 1) {
bpof.dscp = list_new(); bpof.dscp = list_new();
bpof.dscp->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->dscp, api->match_dscp_num, bgp_pbr_extract_enumerate(api->dscp, api->match_dscp_num,
OPERATOR_UNARY_OR, OPERATOR_UNARY_OR,
bpof.dscp, FLOWSPEC_DSCP); bpof.dscp, FLOWSPEC_DSCP);
} }
if (api->match_fragment_num) { if (api->match_fragment_num) {
bpof.fragment = list_new(); bpof.fragment = list_new();
bpof.fragment->del = bgp_pbr_val_mask_free;
bgp_pbr_extract_enumerate(api->fragment, bgp_pbr_extract_enumerate(api->fragment,
api->match_fragment_num, api->match_fragment_num,
OPERATOR_UNARY_OR, OPERATOR_UNARY_OR,
@ -2774,7 +2766,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
bpf.family = afi2family(api->afi); bpf.family = afi2family(api->afi);
if (!add) { if (!add) {
bgp_pbr_policyroute_remove_from_zebra(bgp, path, &bpf, &bpof); bgp_pbr_policyroute_remove_from_zebra(bgp, path, &bpf, &bpof);
goto flush_bpof; return;
} }
/* no action for add = true */ /* no action for add = true */
for (i = 0; i < api->action_num; i++) { for (i = 0; i < api->action_num; i++) {
@ -2852,22 +2844,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
if (continue_loop == 0) if (continue_loop == 0)
break; break;
} }
flush_bpof:
if (bpof.tcpflags)
list_delete(&bpof.tcpflags);
if (bpof.dscp)
list_delete(&bpof.dscp);
if (bpof.flowlabel)
list_delete(&bpof.flowlabel);
if (bpof.pkt_len)
list_delete(&bpof.pkt_len);
if (bpof.fragment)
list_delete(&bpof.fragment);
if (bpof.icmp_type)
list_delete(&bpof.icmp_type);
if (bpof.icmp_code)
list_delete(&bpof.icmp_code);
} }
void bgp_pbr_update_entry(struct bgp *bgp, const struct prefix *p, void bgp_pbr_update_entry(struct bgp *bgp, const struct prefix *p,
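
Much of the bgp_pbr.c churn in this compare is about who frees the bgp_pbr_val_mask elements held in the flow-spec option lists: one side attaches a per-list destructor (list_new() followed by ->del = bgp_pbr_val_mask_free) and later calls list_delete(), while the other calls list_delete_all_node() and frees elsewhere. The destructor-on-the-container pattern in isolation (toy list, invented names, not FRR's linklist API):

#include <stdlib.h>
#include <stdio.h>

/* Toy singly linked list with an optional per-element destructor. */
struct toy_node { void *data; struct toy_node *next; };
struct toy_list { struct toy_node *head; void (*del)(void *data); };

static void toy_list_add(struct toy_list *l, void *data)
{
        struct toy_node *n = malloc(sizeof(*n));

        n->data = data;
        n->next = l->head;
        l->head = n;
}

/* Frees the nodes and, if a destructor is set, the payloads too. */
static void toy_list_delete(struct toy_list *l)
{
        struct toy_node *n = l->head, *next;

        for (; n; n = next) {
                next = n->next;
                if (l->del)
                        l->del(n->data);
                free(n);
        }
        l->head = NULL;
}

int main(void)
{
        struct toy_list vals = { .head = NULL, .del = free };

        toy_list_add(&vals, malloc(16));        /* payload owned by the list */
        toy_list_add(&vals, malloc(16));
        toy_list_delete(&vals);                 /* nodes and payloads freed */
        puts("freed");
        return 0;
}
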


@ -151,6 +151,8 @@ struct bgp_pbr_config {
bool pbr_interface_any_ipv6; bool pbr_interface_any_ipv6;
}; };
extern struct bgp_pbr_config *bgp_pbr_cfg;
struct bgp_pbr_rule { struct bgp_pbr_rule {
uint32_t flags; uint32_t flags;
struct prefix src; struct prefix src;


@ -80,8 +80,6 @@
DEFINE_MTYPE_STATIC(BGPD, BGP_EOIU_MARKER_INFO, "BGP EOIU Marker info"); DEFINE_MTYPE_STATIC(BGPD, BGP_EOIU_MARKER_INFO, "BGP EOIU Marker info");
DEFINE_MTYPE_STATIC(BGPD, BGP_METAQ, "BGP MetaQ"); DEFINE_MTYPE_STATIC(BGPD, BGP_METAQ, "BGP MetaQ");
/* Memory for batched clearing of peers from the RIB */
DEFINE_MTYPE(BGPD, CLEARING_BATCH, "Clearing batch");
DEFINE_HOOK(bgp_snmp_update_stats, DEFINE_HOOK(bgp_snmp_update_stats,
(struct bgp_dest *rn, struct bgp_path_info *pi, bool added), (struct bgp_dest *rn, struct bgp_path_info *pi, bool added),
@ -119,8 +117,6 @@ static const struct message bgp_pmsi_tnltype_str[] = {
#define VRFID_NONE_STR "-" #define VRFID_NONE_STR "-"
#define SOFT_RECONFIG_TASK_MAX_PREFIX 25000 #define SOFT_RECONFIG_TASK_MAX_PREFIX 25000
static int clear_batch_rib_helper(struct bgp_clearing_info *cinfo);
static inline char *bgp_route_dump_path_info_flags(struct bgp_path_info *pi, static inline char *bgp_route_dump_path_info_flags(struct bgp_path_info *pi,
char *buf, size_t len) char *buf, size_t len)
{ {
@ -2625,32 +2621,15 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
bgp_peer_remove_private_as(bgp, afi, safi, peer, attr); bgp_peer_remove_private_as(bgp, afi, safi, peer, attr);
bgp_peer_as_override(bgp, afi, safi, peer, attr); bgp_peer_as_override(bgp, afi, safi, peer, attr);
/* draft-ietf-idr-deprecate-as-set-confed-set-16 */ /* draft-ietf-idr-deprecate-as-set-confed-set
if (peer->bgp->reject_as_sets && aspath_check_as_sets(attr->aspath)) { * Filter routes having AS_SET or AS_CONFED_SET in the path.
struct aspath *aspath_new; * Eventually, This document (if approved) updates RFC 4271
* and RFC 5065 by eliminating AS_SET and AS_CONFED_SET types,
/* An aggregate prefix MUST NOT be announced to the contributing ASes */ * and obsoletes RFC 6472.
if (pi->sub_type == BGP_ROUTE_AGGREGATE &&
aspath_loop_check(attr->aspath, peer->as)) {
zlog_warn("%pBP [Update:SEND] %pFX is filtered by `bgp reject-as-sets`",
peer, p);
return false;
}
/* When aggregating prefixes, network operators MUST use consistent brief
* aggregation as described in Section 5.2. In consistent brief aggregation,
* the AGGREGATOR and ATOMIC_AGGREGATE Path Attributes are included, but the
* AS_PATH does not have AS_SET or AS_CONFED_SET path segment types.
* The ATOMIC_AGGREGATE Path Attribute is subsequently attached to the BGP
* route, if AS_SETs are dropped.
*/ */
if (attr->aspath->refcnt) if (peer->bgp->reject_as_sets)
aspath_new = aspath_dup(attr->aspath); if (aspath_check_as_sets(attr->aspath))
else return false;
aspath_new = attr->aspath;
attr->aspath = aspath_delete_as_set_seq(aspath_new);
}
/* If neighbor soo is configured, then check if the route has /* If neighbor soo is configured, then check if the route has
* SoO extended community and validate against the configured * SoO extended community and validate against the configured
@ -3285,8 +3264,11 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
if (worse->prev) if (worse->prev)
worse->prev->next = first; worse->prev->next = first;
first->next = worse; first->next = worse;
if (worse) {
first->prev = worse->prev; first->prev = worse->prev;
worse->prev = first; worse->prev = first;
} else
first->prev = NULL;
if (dest->info == worse) { if (dest->info == worse) {
bgp_dest_set_bgp_path_info(dest, first); bgp_dest_set_bgp_path_info(dest, first);
@ -3410,14 +3392,13 @@ void subgroup_process_announce_selected(struct update_subgroup *subgrp,
safi_t safi, uint32_t addpath_tx_id) safi_t safi, uint32_t addpath_tx_id)
{ {
const struct prefix *p; const struct prefix *p;
struct peer *onlypeer, *peer; struct peer *onlypeer;
struct attr attr = { 0 }, *pattr = &attr; struct attr attr = { 0 }, *pattr = &attr;
struct bgp *bgp; struct bgp *bgp;
bool advertise; bool advertise;
p = bgp_dest_get_prefix(dest); p = bgp_dest_get_prefix(dest);
bgp = SUBGRP_INST(subgrp); bgp = SUBGRP_INST(subgrp);
peer = SUBGRP_PEER(subgrp);
onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
: NULL); : NULL);
@ -3452,26 +3433,6 @@ void subgroup_process_announce_selected(struct update_subgroup *subgrp,
pattr, pattr,
selected)) selected))
bgp_attr_flush(pattr); bgp_attr_flush(pattr);
/* Remove paths from Adj-RIB-Out if it's not a best (selected) path.
* Why should we keep Adj-RIB-Out with stale paths?
*/
if (!bgp_addpath_encode_tx(peer, afi, safi)) {
struct bgp_adj_out *adj, *adj_next;
RB_FOREACH_SAFE (adj, bgp_adj_out_rb,
&dest->adj_out, adj_next) {
if (adj->subgroup != subgrp)
continue;
if (!adj->adv &&
adj->addpath_tx_id != addpath_tx_id) {
bgp_adj_out_unset_subgroup(dest,
subgrp, 1,
adj->addpath_tx_id);
}
}
}
} else { } else {
bgp_adj_out_unset_subgroup( bgp_adj_out_unset_subgroup(
dest, subgrp, 1, addpath_tx_id); dest, subgrp, 1, addpath_tx_id);
@ -4201,30 +4162,12 @@ static wq_item_status meta_queue_process(struct work_queue *dummy, void *data)
{ {
struct meta_queue *mq = data; struct meta_queue *mq = data;
uint32_t i; uint32_t i;
uint32_t peers_on_fifo;
static uint32_t total_runs = 0;
total_runs++;
frr_with_mutex (&bm->peer_connection_mtx)
peers_on_fifo = peer_connection_fifo_count(&bm->connection_fifo);
/*
* If the number of peers on the fifo is greater than 10
* let's yield this run of the MetaQ to allow the packet processing to make
* progress against the incoming packets. But we should also
* attempt to allow this to run occassionally. Let's run
* something every 10 attempts to process the work queue.
*/
if (peers_on_fifo > 10 && total_runs % 10 != 0)
return WQ_QUEUE_BLOCKED;
for (i = 0; i < MQ_SIZE; i++) for (i = 0; i < MQ_SIZE; i++)
if (process_subq(mq->subq[i], i)) { if (process_subq(mq->subq[i], i)) {
mq->size--; mq->size--;
break; break;
} }
return mq->size ? WQ_REQUEUE : WQ_SUCCESS; return mq->size ? WQ_REQUEUE : WQ_SUCCESS;
} }
@ -4341,14 +4284,9 @@ static void early_meta_queue_free(struct meta_queue *mq, struct bgp_dest_queue *
struct bgp_dest *dest; struct bgp_dest *dest;
while (!STAILQ_EMPTY(l)) { while (!STAILQ_EMPTY(l)) {
struct bgp_table *table;
dest = STAILQ_FIRST(l); dest = STAILQ_FIRST(l);
STAILQ_REMOVE_HEAD(l, pq); STAILQ_REMOVE_HEAD(l, pq);
STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
table = bgp_dest_table(dest);
bgp_table_unlock(table);
mq->size--; mq->size--;
} }
} }
@ -4359,14 +4297,9 @@ static void other_meta_queue_free(struct meta_queue *mq, struct bgp_dest_queue *
struct bgp_dest *dest; struct bgp_dest *dest;
while (!STAILQ_EMPTY(l)) { while (!STAILQ_EMPTY(l)) {
struct bgp_table *table;
dest = STAILQ_FIRST(l); dest = STAILQ_FIRST(l);
STAILQ_REMOVE_HEAD(l, pq); STAILQ_REMOVE_HEAD(l, pq);
STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */ STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
table = bgp_dest_table(dest);
bgp_table_unlock(table);
mq->size--; mq->size--;
} }
} }
@ -4937,7 +4870,6 @@ bgp_update_nexthop_reachability_check(struct bgp *bgp, struct peer *peer, struct
{ {
bool connected; bool connected;
afi_t nh_afi; afi_t nh_afi;
struct bgp_path_info *bpi_ultimate = NULL;
if (((afi == AFI_IP || afi == AFI_IP6) && if (((afi == AFI_IP || afi == AFI_IP6) &&
(safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST || (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST ||
@ -4953,16 +4885,13 @@ bgp_update_nexthop_reachability_check(struct bgp *bgp, struct peer *peer, struct
struct bgp *bgp_nexthop = bgp; struct bgp *bgp_nexthop = bgp;
if (pi->extra && pi->extra->vrfleak && pi->extra->vrfleak->bgp_orig) { if (pi->extra && pi->extra->vrfleak && pi->extra->vrfleak->bgp_orig)
bgp_nexthop = pi->extra->vrfleak->bgp_orig; bgp_nexthop = pi->extra->vrfleak->bgp_orig;
if (pi->sub_type == BGP_ROUTE_IMPORTED)
bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
}
nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr); nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr);
if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, nh_afi, safi, pi, NULL, connected, if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, nh_afi, safi, pi, NULL, connected,
bgp_nht_param_prefix, bpi_ultimate) || bgp_nht_param_prefix) ||
CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)) { CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)) {
if (accept_own) if (accept_own)
bgp_path_info_set_flag(dest, pi, BGP_PATH_ACCEPT_OWN); bgp_path_info_set_flag(dest, pi, BGP_PATH_ACCEPT_OWN);
@ -5222,8 +5151,7 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
* attr->evpn_overlay with evpn directly. Instead memcpy * attr->evpn_overlay with evpn directly. Instead memcpy
* evpn to new_atr.evpn_overlay before it is interned. * evpn to new_atr.evpn_overlay before it is interned.
*/ */
if (evpn && afi == AFI_L2VPN && if (soft_reconfig && evpn && afi == AFI_L2VPN) {
(soft_reconfig || !CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SOFT_RECONFIG))) {
bgp_attr_set_evpn_overlay(&new_attr, evpn); bgp_attr_set_evpn_overlay(&new_attr, evpn);
p_evpn = NULL; p_evpn = NULL;
} }
@ -6512,380 +6440,11 @@ void bgp_clear_route(struct peer *peer, afi_t afi, safi_t safi)
peer_unlock(peer); peer_unlock(peer);
} }
/*
* Clear one path-info during clearing processing
*/
static void clearing_clear_one_pi(struct bgp_table *table, struct bgp_dest *dest,
struct bgp_path_info *pi)
{
afi_t afi;
safi_t safi;
struct bgp *bgp;
bgp = table->bgp;
afi = table->afi;
safi = table->safi;
/* graceful restart STALE flag set. */
if (((CHECK_FLAG(pi->peer->sflags, PEER_STATUS_NSF_WAIT)
&& pi->peer->nsf[afi][safi])
|| CHECK_FLAG(pi->peer->af_sflags[afi][safi],
PEER_STATUS_ENHANCED_REFRESH))
&& !CHECK_FLAG(pi->flags, BGP_PATH_STALE)
&& !CHECK_FLAG(pi->flags, BGP_PATH_UNUSEABLE)) {
bgp_path_info_set_flag(dest, pi, BGP_PATH_STALE);
} else {
/* If this is an EVPN route, process for
* un-import. */
if (safi == SAFI_EVPN)
bgp_evpn_unimport_route(
bgp, afi, safi,
bgp_dest_get_prefix(dest), pi);
/* Handle withdraw for VRF route-leaking and L3VPN */
if (SAFI_UNICAST == safi
&& (bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
vpn_leak_from_vrf_withdraw(bgp_get_default(),
bgp, pi);
}
if (SAFI_MPLS_VPN == safi &&
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
vpn_leak_to_vrf_withdraw(pi);
}
bgp_rib_remove(dest, pi, pi->peer, afi, safi);
}
}
/*
* Helper to capture interrupt/resume context info for clearing processing. We
* may be iterating at two levels, so we may need to capture two levels of context
* or keying data.
*/
static void set_clearing_resume_info(struct bgp_clearing_info *cinfo,
const struct bgp_table *table,
const struct prefix *p, bool inner_p)
{
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: %sinfo for %s/%s %pFX", __func__,
inner_p ? "inner " : "", afi2str(table->afi),
safi2str(table->safi), p);
SET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_RESUME);
if (inner_p) {
cinfo->inner_afi = table->afi;
cinfo->inner_safi = table->safi;
memcpy(&cinfo->inner_pfx, p, sizeof(struct prefix));
SET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_INNER);
} else {
cinfo->last_afi = table->afi;
cinfo->last_safi = table->safi;
memcpy(&cinfo->last_pfx, p, sizeof(struct prefix));
}
}
/*
* Helper to establish position in a table, possibly using "resume" info stored
* during an iteration
*/
static struct bgp_dest *clearing_dest_helper(struct bgp_table *table,
struct bgp_clearing_info *cinfo,
bool inner_p)
{
struct bgp_dest *dest;
const struct prefix *pfx;
/* Iterate at start of table, or resume using inner or outer prefix */
dest = bgp_table_top(table);
if (CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_RESUME)) {
pfx = NULL;
if (inner_p) {
if (CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_INNER))
pfx = &(cinfo->inner_pfx);
} else {
pfx = &(cinfo->last_pfx);
}
if (pfx) {
dest = bgp_node_match(table, pfx);
if (dest) {
/* if 'dest' matches or precedes the 'last' prefix
* visited, then advance.
*/
while (dest && (prefix_cmp(&(dest->rn->p), pfx) <= 0))
dest = bgp_route_next(dest);
}
}
}
return dest;
}
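clearing_dest_helper restarts a table walk either at the top of the table or just past the last prefix recorded in the clearing context. The same resume-from-key idea, reduced to a hedged, self-contained sketch over a sorted array (not FRR code):

#include <stddef.h>

/* Resume an ordered walk over a sorted array of keys: return the index of the
 * first element strictly greater than 'saved', or 0 to start from the top. */
static size_t walk_begin_or_resume(const int *keys, size_t nkeys, const int *saved)
{
	size_t i = 0;

	if (saved) {
		/* Skip everything at or before the saved key: already visited. */
		while (i < nkeys && keys[i] <= *saved)
			i++;
	}
	return i;
}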
/*
* Callback to begin or resume the rib-walk for peer clearing, with info carried in
* a clearing context.
*/
static void clear_dests_callback(struct event *event)
{
int ret;
struct bgp_clearing_info *cinfo = EVENT_ARG(event);
/* Begin, or continue, work */
ret = clear_batch_rib_helper(cinfo);
if (ret == 0) {
/* All done, clean up context */
bgp_clearing_batch_completed(cinfo);
} else {
/* Need to resume the work, with 'cinfo' */
event_add_event(bm->master, clear_dests_callback, cinfo, 0,
&cinfo->t_sched);
}
}
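clear_dests_callback is the begin-or-resume shape used throughout the batch-clearing code: do a bounded slice of work, and if anything remains, schedule the same callback again with the same context. A hedged, generic version of that pattern using the FRR event API as it appears in this hunk (work_ctx, do_bounded_slice and work_finished are illustrative names):

static void work_slice_cb(struct event *ev)
{
	struct work_ctx *ctx = EVENT_ARG(ev);

	if (do_bounded_slice(ctx) == 0) {
		/* All done: release the context. */
		work_finished(ctx);
	} else {
		/* More to do: reschedule ourselves with the same context. */
		event_add_event(bm->master, work_slice_cb, ctx, 0, &ctx->t_sched);
	}
}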
/*
* Walk a single table for batch peer clearing processing. Limit the number of dests
* examined, and return when reaching the limit. Capture "last" info about the
* last dest we process so we can resume later.
*/
static int walk_batch_table_helper(struct bgp_clearing_info *cinfo,
struct bgp_table *table, bool inner_p)
{
int ret = 0;
struct bgp_dest *dest;
bool force = (cinfo->bgp->process_queue == NULL);
uint32_t examined = 0, processed = 0;
struct prefix pfx;
/* Locate starting dest, possibly using "resume" info */
dest = clearing_dest_helper(table, cinfo, inner_p);
if (dest == NULL) {
/* Nothing more to do for this table? */
return 0;
}
for ( ; dest; dest = bgp_route_next(dest)) {
struct bgp_path_info *pi, *next;
struct bgp_adj_in *ain;
struct bgp_adj_in *ain_next;
examined++;
cinfo->curr_counter++;
/* Save dest's prefix */
memcpy(&pfx, &dest->rn->p, sizeof(struct prefix));
ain = dest->adj_in;
while (ain) {
ain_next = ain->next;
if (bgp_clearing_batch_check_peer(cinfo, ain->peer))
bgp_adj_in_remove(&dest, ain);
ain = ain_next;
assert(dest != NULL);
}
for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = next) {
next = pi->next;
if (!bgp_clearing_batch_check_peer(cinfo, pi->peer))
continue;
processed++;
if (force) {
bgp_path_info_reap(dest, pi);
} else {
/* Do clearing for this pi */
clearing_clear_one_pi(table, dest, pi);
}
}
if (cinfo->curr_counter >= bm->peer_clearing_batch_max_dests) {
/* Capture info about last dest seen and break */
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: %s/%s: pfx %pFX reached limit %u", __func__,
afi2str(table->afi), safi2str(table->safi), &pfx,
cinfo->curr_counter);
/* Reset the counter */
cinfo->curr_counter = 0;
set_clearing_resume_info(cinfo, table, &pfx, inner_p);
ret = -1;
break;
}
}
if (examined > 0) {
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: %s/%s: examined %u dests, processed %u paths",
__func__, afi2str(table->afi),
safi2str(table->safi), examined, processed);
}
return ret;
}
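The third ingredient is the per-pass budget: walk_batch_table_helper counts every destination it examines and, once the configured maximum is reached, records the prefix it stopped at and returns non-zero so the caller knows to reschedule. Stripped of the BGP specifics, the control flow is roughly this (a sketch, not FRR code):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Process items from 'start', stopping after 'budget' of them.  On early
 * return, '*resume' holds the index to continue from on the next pass. */
static bool process_with_budget(int *items, size_t nitems, size_t start,
				uint32_t budget, size_t *resume)
{
	uint32_t examined = 0;

	for (size_t i = start; i < nitems; i++) {
		items[i] = 0;                /* stand-in for the real per-item work */
		if (++examined >= budget) {
			*resume = i + 1;     /* remember where to pick up */
			return false;        /* not finished: caller reschedules */
		}
	}
	return true;                         /* table fully processed */
}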
/*
* RIB-walking helper for batch clearing work: walk all tables, identify
* dests that are affected by the peers in the batch, enqueue the dests for
* async processing.
*/
static int clear_batch_rib_helper(struct bgp_clearing_info *cinfo)
{
int ret = 0;
afi_t afi;
safi_t safi;
struct bgp_dest *dest;
struct bgp_table *table, *outer_table;
struct prefix pfx;
/* Maybe resume afi/safi iteration */
if (CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_RESUME)) {
afi = cinfo->last_afi;
safi = cinfo->last_safi;
} else {
afi = AFI_IP;
safi = SAFI_UNICAST;
}
/* Iterate through afi/safi combos */
for (; afi < AFI_MAX; afi++) {
for (; safi < SAFI_MAX; safi++) {
/* Identify table to be examined: special handling
* for some SAFIs
*/
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: examining AFI/SAFI %s/%s", __func__, afi2str(afi),
safi2str(safi));
/* Record the tables we've seen and don't repeat */
if (cinfo->table_map[afi][safi] > 0)
continue;
if (safi != SAFI_MPLS_VPN && safi != SAFI_ENCAP && safi != SAFI_EVPN) {
table = cinfo->bgp->rib[afi][safi];
if (!table) {
/* Invalid table: don't use 'resume' info */
UNSET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_RESUME);
continue;
}
ret = walk_batch_table_helper(cinfo, table, false /*inner*/);
if (ret != 0)
break;
cinfo->table_map[afi][safi] = 1;
} else {
/* Process "inner" table for these SAFIs */
outer_table = cinfo->bgp->rib[afi][safi];
/* Begin or resume iteration in "outer" table */
dest = clearing_dest_helper(outer_table, cinfo, false);
for (; dest; dest = bgp_route_next(dest)) {
table = bgp_dest_get_bgp_table_info(dest);
if (!table) {
/* If we resumed to an inner afi/safi, but
* it's no longer valid, reset resume info.
*/
UNSET_FLAG(cinfo->flags,
BGP_CLEARING_INFO_FLAG_RESUME);
continue;
}
/* Capture last prefix */
memcpy(&pfx, &dest->rn->p, sizeof(struct prefix));
/* This will resume the "inner" walk if necessary */
ret = walk_batch_table_helper(cinfo, table, true /*inner*/);
if (ret != 0) {
/* The "inner" resume info will be set;
* capture the resume info we need
* from the outer afi/safi and dest
*/
set_clearing_resume_info(cinfo, outer_table, &pfx,
false);
break;
}
}
if (ret != 0)
break;
cinfo->table_map[afi][safi] = 1;
}
/* We've finished with a table: ensure we don't try to use stale
* resume info.
*/
UNSET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_RESUME);
}
/* Return immediately, otherwise the 'ret' state will be overwritten
 * by the next afi/safi, and the resume state stored for the current
 * afi/safi in walk_batch_table_helper will be overwritten as well.
 * That could skip nets that still need to be walked, so they would
 * never be marked for deletion from the BGP table.
 */
if (ret != 0)
return ret;
safi = SAFI_UNICAST;
}
return ret;
}
/*
* Identify prefixes that need to be cleared for a batch of peers in 'cinfo'.
* The actual clearing processing will be done async...
*/
void bgp_clear_route_batch(struct bgp_clearing_info *cinfo)
{
int ret;
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: BGP %s, batch %u", __func__,
cinfo->bgp->name_pretty, cinfo->id);
/* Walk the rib, checking the peers in the batch. If the rib walk needs
* to continue, a task will be scheduled
*/
ret = clear_batch_rib_helper(cinfo);
if (ret == 0) {
/* All done - clean up. */
bgp_clearing_batch_completed(cinfo);
} else {
/* Handle pause/resume for the walk: we've captured key info
* in cinfo so we can resume later.
*/
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: reschedule cinfo at %s/%s, %pFX", __func__,
afi2str(cinfo->last_afi),
safi2str(cinfo->last_safi), &(cinfo->last_pfx));
event_add_event(bm->master, clear_dests_callback, cinfo, 0,
&cinfo->t_sched);
}
}
void bgp_clear_route_all(struct peer *peer) void bgp_clear_route_all(struct peer *peer)
{ {
afi_t afi; afi_t afi;
safi_t safi; safi_t safi;
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s: peer %pBP", __func__, peer);
/* We may be able to batch multiple peers' clearing work: check
* and see.
*/
if (bgp_clearing_batch_add_peer(peer->bgp, peer))
return;
FOREACH_AFI_SAFI (afi, safi) FOREACH_AFI_SAFI (afi, safi)
bgp_clear_route(peer, afi, safi); bgp_clear_route(peer, afi, safi);
@ -7346,8 +6905,8 @@ static void bgp_nexthop_reachability_check(afi_t afi, safi_t safi,
/* Nexthop reachability check. */ /* Nexthop reachability check. */
if (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST) { if (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST) {
if (CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK)) { if (CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK)) {
if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, afi, safi, bpi, NULL, 0, p, if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, afi, safi,
NULL)) bpi, NULL, 0, p))
bgp_path_info_set_flag(dest, bpi, bgp_path_info_set_flag(dest, bpi,
BGP_PATH_VALID); BGP_PATH_VALID);
else { else {
@ -7519,9 +7078,9 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
break; break;
if (pi) { if (pi) {
if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED) && if (attrhash_cmp(pi->attr, attr_new)
!CHECK_FLAG(bgp->flags, BGP_FLAG_FORCE_STATIC_PROCESS) && && !CHECK_FLAG(pi->flags, BGP_PATH_REMOVED)
attrhash_cmp(pi->attr, attr_new)) { && !CHECK_FLAG(bgp->flags, BGP_FLAG_FORCE_STATIC_PROCESS)) {
bgp_dest_unlock_node(dest); bgp_dest_unlock_node(dest);
bgp_attr_unintern(&attr_new); bgp_attr_unintern(&attr_new);
aspath_unintern(&attr.aspath); aspath_unintern(&attr.aspath);
@ -7568,7 +7127,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p,
&pi->extra->labels->label[0]); &pi->extra->labels->label[0]);
} }
#endif #endif
if (pi->extra && pi->extra->vrfleak && pi->extra->vrfleak->bgp_orig) if (pi->extra && pi->extra->vrfleak->bgp_orig)
bgp_nexthop = pi->extra->vrfleak->bgp_orig; bgp_nexthop = pi->extra->vrfleak->bgp_orig;
bgp_nexthop_reachability_check(afi, safi, pi, p, dest, bgp_nexthop_reachability_check(afi, safi, pi, p, dest,
@ -8015,8 +7574,6 @@ void bgp_static_delete(struct bgp *bgp)
rm = bgp_dest_unlock_node(rm); rm = bgp_dest_unlock_node(rm);
assert(rm); assert(rm);
} }
bgp_table_unlock(table);
} else { } else {
bgp_static = bgp_dest_get_bgp_static_info(dest); bgp_static = bgp_dest_get_bgp_static_info(dest);
bgp_static_withdraw(bgp, bgp_static_withdraw(bgp,
@ -8458,9 +8015,6 @@ static void bgp_aggregate_install(
bgp_process(bgp, dest, new, afi, safi); bgp_process(bgp, dest, new, afi, safi);
if (debug) if (debug)
zlog_debug(" aggregate %pFX: installed", p); zlog_debug(" aggregate %pFX: installed", p);
if (SAFI_UNICAST == safi && (bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_update(bgp_get_default(), bgp, new);
} else { } else {
uninstall_aggregate_route: uninstall_aggregate_route:
/* Withdraw the aggregate route from routing table. */ /* Withdraw the aggregate route from routing table. */
@ -8469,11 +8023,6 @@ static void bgp_aggregate_install(
bgp_process(bgp, dest, pi, afi, safi); bgp_process(bgp, dest, pi, afi, safi);
if (debug) if (debug)
zlog_debug(" aggregate %pFX: uninstall", p); zlog_debug(" aggregate %pFX: uninstall", p);
if (SAFI_UNICAST == safi &&
(bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
vpn_leak_from_vrf_withdraw(bgp_get_default(), bgp, pi);
}
} }
} }
@ -8578,25 +8127,14 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
/* We are toggling suppression back. */ /* We are toggling suppression back. */
if (suppress) { if (suppress) {
/* Suppress route if not suppressed already. */ /* Suppress route if not suppressed already. */
if (aggr_suppress_path(aggregate, pi)) { if (aggr_suppress_path(aggregate, pi))
bgp_process(bgp, dest, pi, afi, safi); bgp_process(bgp, dest, pi, afi, safi);
if (SAFI_UNICAST == safi &&
(bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_withdraw(bgp_get_default(), bgp,
pi);
}
continue; continue;
} }
/* Install route if there is no more suppression. */ /* Install route if there is no more suppression. */
if (aggr_unsuppress_path(aggregate, pi)) { if (aggr_unsuppress_path(aggregate, pi))
bgp_process(bgp, dest, pi, afi, safi); bgp_process(bgp, dest, pi, afi, safi);
if (SAFI_UNICAST == safi &&
(bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_update(bgp_get_default(), bgp, pi);
}
} }
} }
bgp_dest_unlock_node(top); bgp_dest_unlock_node(top);
@ -8727,14 +8265,8 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
*/ */
if (aggregate->summary_only if (aggregate->summary_only
&& AGGREGATE_MED_VALID(aggregate)) { && AGGREGATE_MED_VALID(aggregate)) {
if (aggr_suppress_path(aggregate, pi)) { if (aggr_suppress_path(aggregate, pi))
bgp_process(bgp, dest, pi, afi, safi); bgp_process(bgp, dest, pi, afi, safi);
if (SAFI_UNICAST == safi &&
(bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_withdraw(bgp_get_default(), bgp,
pi);
}
} }
/* /*
@ -8749,14 +8281,8 @@ bool bgp_aggregate_route(struct bgp *bgp, const struct prefix *p, afi_t afi,
if (aggregate->suppress_map_name if (aggregate->suppress_map_name
&& AGGREGATE_MED_VALID(aggregate) && AGGREGATE_MED_VALID(aggregate)
&& aggr_suppress_map_test(bgp, aggregate, pi)) { && aggr_suppress_map_test(bgp, aggregate, pi)) {
if (aggr_suppress_path(aggregate, pi)) { if (aggr_suppress_path(aggregate, pi))
bgp_process(bgp, dest, pi, afi, safi); bgp_process(bgp, dest, pi, afi, safi);
if (SAFI_UNICAST == safi &&
(bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_withdraw(bgp_get_default(), bgp,
pi);
}
} }
aggregate->count++; aggregate->count++;
@ -8904,13 +8430,8 @@ void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p, afi_t afi,
*/ */
if (pi->extra && pi->extra->aggr_suppressors && if (pi->extra && pi->extra->aggr_suppressors &&
listcount(pi->extra->aggr_suppressors)) { listcount(pi->extra->aggr_suppressors)) {
if (aggr_unsuppress_path(aggregate, pi)) { if (aggr_unsuppress_path(aggregate, pi))
bgp_process(bgp, dest, pi, afi, safi); bgp_process(bgp, dest, pi, afi, safi);
if (SAFI_UNICAST == safi &&
(bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_update(bgp_get_default(), bgp, pi);
}
} }
if (aggregate->count > 0) if (aggregate->count > 0)
@ -9116,21 +8637,13 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi,
return; return;
if (aggregate->summary_only && AGGREGATE_MED_VALID(aggregate)) if (aggregate->summary_only && AGGREGATE_MED_VALID(aggregate))
if (aggr_unsuppress_path(aggregate, pi)) { if (aggr_unsuppress_path(aggregate, pi))
bgp_process(bgp, pi->net, pi, afi, safi); bgp_process(bgp, pi->net, pi, afi, safi);
if (SAFI_UNICAST == safi && (bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_update(bgp_get_default(), bgp, pi);
}
if (aggregate->suppress_map_name && AGGREGATE_MED_VALID(aggregate) if (aggregate->suppress_map_name && AGGREGATE_MED_VALID(aggregate)
&& aggr_suppress_map_test(bgp, aggregate, pi)) && aggr_suppress_map_test(bgp, aggregate, pi))
if (aggr_unsuppress_path(aggregate, pi)) { if (aggr_unsuppress_path(aggregate, pi))
bgp_process(bgp, pi->net, pi, afi, safi); bgp_process(bgp, pi->net, pi, afi, safi);
if (SAFI_UNICAST == safi && (bgp->inst_type == BGP_INSTANCE_TYPE_VRF ||
bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT))
vpn_leak_from_vrf_update(bgp_get_default(), bgp, pi);
}
/* /*
* This must be called after `summary`, `suppress-map` check to avoid * This must be called after `summary`, `suppress-map` check to avoid
@ -9389,6 +8902,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
struct prefix p; struct prefix p;
struct bgp_dest *dest; struct bgp_dest *dest;
struct bgp_aggregate *aggregate; struct bgp_aggregate *aggregate;
uint8_t as_set_new = as_set;
if (suppress_map && summary_only) { if (suppress_map && summary_only) {
vty_out(vty, vty_out(vty,
@ -9446,6 +8960,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
*/ */
if (bgp->reject_as_sets) { if (bgp->reject_as_sets) {
if (as_set == AGGREGATE_AS_SET) { if (as_set == AGGREGATE_AS_SET) {
as_set_new = AGGREGATE_AS_UNSET;
zlog_warn( zlog_warn(
"%s: Ignoring as-set because `bgp reject-as-sets` is enabled.", "%s: Ignoring as-set because `bgp reject-as-sets` is enabled.",
__func__); __func__);
@ -9454,7 +8969,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
} }
} }
aggregate->as_set = as_set; aggregate->as_set = as_set_new;
/* Override ORIGIN attribute if defined. /* Override ORIGIN attribute if defined.
* E.g.: Cisco and Juniper set ORIGIN for aggregated address * E.g.: Cisco and Juniper set ORIGIN for aggregated address
@ -9775,8 +9290,8 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
if (bpi) { if (bpi) {
/* Ensure the (source route) type is updated. */ /* Ensure the (source route) type is updated. */
bpi->type = type; bpi->type = type;
if (!CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED) && if (attrhash_cmp(bpi->attr, new_attr)
attrhash_cmp(bpi->attr, new_attr)) { && !CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)) {
bgp_attr_unintern(&new_attr); bgp_attr_unintern(&new_attr);
aspath_unintern(&attr.aspath); aspath_unintern(&attr.aspath);
bgp_dest_unlock_node(bn); bgp_dest_unlock_node(bn);
@ -12046,6 +11561,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
/* Line 7 display Originator, Cluster-id */ /* Line 7 display Originator, Cluster-id */
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID)) || if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID)) ||
CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST))) { CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST))) {
char buf[BUFSIZ] = {0};
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))) { if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))) {
if (json_paths) if (json_paths)
json_object_string_addf(json_path, json_object_string_addf(json_path,
@ -12057,7 +11574,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
} }
if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST))) { if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST))) {
struct cluster_list *cluster = bgp_attr_get_cluster(attr); struct cluster_list *cluster =
bgp_attr_get_cluster(attr);
int i;
if (json_paths) { if (json_paths) {
json_cluster_list = json_object_new_object(); json_cluster_list = json_object_new_object();
@ -13193,9 +12712,6 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
* though then we must display Advertised to on a path-by-path basis. */ * though then we must display Advertised to on a path-by-path basis. */
if (!bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) { if (!bgp_addpath_is_addpath_used(&bgp->tx_addpath, afi, safi)) {
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (peer->group)
continue;
if (bgp_adj_out_lookup(peer, dest, 0)) { if (bgp_adj_out_lookup(peer, dest, 0)) {
if (json && !json_adv_to) if (json && !json_adv_to)
json_adv_to = json_object_new_object(); json_adv_to = json_object_new_object();
@ -13337,6 +12853,8 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, struct bgp_
return CMD_WARNING; return CMD_WARNING;
} }
match.family = afi2family(afi);
if (use_json) if (use_json)
json = json_object_new_object(); json = json_object_new_object();
@ -13585,7 +13103,7 @@ DEFUN (show_ip_bgp_large_community_list,
afi_t afi = AFI_IP6; afi_t afi = AFI_IP6;
safi_t safi = SAFI_UNICAST; safi_t safi = SAFI_UNICAST;
int idx = 0; int idx = 0;
bool match_p = 0; bool exact_match = 0;
struct bgp *bgp = NULL; struct bgp *bgp = NULL;
bool uj = use_json(argc, argv); bool uj = use_json(argc, argv);
@ -13602,10 +13120,10 @@ DEFUN (show_ip_bgp_large_community_list,
const char *clist_number_or_name = argv[++idx]->arg; const char *clist_number_or_name = argv[++idx]->arg;
if (++idx < argc && strmatch(argv[idx]->text, "exact-match")) if (++idx < argc && strmatch(argv[idx]->text, "exact-match"))
match_p = 1; exact_match = 1;
return bgp_show_lcommunity_list(vty, bgp, clist_number_or_name, return bgp_show_lcommunity_list(vty, bgp, clist_number_or_name,
match_p, afi, safi, uj); exact_match, afi, safi, uj);
} }
DEFUN (show_ip_bgp_large_community, DEFUN (show_ip_bgp_large_community,
show_ip_bgp_large_community_cmd, show_ip_bgp_large_community_cmd,
@ -13624,7 +13142,7 @@ DEFUN (show_ip_bgp_large_community,
afi_t afi = AFI_IP6; afi_t afi = AFI_IP6;
safi_t safi = SAFI_UNICAST; safi_t safi = SAFI_UNICAST;
int idx = 0; int idx = 0;
bool match_p = false; bool exact_match = 0;
struct bgp *bgp = NULL; struct bgp *bgp = NULL;
bool uj = use_json(argc, argv); bool uj = use_json(argc, argv);
uint16_t show_flags = 0; uint16_t show_flags = 0;
@ -13642,10 +13160,10 @@ DEFUN (show_ip_bgp_large_community,
if (argv_find(argv, argc, "AA:BB:CC", &idx)) { if (argv_find(argv, argc, "AA:BB:CC", &idx)) {
if (argv_find(argv, argc, "exact-match", &idx)) { if (argv_find(argv, argc, "exact-match", &idx)) {
argc--; argc--;
match_p = true; exact_match = 1;
} }
return bgp_show_lcommunity(vty, bgp, argc, argv, return bgp_show_lcommunity(vty, bgp, argc, argv,
match_p, afi, safi, uj); exact_match, afi, safi, uj);
} else } else
return bgp_show(vty, bgp, afi, safi, return bgp_show(vty, bgp, afi, safi,
bgp_show_type_lcommunity_all, NULL, show_flags, bgp_show_type_lcommunity_all, NULL, show_flags,
@ -13916,7 +13434,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
void *output_arg = NULL; void *output_arg = NULL;
struct bgp *bgp = NULL; struct bgp *bgp = NULL;
int idx = 0; int idx = 0;
int match_p = 0; int exact_match = 0;
char *community = NULL; char *community = NULL;
bool first = true; bool first = true;
uint16_t show_flags = 0; uint16_t show_flags = 0;
@ -13981,7 +13499,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
community = maybecomm; community = maybecomm;
if (argv_find(argv, argc, "exact-match", &idx)) if (argv_find(argv, argc, "exact-match", &idx))
match_p = 1; exact_match = 1;
if (!community) if (!community)
sh_type = bgp_show_type_community_all; sh_type = bgp_show_type_community_all;
@ -13992,7 +13510,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
struct community_list *list; struct community_list *list;
if (argv_find(argv, argc, "exact-match", &idx)) if (argv_find(argv, argc, "exact-match", &idx))
match_p = 1; exact_match = 1;
list = community_list_lookup(bgp_clist, clist_number_or_name, 0, list = community_list_lookup(bgp_clist, clist_number_or_name, 0,
COMMUNITY_LIST_MASTER); COMMUNITY_LIST_MASTER);
@ -14002,7 +13520,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
return CMD_WARNING; return CMD_WARNING;
} }
if (match_p) if (exact_match)
sh_type = bgp_show_type_community_list_exact; sh_type = bgp_show_type_community_list_exact;
else else
sh_type = bgp_show_type_community_list; sh_type = bgp_show_type_community_list;
@ -14112,7 +13630,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
/* show bgp: AFI_IP6, show ip bgp: AFI_IP */ /* show bgp: AFI_IP6, show ip bgp: AFI_IP */
if (community) if (community)
return bgp_show_community(vty, bgp, community, return bgp_show_community(vty, bgp, community,
match_p, afi, safi, exact_match, afi, safi,
show_flags); show_flags);
else else
return bgp_show(vty, bgp, afi, safi, sh_type, return bgp_show(vty, bgp, afi, safi, sh_type,
@ -14157,7 +13675,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
if (community) if (community)
bgp_show_community( bgp_show_community(
vty, abgp, community, vty, abgp, community,
match_p, afi, safi, exact_match, afi, safi,
show_flags); show_flags);
else else
bgp_show(vty, abgp, afi, safi, bgp_show(vty, abgp, afi, safi,
@ -14205,7 +13723,7 @@ DEFPY(show_ip_bgp, show_ip_bgp_cmd,
if (community) if (community)
bgp_show_community( bgp_show_community(
vty, abgp, community, vty, abgp, community,
match_p, afi, safi, exact_match, afi, safi,
show_flags); show_flags);
else else
bgp_show(vty, abgp, afi, safi, bgp_show(vty, abgp, afi, safi,
@ -15475,15 +14993,15 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table,
json_net = json_net =
json_object_new_object(); json_object_new_object();
struct bgp_path_info pathi; struct bgp_path_info bpi;
struct bgp_dest buildit = *dest; struct bgp_dest buildit = *dest;
struct bgp_dest *pass_in; struct bgp_dest *pass_in;
if (route_filtered || if (route_filtered ||
ret == RMAP_DENY) { ret == RMAP_DENY) {
pathi.attr = &attr; bpi.attr = &attr;
pathi.peer = peer; bpi.peer = peer;
buildit.info = &pathi; buildit.info = &bpi;
pass_in = &buildit; pass_in = &buildit;
} else } else
@ -15754,15 +15272,11 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
} else { } else {
json_object_object_add(json_ar, rd_str, json_routes); json_object_object_add(json_ar, rd_str, json_routes);
} }
} else if (json_routes) }
json_object_free(json_routes);
output_count += output_count_per_rd; output_count += output_count_per_rd;
filtered_count += filtered_count_per_rd; filtered_count += filtered_count_per_rd;
} }
if (json_ar &&
(type == bgp_show_adj_route_advertised || type == bgp_show_adj_route_received))
json_object_free(json_ar);
if (first == false && json_routes) if (first == false && json_routes)
vty_out(vty, "}"); vty_out(vty, "}");
} else { } else {
@ -16747,6 +16261,8 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name,
return CMD_WARNING; return CMD_WARNING;
} }
match.family = afi2family(afi);
if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP) if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
|| (safi == SAFI_EVPN)) { || (safi == SAFI_EVPN)) {
for (dest = bgp_table_top(bgp->rib[AFI_IP][safi]); dest; for (dest = bgp_table_top(bgp->rib[AFI_IP][safi]); dest;

View file

@ -88,6 +88,7 @@ enum bgp_show_adj_route_type {
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE4_SIZE -9 #define BGP_NLRI_PARSE_ERROR_EVPN_TYPE4_SIZE -9
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE5_SIZE -10 #define BGP_NLRI_PARSE_ERROR_EVPN_TYPE5_SIZE -10
#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_IPV6_NOT_SUPPORTED -11 #define BGP_NLRI_PARSE_ERROR_FLOWSPEC_IPV6_NOT_SUPPORTED -11
#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_NLRI_SIZELIMIT -12
#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_BAD_FORMAT -13 #define BGP_NLRI_PARSE_ERROR_FLOWSPEC_BAD_FORMAT -13
#define BGP_NLRI_PARSE_ERROR_ADDRESS_FAMILY -14 #define BGP_NLRI_PARSE_ERROR_ADDRESS_FAMILY -14
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE1_SIZE -15 #define BGP_NLRI_PARSE_ERROR_EVPN_TYPE1_SIZE -15
@ -778,9 +779,6 @@ extern void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp,
extern bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi); extern bool bgp_soft_reconfig_in(struct peer *peer, afi_t afi, safi_t safi);
extern void bgp_clear_route(struct peer *, afi_t, safi_t); extern void bgp_clear_route(struct peer *, afi_t, safi_t);
extern void bgp_clear_route_all(struct peer *); extern void bgp_clear_route_all(struct peer *);
/* Clear routes for a batch of peers */
void bgp_clear_route_batch(struct bgp_clearing_info *cinfo);
extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t); extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t);
extern void bgp_clear_stale_route(struct peer *, afi_t, safi_t); extern void bgp_clear_stale_route(struct peer *, afi_t, safi_t);
extern void bgp_set_stale_route(struct peer *peer, afi_t afi, safi_t safi); extern void bgp_set_stale_route(struct peer *peer, afi_t afi, safi_t safi);
@ -936,6 +934,9 @@ extern bool subgroup_announce_check(struct bgp_dest *dest,
const struct prefix *p, struct attr *attr, const struct prefix *p, struct attr *attr,
struct attr *post_attr); struct attr *post_attr);
extern void bgp_peer_clear_node_queue_drain_immediate(struct peer *peer);
extern void bgp_process_queues_drain_immediate(void);
/* for encap/vpn */ /* for encap/vpn */
extern struct bgp_dest *bgp_safi_node_lookup(struct bgp_table *table, extern struct bgp_dest *bgp_safi_node_lookup(struct bgp_table *table,
safi_t safi, safi_t safi,

View file

@ -1441,7 +1441,7 @@ route_set_evpn_gateway_ip(void *rule, const struct prefix *prefix, void *object)
/* Set gateway-ip value. */ /* Set gateway-ip value. */
bre->type = OVERLAY_INDEX_GATEWAY_IP; bre->type = OVERLAY_INDEX_GATEWAY_IP;
bre->gw_ip = *gw_ip; memcpy(&bre->gw_ip, &gw_ip->ip.addr, IPADDRSZ(gw_ip));
bgp_attr_set_evpn_overlay(path->attr, bre); bgp_attr_set_evpn_overlay(path->attr, bre);
return RMAP_OKAY; return RMAP_OKAY;
@ -2615,9 +2615,6 @@ route_set_aspath_exclude(void *rule, const struct prefix *dummy, void *object)
path->attr->aspath = path->attr->aspath =
aspath_filter_exclude_acl(new_path, aspath_filter_exclude_acl(new_path,
ase->exclude_aspath_acl); ase->exclude_aspath_acl);
else
aspath_free(new_path);
return RMAP_OKAY; return RMAP_OKAY;
} }

View file

@ -1355,7 +1355,7 @@ lib_route_map_entry_match_condition_rmap_match_condition_comm_list_finish(
{ {
struct routemap_hook_context *rhc; struct routemap_hook_context *rhc;
const char *value; const char *value;
bool match_p = false; bool exact_match = false;
bool any = false; bool any = false;
char *argstr; char *argstr;
const char *condition; const char *condition;
@ -1367,13 +1367,13 @@ lib_route_map_entry_match_condition_rmap_match_condition_comm_list_finish(
value = yang_dnode_get_string(args->dnode, "comm-list-name"); value = yang_dnode_get_string(args->dnode, "comm-list-name");
if (yang_dnode_exists(args->dnode, "comm-list-name-exact-match")) if (yang_dnode_exists(args->dnode, "comm-list-name-exact-match"))
match_p = yang_dnode_get_bool( exact_match = yang_dnode_get_bool(
args->dnode, "./comm-list-name-exact-match"); args->dnode, "./comm-list-name-exact-match");
if (yang_dnode_exists(args->dnode, "comm-list-name-any")) if (yang_dnode_exists(args->dnode, "comm-list-name-any"))
any = yang_dnode_get_bool(args->dnode, "comm-list-name-any"); any = yang_dnode_get_bool(args->dnode, "comm-list-name-any");
if (match_p) { if (exact_match) {
argstr = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, argstr = XMALLOC(MTYPE_ROUTE_MAP_COMPILED,
strlen(value) + strlen("exact-match") + 2); strlen(value) + strlen("exact-match") + 2);

View file

@ -529,10 +529,7 @@ static struct rtr_mgr_group *get_groups(struct list *cache_list)
inline bool is_synchronized(struct rpki_vrf *rpki_vrf) inline bool is_synchronized(struct rpki_vrf *rpki_vrf)
{ {
if (is_running(rpki_vrf))
return rpki_vrf->rtr_is_synced; return rpki_vrf->rtr_is_synced;
else
return false;
} }
inline bool is_running(struct rpki_vrf *rpki_vrf) inline bool is_running(struct rpki_vrf *rpki_vrf)

View file

@ -967,10 +967,10 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
if (ctx->uj) { if (ctx->uj) {
json_peers = json_object_new_array(); json_peers = json_object_new_array();
SUBGRP_FOREACH_PEER (subgrp, paf) { SUBGRP_FOREACH_PEER (subgrp, paf) {
json_object *jpeer = json_object *peer =
json_object_new_string( json_object_new_string(
paf->peer->host); paf->peer->host);
json_object_array_add(json_peers, jpeer); json_object_array_add(json_peers, peer);
} }
json_object_object_add(json_subgrp, "peers", json_object_object_add(json_subgrp, "peers",
json_peers); json_peers);

View file

@ -1111,12 +1111,9 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
int ret = 0; int ret = 0;
bool found = false; bool found = false;
struct peer *peer; struct peer *peer;
bool afi_safi_unspec = false;
VTY_BGP_GR_DEFINE_LOOP_VARIABLE; VTY_BGP_GR_DEFINE_LOOP_VARIABLE;
afi_safi_unspec = ((afi == AFI_UNSPEC) && (safi == SAFI_UNSPEC));
/* Clear all neighbors. */ /* Clear all neighbors. */
/* /*
* Pass along pointer to next node to peer_clear() when walking all * Pass along pointer to next node to peer_clear() when walking all
@ -1124,8 +1121,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
* doppelganger * doppelganger
*/ */
if (sort == clear_all) { if (sort == clear_all) {
if (afi_safi_unspec)
bgp_clearing_batch_begin(bgp);
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
bgp_peer_gr_flags_update(peer); bgp_peer_gr_flags_update(peer);
@ -1152,8 +1147,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (stype == BGP_CLEAR_SOFT_NONE) if (stype == BGP_CLEAR_SOFT_NONE)
bgp->update_delay_over = 0; bgp->update_delay_over = 0;
if (afi_safi_unspec)
bgp_clearing_batch_end_event_start(bgp);
return CMD_SUCCESS; return CMD_SUCCESS;
} }
@ -1209,8 +1202,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
return CMD_WARNING; return CMD_WARNING;
} }
if (afi_safi_unspec)
bgp_clearing_batch_begin(bgp);
for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
ret = bgp_peer_clear(peer, afi, safi, &nnode, stype); ret = bgp_peer_clear(peer, afi, safi, &nnode, stype);
@ -1219,8 +1210,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
else else
found = true; found = true;
} }
if (afi_safi_unspec)
bgp_clearing_batch_end_event_start(bgp);
if (!found) if (!found)
vty_out(vty, vty_out(vty,
@ -1232,8 +1221,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
/* Clear all external (eBGP) neighbors. */ /* Clear all external (eBGP) neighbors. */
if (sort == clear_external) { if (sort == clear_external) {
if (afi_safi_unspec)
bgp_clearing_batch_begin(bgp);
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (peer->sort == BGP_PEER_IBGP) if (peer->sort == BGP_PEER_IBGP)
continue; continue;
@ -1258,8 +1245,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
&& bgp->present_zebra_gr_state == ZEBRA_GR_ENABLE) { && bgp->present_zebra_gr_state == ZEBRA_GR_ENABLE) {
bgp_zebra_send_capabilities(bgp, true); bgp_zebra_send_capabilities(bgp, true);
} }
if (afi_safi_unspec)
bgp_clearing_batch_end_event_start(bgp);
if (!found) if (!found)
vty_out(vty, vty_out(vty,
"%% BGP: No external %s peer is configured\n", "%% BGP: No external %s peer is configured\n",
@ -1277,8 +1263,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
return CMD_WARNING; return CMD_WARNING;
} }
if (afi_safi_unspec)
bgp_clearing_batch_begin(bgp);
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (peer->as != as) if (peer->as != as)
continue; continue;
@ -1304,8 +1288,6 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
bgp_zebra_send_capabilities(bgp, true); bgp_zebra_send_capabilities(bgp, true);
} }
if (afi_safi_unspec)
bgp_clearing_batch_end_event_start(bgp);
if (!found) if (!found)
vty_out(vty, vty_out(vty,
"%% BGP: No %s peer is configured with AS %s\n", "%% BGP: No %s peer is configured with AS %s\n",
@ -11080,6 +11062,7 @@ static int bgp_clear_prefix(struct vty *vty, const char *view_name,
return CMD_WARNING; return CMD_WARNING;
} }
match.family = afi2family(afi);
rib = bgp->rib[afi][safi]; rib = bgp->rib[afi][safi];
if (safi == SAFI_MPLS_VPN) { if (safi == SAFI_MPLS_VPN) {
@ -11503,7 +11486,7 @@ DEFPY (show_bgp_vrfs,
json_vrfs = json_object_new_object(); json_vrfs = json_object_new_object();
for (ALL_LIST_ELEMENTS_RO(inst, node, bgp)) { for (ALL_LIST_ELEMENTS_RO(inst, node, bgp)) {
const char *bname; const char *name;
/* Skip Views. */ /* Skip Views. */
if (bgp->inst_type == BGP_INSTANCE_TYPE_VIEW) if (bgp->inst_type == BGP_INSTANCE_TYPE_VIEW)
@ -11522,18 +11505,18 @@ DEFPY (show_bgp_vrfs,
json_vrf = json_object_new_object(); json_vrf = json_object_new_object();
if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) { if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
bname = VRF_DEFAULT_NAME; name = VRF_DEFAULT_NAME;
type = "DFLT"; type = "DFLT";
} else { } else {
bname = bgp->name; name = bgp->name;
type = "VRF"; type = "VRF";
} }
show_bgp_vrfs_detail_common(vty, bgp, json_vrf, bname, type, show_bgp_vrfs_detail_common(vty, bgp, json_vrf, name, type,
false); false);
if (uj) if (uj)
json_object_object_add(json_vrfs, bname, json_vrf); json_object_object_add(json_vrfs, name, json_vrf);
} }
if (uj) { if (uj) {
@ -14088,12 +14071,7 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
? "Advertise" ? "Advertise"
: "Withdraw"); : "Withdraw");
/* Receive and sent prefix count, if available */ /* Receive prefix count */
paf = peer_af_find(p, afi, safi);
if (paf && PAF_SUBGRP(paf))
vty_out(vty, " %u accepted, %u sent prefixes\n",
p->pcount[afi][safi], PAF_SUBGRP(paf)->scount);
else
vty_out(vty, " %u accepted prefixes\n", vty_out(vty, " %u accepted prefixes\n",
p->pcount[afi][safi]); p->pcount[afi][safi]);

View file

@ -56,8 +56,8 @@
#include "bgpd/bgp_lcommunity.h" #include "bgpd/bgp_lcommunity.h"
/* All information about zebra. */ /* All information about zebra. */
struct zclient *bgp_zclient = NULL; struct zclient *zclient = NULL;
struct zclient *bgp_zclient_sync; struct zclient *zclient_sync;
static bool bgp_zebra_label_manager_connect(void); static bool bgp_zebra_label_manager_connect(void);
/* hook to indicate vrf status change for SNMP */ /* hook to indicate vrf status change for SNMP */
@ -69,7 +69,7 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_IF_INFO, "BGP interface context");
/* Can we install into zebra? */ /* Can we install into zebra? */
static inline bool bgp_install_info_to_zebra(struct bgp *bgp) static inline bool bgp_install_info_to_zebra(struct bgp *bgp)
{ {
if (bgp_zclient->sock <= 0) if (zclient->sock <= 0)
return false; return false;
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) { if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) {
@ -137,7 +137,7 @@ static void bgp_start_interface_nbrs(struct bgp *bgp, struct interface *ifp)
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (peer->conf_if && (strcmp(peer->conf_if, ifp->name) == 0) && if (peer->conf_if && (strcmp(peer->conf_if, ifp->name) == 0) &&
!peer_established(peer->connection)) { !peer_established(peer->connection)) {
if (peer_active(peer->connection) == BGP_PEER_ACTIVE) if (peer_active(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start); BGP_EVENT_ADD(peer->connection, BGP_Start);
} }
@ -1010,15 +1010,15 @@ struct bgp *bgp_tm_bgp;
static void bgp_zebra_tm_connect(struct event *t) static void bgp_zebra_tm_connect(struct event *t)
{ {
struct zclient *zc; struct zclient *zclient;
int delay = 10, ret = 0; int delay = 10, ret = 0;
zc = EVENT_ARG(t); zclient = EVENT_ARG(t);
if (bgp_tm_status_connected && zc->sock > 0) if (bgp_tm_status_connected && zclient->sock > 0)
delay = 60; delay = 60;
else { else {
bgp_tm_status_connected = false; bgp_tm_status_connected = false;
ret = tm_table_manager_connect(zc); ret = tm_table_manager_connect(zclient);
} }
if (ret < 0) { if (ret < 0) {
zlog_err("Error connecting to table manager!"); zlog_err("Error connecting to table manager!");
@ -1031,7 +1031,7 @@ static void bgp_zebra_tm_connect(struct event *t)
} }
bgp_tm_status_connected = true; bgp_tm_status_connected = true;
if (!bgp_tm_chunk_obtained) { if (!bgp_tm_chunk_obtained) {
if (bgp_zebra_get_table_range(zc, bgp_tm_chunk_size, if (bgp_zebra_get_table_range(zclient, bgp_tm_chunk_size,
&bgp_tm_min, &bgp_tm_min,
&bgp_tm_max) >= 0) { &bgp_tm_max) >= 0) {
bgp_tm_chunk_obtained = true; bgp_tm_chunk_obtained = true;
@ -1040,7 +1040,7 @@ static void bgp_zebra_tm_connect(struct event *t)
} }
} }
} }
event_add_timer(bm->master, bgp_zebra_tm_connect, zc, delay, event_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect); &bgp_tm_thread_connect);
} }
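bgp_zebra_tm_connect is a timer-driven connect-and-retry loop: use a short delay while the table-manager session is down, a long delay once it is up, and always re-arm the same timer at the end. A hedged sketch of that skeleton (connected_p, try_connect and the timer handle are illustrative; event_add_timer is the FRR call used above):

static struct event *t_reconnect;

static void reconnect_timer_cb(struct event *t)
{
	struct zclient *zc = EVENT_ARG(t);
	int delay = 10;                       /* retry quickly while disconnected */

	if (connected_p(zc))
		delay = 60;                   /* connected: just poll occasionally */
	else
		try_connect(zc);              /* attempt to (re)establish the session */

	/* Always re-arm: this callback is the whole state machine. */
	event_add_timer(bm->master, reconnect_timer_cb, zc, delay, &t_reconnect);
}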
@ -1071,7 +1071,7 @@ void bgp_zebra_init_tm_connect(struct bgp *bgp)
bgp_tm_min = bgp_tm_max = 0; bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK; bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
bgp_tm_bgp = bgp; bgp_tm_bgp = bgp;
event_add_timer(bm->master, bgp_zebra_tm_connect, bgp_zclient_sync, delay, event_add_timer(bm->master, bgp_zebra_tm_connect, zclient_sync, delay,
&bgp_tm_thread_connect); &bgp_tm_thread_connect);
} }
@ -1650,7 +1650,7 @@ bgp_zebra_announce_actual(struct bgp_dest *dest, struct bgp_path_info *info,
__func__, p, (allow_recursion ? "" : "NOT ")); __func__, p, (allow_recursion ? "" : "NOT "));
} }
return zclient_route_send(ZEBRA_ROUTE_ADD, bgp_zclient, &api); return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
} }
@ -1747,7 +1747,7 @@ enum zclient_send_status bgp_zebra_withdraw_actual(struct bgp_dest *dest,
zlog_debug("Tx route delete %s (table id %u) %pFX", zlog_debug("Tx route delete %s (table id %u) %pFX",
bgp->name_pretty, api.tableid, &api.prefix); bgp->name_pretty, api.tableid, &api.prefix);
return zclient_route_send(ZEBRA_ROUTE_DELETE, bgp_zclient, &api); return zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
} }
/* /*
@ -2071,19 +2071,19 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
.table_id = instance, .table_id = instance,
.vrf_id = bgp->vrf_id, .vrf_id = bgp->vrf_id,
}; };
if (redist_lookup_table_direct(&bgp_zclient->mi_redist[afi][type], if (redist_lookup_table_direct(&zclient->mi_redist[afi][type], &table) !=
&table) != NULL) NULL)
return CMD_WARNING; return CMD_WARNING;
redist_add_table_direct(&bgp_zclient->mi_redist[afi][type], &table); redist_add_table_direct(&zclient->mi_redist[afi][type], &table);
} else { } else {
if (redist_check_instance(&bgp_zclient->mi_redist[afi][type], instance)) if (redist_check_instance(&zclient->mi_redist[afi][type], instance))
return CMD_WARNING; return CMD_WARNING;
redist_add_instance(&bgp_zclient->mi_redist[afi][type], instance); redist_add_instance(&zclient->mi_redist[afi][type], instance);
} }
} else { } else {
if (vrf_bitmap_check(&bgp_zclient->redist[afi][type], bgp->vrf_id)) if (vrf_bitmap_check(&zclient->redist[afi][type], bgp->vrf_id))
return CMD_WARNING; return CMD_WARNING;
#ifdef ENABLE_BGP_VNC #ifdef ENABLE_BGP_VNC
@ -2093,7 +2093,7 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
} }
#endif #endif
vrf_bitmap_set(&bgp_zclient->redist[afi][type], bgp->vrf_id); vrf_bitmap_set(&zclient->redist[afi][type], bgp->vrf_id);
} }
/* /*
@ -2111,7 +2111,7 @@ int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
instance); instance);
/* Send distribute add message to zebra. */ /* Send distribute add message to zebra. */
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, bgp_zclient, afi, type, zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, zclient, afi, type,
instance, bgp->vrf_id); instance, bgp->vrf_id);
return CMD_SUCCESS; return CMD_SUCCESS;
@ -2132,9 +2132,9 @@ int bgp_redistribute_resend(struct bgp *bgp, afi_t afi, int type,
instance); instance);
/* Send distribute add message to zebra. */ /* Send distribute add message to zebra. */
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_DELETE, bgp_zclient, afi, type, zebra_redistribute_send(ZEBRA_REDISTRIBUTE_DELETE, zclient, afi, type,
instance, bgp->vrf_id); instance, bgp->vrf_id);
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, bgp_zclient, afi, type, zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, zclient, afi, type,
instance, bgp->vrf_id); instance, bgp->vrf_id);
return 0; return 0;
@ -2214,21 +2214,21 @@ int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type,
.table_id = instance, .table_id = instance,
.vrf_id = bgp->vrf_id, .vrf_id = bgp->vrf_id,
}; };
if (redist_lookup_table_direct(&bgp_zclient->mi_redist[afi][type], &table) == if (redist_lookup_table_direct(&zclient->mi_redist[afi][type], &table) ==
NULL) NULL)
return CMD_WARNING; return CMD_WARNING;
redist_del_table_direct(&bgp_zclient->mi_redist[afi][type], &table); redist_del_table_direct(&zclient->mi_redist[afi][type], &table);
} else { } else {
if (!redist_check_instance(&bgp_zclient->mi_redist[afi][type], instance)) if (!redist_check_instance(&zclient->mi_redist[afi][type], instance))
return CMD_WARNING; return CMD_WARNING;
redist_del_instance(&bgp_zclient->mi_redist[afi][type], instance); redist_del_instance(&zclient->mi_redist[afi][type], instance);
} }
} else { } else {
if (!vrf_bitmap_check(&bgp_zclient->redist[afi][type], bgp->vrf_id)) if (!vrf_bitmap_check(&zclient->redist[afi][type], bgp->vrf_id))
return CMD_WARNING; return CMD_WARNING;
vrf_bitmap_unset(&bgp_zclient->redist[afi][type], bgp->vrf_id); vrf_bitmap_unset(&zclient->redist[afi][type], bgp->vrf_id);
} }
if (bgp_install_info_to_zebra(bgp)) { if (bgp_install_info_to_zebra(bgp)) {
@ -2237,7 +2237,7 @@ int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type,
zlog_debug("Tx redistribute del %s afi %d %s %d", zlog_debug("Tx redistribute del %s afi %d %s %d",
bgp->name_pretty, afi, bgp->name_pretty, afi,
zebra_route_string(type), instance); zebra_route_string(type), instance);
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_DELETE, bgp_zclient, afi, zebra_redistribute_send(ZEBRA_REDISTRIBUTE_DELETE, zclient, afi,
type, instance, bgp->vrf_id); type, instance, bgp->vrf_id);
} }
@ -2325,7 +2325,7 @@ void bgp_redistribute_redo(struct bgp *bgp)
void bgp_zclient_reset(void) void bgp_zclient_reset(void)
{ {
zclient_reset(bgp_zclient); zclient_reset(zclient);
} }
/* Register this instance with Zebra. Invoked upon connect (for /* Register this instance with Zebra. Invoked upon connect (for
@ -2335,14 +2335,14 @@ void bgp_zclient_reset(void)
void bgp_zebra_instance_register(struct bgp *bgp) void bgp_zebra_instance_register(struct bgp *bgp)
{ {
/* Don't try to register if we're not connected to Zebra */ /* Don't try to register if we're not connected to Zebra */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return; return;
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("Registering %s", bgp->name_pretty); zlog_debug("Registering %s", bgp->name_pretty);
/* Register for router-id, interfaces, redistributed routes. */ /* Register for router-id, interfaces, redistributed routes. */
zclient_send_reg_requests(bgp_zclient, bgp->vrf_id); zclient_send_reg_requests(zclient, bgp->vrf_id);
/* For EVPN instance, register to learn about VNIs, if appropriate. */ /* For EVPN instance, register to learn about VNIs, if appropriate. */
if (bgp->advertise_all_vni) if (bgp->advertise_all_vni)
@ -2364,7 +2364,7 @@ void bgp_zebra_instance_register(struct bgp *bgp)
void bgp_zebra_instance_deregister(struct bgp *bgp) void bgp_zebra_instance_deregister(struct bgp *bgp)
{ {
/* Don't try to deregister if we're not connected to Zebra */ /* Don't try to deregister if we're not connected to Zebra */
if (bgp_zclient->sock < 0) if (zclient->sock < 0)
return; return;
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
@ -2375,7 +2375,7 @@ void bgp_zebra_instance_deregister(struct bgp *bgp)
bgp_zebra_advertise_all_vni(bgp, 0); bgp_zebra_advertise_all_vni(bgp, 0);
/* Deregister for router-id, interfaces, redistributed routes. */ /* Deregister for router-id, interfaces, redistributed routes. */
zclient_send_dereg_requests(bgp_zclient, bgp->vrf_id); zclient_send_dereg_requests(zclient, bgp->vrf_id);
} }
void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer) void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer)
@ -2386,7 +2386,7 @@ void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer)
return; return;
/* Don't try to initiate if we're not connected to Zebra */ /* Don't try to initiate if we're not connected to Zebra */
if (bgp_zclient->sock < 0) if (zclient->sock < 0)
return; return;
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
@ -2398,7 +2398,7 @@ void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer)
* If we don't have an ifp pointer, call function to find the * If we don't have an ifp pointer, call function to find the
* ifps for a numbered enhe peer to turn RAs on. * ifps for a numbered enhe peer to turn RAs on.
*/ */
peer->ifp ? zclient_send_interface_radv_req(bgp_zclient, bgp->vrf_id, peer->ifp ? zclient_send_interface_radv_req(zclient, bgp->vrf_id,
peer->ifp, 1, ra_interval) peer->ifp, 1, ra_interval)
: bgp_nht_reg_enhe_cap_intfs(peer); : bgp_nht_reg_enhe_cap_intfs(peer);
} }
@ -2406,7 +2406,7 @@ void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer)
void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer) void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer)
{ {
/* Don't try to terminate if we're not connected to Zebra */ /* Don't try to terminate if we're not connected to Zebra */
if (bgp_zclient->sock < 0) if (zclient->sock < 0)
return; return;
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
@ -2418,7 +2418,7 @@ void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer)
* If we don't have an ifp pointer, call function to find the * If we don't have an ifp pointer, call function to find the
* ifps for a numbered enhe peer to turn RAs off. * ifps for a numbered enhe peer to turn RAs off.
*/ */
peer->ifp ? zclient_send_interface_radv_req(bgp_zclient, bgp->vrf_id, peer->ifp ? zclient_send_interface_radv_req(zclient, bgp->vrf_id,
peer->ifp, 0, 0) peer->ifp, 0, 0)
: bgp_nht_dereg_enhe_cap_intfs(peer); : bgp_nht_dereg_enhe_cap_intfs(peer);
} }
@ -2428,7 +2428,7 @@ int bgp_zebra_advertise_subnet(struct bgp *bgp, int advertise, vni_t vni)
struct stream *s = NULL; struct stream *s = NULL;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return 0; return 0;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
@ -2440,7 +2440,7 @@ int bgp_zebra_advertise_subnet(struct bgp *bgp, int advertise, vni_t vni)
return 0; return 0;
} }
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, ZEBRA_ADVERTISE_SUBNET, bgp->vrf_id); zclient_create_header(s, ZEBRA_ADVERTISE_SUBNET, bgp->vrf_id);
@ -2448,7 +2448,7 @@ int bgp_zebra_advertise_subnet(struct bgp *bgp, int advertise, vni_t vni)
stream_put3(s, vni); stream_put3(s, vni);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
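All of the small advertise/flood-control senders in this file follow the same ZAPI framing sequence: reset the client's output stream, write the command header for the target VRF, append the payload, patch the total length back into offset 0, and send. A hedged generic sketch of that sequence using the stream/zclient calls visible in these hunks (the payload here is just an example 32-bit value):

static int zapi_send_simple(struct zclient *zc, uint16_t cmd, vrf_id_t vrf_id,
			    uint32_t value)
{
	struct stream *s;

	if (!zc || zc->sock < 0)
		return 0;                      /* not connected to zebra */

	s = zc->obuf;
	stream_reset(s);
	zclient_create_header(s, cmd, vrf_id);
	stream_putl(s, value);                     /* message body */
	stream_putw_at(s, 0, stream_get_endp(s));  /* patch length at offset 0 */
	return zclient_send_message(zc);
}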
int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise, vni_t vni) int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise, vni_t vni)
@ -2456,14 +2456,14 @@ int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise, vni_t vni)
struct stream *s = NULL; struct stream *s = NULL;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return 0; return 0;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp))
return 0; return 0;
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, ZEBRA_ADVERTISE_SVI_MACIP, bgp->vrf_id); zclient_create_header(s, ZEBRA_ADVERTISE_SVI_MACIP, bgp->vrf_id);
@ -2471,7 +2471,7 @@ int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise, vni_t vni)
stream_putl(s, vni); stream_putl(s, vni);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise, vni_t vni) int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise, vni_t vni)
@ -2479,7 +2479,7 @@ int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise, vni_t vni)
struct stream *s = NULL; struct stream *s = NULL;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return 0; return 0;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
@ -2491,7 +2491,7 @@ int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise, vni_t vni)
return 0; return 0;
} }
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, ZEBRA_ADVERTISE_DEFAULT_GW, bgp->vrf_id); zclient_create_header(s, ZEBRA_ADVERTISE_DEFAULT_GW, bgp->vrf_id);
@ -2499,7 +2499,7 @@ int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise, vni_t vni)
stream_putl(s, vni); stream_putl(s, vni);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
int bgp_zebra_vxlan_flood_control(struct bgp *bgp, int bgp_zebra_vxlan_flood_control(struct bgp *bgp,
@ -2508,7 +2508,7 @@ int bgp_zebra_vxlan_flood_control(struct bgp *bgp,
struct stream *s; struct stream *s;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return 0; return 0;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
@ -2520,14 +2520,14 @@ int bgp_zebra_vxlan_flood_control(struct bgp *bgp,
return 0; return 0;
} }
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, ZEBRA_VXLAN_FLOOD_CONTROL, bgp->vrf_id); zclient_create_header(s, ZEBRA_VXLAN_FLOOD_CONTROL, bgp->vrf_id);
stream_putc(s, flood_ctrl); stream_putc(s, flood_ctrl);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise) int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise)
@ -2535,14 +2535,14 @@ int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise)
struct stream *s; struct stream *s;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return 0; return 0;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp))
return 0; return 0;
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, ZEBRA_ADVERTISE_ALL_VNI, bgp->vrf_id); zclient_create_header(s, ZEBRA_ADVERTISE_ALL_VNI, bgp->vrf_id);
@ -2553,7 +2553,7 @@ int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise)
stream_putc(s, bgp->vxlan_flood_ctrl); stream_putc(s, bgp->vxlan_flood_ctrl);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
int bgp_zebra_dup_addr_detection(struct bgp *bgp) int bgp_zebra_dup_addr_detection(struct bgp *bgp)
@ -2561,7 +2561,7 @@ int bgp_zebra_dup_addr_detection(struct bgp *bgp)
struct stream *s; struct stream *s;
/* Check socket. */ /* Check socket. */
if (!bgp_zclient || bgp_zclient->sock < 0) if (!zclient || zclient->sock < 0)
return 0; return 0;
/* Don't try to register if Zebra doesn't know of this instance. */ /* Don't try to register if Zebra doesn't know of this instance. */
@ -2578,7 +2578,7 @@ int bgp_zebra_dup_addr_detection(struct bgp *bgp)
"enable" : "disable", "enable" : "disable",
bgp->evpn_info->dad_freeze_time); bgp->evpn_info->dad_freeze_time);
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, ZEBRA_DUPLICATE_ADDR_DETECTION, zclient_create_header(s, ZEBRA_DUPLICATE_ADDR_DETECTION,
bgp->vrf_id); bgp->vrf_id);
@ -2589,7 +2589,7 @@ int bgp_zebra_dup_addr_detection(struct bgp *bgp)
stream_putl(s, bgp->evpn_info->dad_freeze_time); stream_putl(s, bgp->evpn_info->dad_freeze_time);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
return zclient_send_message(bgp_zclient); return zclient_send_message(zclient);
} }
static int rule_notify_owner(ZAPI_CALLBACK_ARGS) static int rule_notify_owner(ZAPI_CALLBACK_ARGS)
@ -3965,7 +3965,7 @@ void bgp_if_init(void)
static bool bgp_zebra_label_manager_ready(void) static bool bgp_zebra_label_manager_ready(void)
{ {
return (bgp_zclient_sync->sock > 0); return (zclient_sync->sock > 0);
} }
static void bgp_start_label_manager(struct event *start) static void bgp_start_label_manager(struct event *start)
@ -3979,29 +3979,29 @@ static void bgp_start_label_manager(struct event *start)
static bool bgp_zebra_label_manager_connect(void) static bool bgp_zebra_label_manager_connect(void)
{ {
/* Connect to label manager. */ /* Connect to label manager. */
if (zclient_socket_connect(bgp_zclient_sync) < 0) { if (zclient_socket_connect(zclient_sync) < 0) {
zlog_warn("%s: failed connecting synchronous zclient!", zlog_warn("%s: failed connecting synchronous zclient!",
__func__); __func__);
return false; return false;
} }
/* make socket non-blocking */ /* make socket non-blocking */
set_nonblocking(bgp_zclient_sync->sock); set_nonblocking(zclient_sync->sock);
/* Send hello to notify zebra this is a synchronous client */ /* Send hello to notify zebra this is a synchronous client */
if (zclient_send_hello(bgp_zclient_sync) == ZCLIENT_SEND_FAILURE) { if (zclient_send_hello(zclient_sync) == ZCLIENT_SEND_FAILURE) {
zlog_warn("%s: failed sending hello for synchronous zclient!", zlog_warn("%s: failed sending hello for synchronous zclient!",
__func__); __func__);
close(bgp_zclient_sync->sock); close(zclient_sync->sock);
bgp_zclient_sync->sock = -1; zclient_sync->sock = -1;
return false; return false;
} }
/* Connect to label manager */ /* Connect to label manager */
if (lm_label_manager_connect(bgp_zclient_sync, 0) != 0) { if (lm_label_manager_connect(zclient_sync, 0) != 0) {
zlog_warn("%s: failed connecting to label manager!", __func__); zlog_warn("%s: failed connecting to label manager!", __func__);
if (bgp_zclient_sync->sock > 0) { if (zclient_sync->sock > 0) {
close(bgp_zclient_sync->sock); close(zclient_sync->sock);
bgp_zclient_sync->sock = -1; zclient_sync->sock = -1;
} }
return false; return false;
} }
@ -4030,22 +4030,22 @@ void bgp_zebra_init(struct event_loop *master, unsigned short instance)
hook_register_prio(if_unreal, 0, bgp_ifp_destroy); hook_register_prio(if_unreal, 0, bgp_ifp_destroy);
/* Set default values. */ /* Set default values. */
bgp_zclient = zclient_new(master, &zclient_options_default, bgp_handlers, zclient = zclient_new(master, &zclient_options_default, bgp_handlers,
array_size(bgp_handlers)); array_size(bgp_handlers));
zclient_init(bgp_zclient, ZEBRA_ROUTE_BGP, 0, &bgpd_privs); zclient_init(zclient, ZEBRA_ROUTE_BGP, 0, &bgpd_privs);
bgp_zclient->zebra_buffer_write_ready = bgp_zebra_buffer_write_ready; zclient->zebra_buffer_write_ready = bgp_zebra_buffer_write_ready;
bgp_zclient->zebra_connected = bgp_zebra_connected; zclient->zebra_connected = bgp_zebra_connected;
bgp_zclient->zebra_capabilities = bgp_zebra_capabilities; zclient->zebra_capabilities = bgp_zebra_capabilities;
bgp_zclient->nexthop_update = bgp_nexthop_update; zclient->nexthop_update = bgp_nexthop_update;
bgp_zclient->instance = instance; zclient->instance = instance;
/* Initialize special zclient for synchronous message exchanges. */ /* Initialize special zclient for synchronous message exchanges. */
bgp_zclient_sync = zclient_new(master, &zclient_options_sync, NULL, 0); zclient_sync = zclient_new(master, &zclient_options_sync, NULL, 0);
bgp_zclient_sync->sock = -1; zclient_sync->sock = -1;
bgp_zclient_sync->redist_default = ZEBRA_ROUTE_BGP; zclient_sync->redist_default = ZEBRA_ROUTE_BGP;
bgp_zclient_sync->instance = instance; zclient_sync->instance = instance;
bgp_zclient_sync->session_id = 1; zclient_sync->session_id = 1;
bgp_zclient_sync->privs = &bgpd_privs; zclient_sync->privs = &bgpd_privs;
if (!bgp_zebra_label_manager_ready()) if (!bgp_zebra_label_manager_ready())
event_add_timer(master, bgp_start_label_manager, NULL, 1, event_add_timer(master, bgp_start_label_manager, NULL, 1,
@ -4054,17 +4054,17 @@ void bgp_zebra_init(struct event_loop *master, unsigned short instance)
void bgp_zebra_destroy(void) void bgp_zebra_destroy(void)
{ {
if (bgp_zclient == NULL) if (zclient == NULL)
return; return;
zclient_stop(bgp_zclient); zclient_stop(zclient);
zclient_free(bgp_zclient); zclient_free(zclient);
bgp_zclient = NULL; zclient = NULL;
if (bgp_zclient_sync == NULL) if (zclient_sync == NULL)
return; return;
zclient_stop(bgp_zclient_sync); zclient_stop(zclient_sync);
zclient_free(bgp_zclient_sync); zclient_free(zclient_sync);
bgp_zclient_sync = NULL; zclient_sync = NULL;
} }
int bgp_zebra_num_connects(void) int bgp_zebra_num_connects(void)
@ -4090,7 +4090,7 @@ void bgp_send_pbr_rule_action(struct bgp_pbr_action *pbra,
zlog_debug("%s: table %d fwmark %d %d", __func__, zlog_debug("%s: table %d fwmark %d %d", __func__,
pbra->table_id, pbra->fwmark, install); pbra->table_id, pbra->fwmark, install);
} }
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, zclient_create_header(s,
@ -4099,7 +4099,7 @@ void bgp_send_pbr_rule_action(struct bgp_pbr_action *pbra,
bgp_encode_pbr_rule_action(s, pbra, pbr); bgp_encode_pbr_rule_action(s, pbra, pbr);
if ((zclient_send_message(bgp_zclient) != ZCLIENT_SEND_FAILURE) if ((zclient_send_message(zclient) != ZCLIENT_SEND_FAILURE)
&& install) { && install) {
if (!pbr) if (!pbr)
pbra->install_in_progress = true; pbra->install_in_progress = true;
@ -4118,7 +4118,7 @@ void bgp_send_pbr_ipset_match(struct bgp_pbr_match *pbrim, bool install)
zlog_debug("%s: name %s type %d %d, ID %u", __func__, zlog_debug("%s: name %s type %d %d, ID %u", __func__,
pbrim->ipset_name, pbrim->type, install, pbrim->ipset_name, pbrim->type, install,
pbrim->unique); pbrim->unique);
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, zclient_create_header(s,
@ -4131,7 +4131,7 @@ void bgp_send_pbr_ipset_match(struct bgp_pbr_match *pbrim, bool install)
bgp_encode_pbr_ipset_match(s, pbrim); bgp_encode_pbr_ipset_match(s, pbrim);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
if ((zclient_send_message(bgp_zclient) != ZCLIENT_SEND_FAILURE) && install) if ((zclient_send_message(zclient) != ZCLIENT_SEND_FAILURE) && install)
pbrim->install_in_progress = true; pbrim->install_in_progress = true;
} }
@ -4146,7 +4146,7 @@ void bgp_send_pbr_ipset_entry_match(struct bgp_pbr_match_entry *pbrime,
zlog_debug("%s: name %s %d %d, ID %u", __func__, zlog_debug("%s: name %s %d %d, ID %u", __func__,
pbrime->backpointer->ipset_name, pbrime->unique, pbrime->backpointer->ipset_name, pbrime->unique,
install, pbrime->unique); install, pbrime->unique);
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, zclient_create_header(s,
@ -4159,7 +4159,7 @@ void bgp_send_pbr_ipset_entry_match(struct bgp_pbr_match_entry *pbrime,
bgp_encode_pbr_ipset_entry_match(s, pbrime); bgp_encode_pbr_ipset_entry_match(s, pbrime);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
if ((zclient_send_message(bgp_zclient) != ZCLIENT_SEND_FAILURE) && install) if ((zclient_send_message(zclient) != ZCLIENT_SEND_FAILURE) && install)
pbrime->install_in_progress = true; pbrime->install_in_progress = true;
} }
@ -4218,7 +4218,7 @@ void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
zlog_debug("%s: name %s type %d mark %d %d, ID %u", __func__, zlog_debug("%s: name %s type %d mark %d %d, ID %u", __func__,
pbm->ipset_name, pbm->type, pba->fwmark, install, pbm->ipset_name, pbm->type, pba->fwmark, install,
pbm->unique2); pbm->unique2);
s = bgp_zclient->obuf; s = zclient->obuf;
stream_reset(s); stream_reset(s);
zclient_create_header(s, zclient_create_header(s,
@ -4232,7 +4232,7 @@ void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
if (nb_interface) if (nb_interface)
bgp_encode_pbr_interface_list(pba->bgp, s, pbm->family); bgp_encode_pbr_interface_list(pba->bgp, s, pbm->family);
stream_putw_at(s, 0, stream_get_endp(s)); stream_putw_at(s, 0, stream_get_endp(s));
ret = zclient_send_message(bgp_zclient); ret = zclient_send_message(zclient);
if (install) { if (install) {
if (ret != ZCLIENT_SEND_FAILURE) if (ret != ZCLIENT_SEND_FAILURE)
pba->refcnt++; pba->refcnt++;
@ -4319,7 +4319,7 @@ void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
} }
zclient_route_send(announce ? ZEBRA_ROUTE_ADD : ZEBRA_ROUTE_DELETE, zclient_route_send(announce ? ZEBRA_ROUTE_ADD : ZEBRA_ROUTE_DELETE,
bgp_zclient, &api); zclient, &api);
} }
/* Send capabilities to RIB */ /* Send capabilities to RIB */
@ -4332,7 +4332,7 @@ int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable)
zlog_debug("%s: Sending %sable for %s", __func__, zlog_debug("%s: Sending %sable for %s", __func__,
disable ? "dis" : "en", bgp->name_pretty); disable ? "dis" : "en", bgp->name_pretty);
if (bgp_zclient == NULL) { if (zclient == NULL) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s zclient invalid", __func__, zlog_debug("%s: %s zclient invalid", __func__,
bgp->name_pretty); bgp->name_pretty);
@ -4340,7 +4340,7 @@ int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable)
} }
/* Check if the client is connected */ /* Check if the client is connected */
if ((bgp_zclient->sock < 0) || (bgp_zclient->t_connect)) { if ((zclient->sock < 0) || (zclient->t_connect)) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s client not connected", __func__, zlog_debug("%s: %s client not connected", __func__,
bgp->name_pretty); bgp->name_pretty);
@ -4365,7 +4365,7 @@ int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable)
api.vrf_id = bgp->vrf_id; api.vrf_id = bgp->vrf_id;
} }
if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, bgp_zclient, &api) if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient, &api)
== ZCLIENT_SEND_FAILURE) { == ZCLIENT_SEND_FAILURE) {
zlog_err("%s(%d): Error sending GR capability to zebra", zlog_err("%s(%d): Error sending GR capability to zebra",
bgp->name_pretty, bgp->vrf_id); bgp->name_pretty, bgp->vrf_id);
@ -4394,7 +4394,7 @@ int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi,
bgp->name_pretty, afi, safi, bgp->name_pretty, afi, safi,
zserv_gr_client_cap_string(type)); zserv_gr_client_cap_string(type));
if (bgp_zclient == NULL) { if (zclient == NULL) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s zclient == NULL, invalid", __func__, zlog_debug("%s: %s zclient == NULL, invalid", __func__,
bgp->name_pretty); bgp->name_pretty);
@ -4402,7 +4402,7 @@ int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi,
} }
/* Check if the client is connected */ /* Check if the client is connected */
if ((bgp_zclient->sock < 0) || (bgp_zclient->t_connect)) { if ((zclient->sock < 0) || (zclient->t_connect)) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s client not connected", __func__, zlog_debug("%s: %s client not connected", __func__,
bgp->name_pretty); bgp->name_pretty);
@ -4414,7 +4414,7 @@ int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi,
api.vrf_id = bgp->vrf_id; api.vrf_id = bgp->vrf_id;
api.cap = type; api.cap = type;
if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, bgp_zclient, &api) if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient, &api)
== ZCLIENT_SEND_FAILURE) { == ZCLIENT_SEND_FAILURE) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s error sending capability", __func__, zlog_debug("%s: %s error sending capability", __func__,
@ -4434,14 +4434,14 @@ int bgp_zebra_stale_timer_update(struct bgp *bgp)
zlog_debug("%s: %s Timer Update to %u", __func__, zlog_debug("%s: %s Timer Update to %u", __func__,
bgp->name_pretty, bgp->rib_stale_time); bgp->name_pretty, bgp->rib_stale_time);
if (bgp_zclient == NULL) { if (zclient == NULL) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("zclient invalid"); zlog_debug("zclient invalid");
return BGP_GR_FAILURE; return BGP_GR_FAILURE;
} }
/* Check if the client is connected */ /* Check if the client is connected */
if ((bgp_zclient->sock < 0) || (bgp_zclient->t_connect)) { if ((zclient->sock < 0) || (zclient->t_connect)) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s client not connected", __func__, zlog_debug("%s: %s client not connected", __func__,
bgp->name_pretty); bgp->name_pretty);
@ -4452,7 +4452,7 @@ int bgp_zebra_stale_timer_update(struct bgp *bgp)
api.cap = ZEBRA_CLIENT_RIB_STALE_TIME; api.cap = ZEBRA_CLIENT_RIB_STALE_TIME;
api.stale_removal_time = bgp->rib_stale_time; api.stale_removal_time = bgp->rib_stale_time;
api.vrf_id = bgp->vrf_id; api.vrf_id = bgp->vrf_id;
if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, bgp_zclient, &api) if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient, &api)
== ZCLIENT_SEND_FAILURE) { == ZCLIENT_SEND_FAILURE) {
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: %s error sending capability", __func__, zlog_debug("%s: %s error sending capability", __func__,
@ -4465,12 +4465,12 @@ int bgp_zebra_stale_timer_update(struct bgp *bgp)
int bgp_zebra_srv6_manager_get_locator_chunk(const char *name) int bgp_zebra_srv6_manager_get_locator_chunk(const char *name)
{ {
return srv6_manager_get_locator_chunk(bgp_zclient, name); return srv6_manager_get_locator_chunk(zclient, name);
} }
int bgp_zebra_srv6_manager_release_locator_chunk(const char *name) int bgp_zebra_srv6_manager_release_locator_chunk(const char *name)
{ {
return srv6_manager_release_locator_chunk(bgp_zclient, name); return srv6_manager_release_locator_chunk(zclient, name);
} }
/** /**
@ -4488,7 +4488,7 @@ int bgp_zebra_srv6_manager_get_locator(const char *name)
* Send the Get Locator request to the SRv6 Manager and return the * Send the Get Locator request to the SRv6 Manager and return the
* result * result
*/ */
return srv6_manager_get_locator(bgp_zclient, name); return srv6_manager_get_locator(zclient, name);
} }
/** /**
@ -4520,7 +4520,7 @@ bool bgp_zebra_request_srv6_sid(const struct srv6_sid_ctx *ctx,
* Send the Get SRv6 SID request to the SRv6 Manager and check the * Send the Get SRv6 SID request to the SRv6 Manager and check the
* result * result
*/ */
ret = srv6_manager_get_sid(bgp_zclient, ctx, sid_value, locator_name, ret = srv6_manager_get_sid(zclient, ctx, sid_value, locator_name,
sid_func); sid_func);
if (ret < 0) { if (ret < 0) {
zlog_warn("%s: error getting SRv6 SID!", __func__); zlog_warn("%s: error getting SRv6 SID!", __func__);
@ -4549,7 +4549,7 @@ void bgp_zebra_release_srv6_sid(const struct srv6_sid_ctx *ctx)
* Send the Release SRv6 SID request to the SRv6 Manager and check the * Send the Release SRv6 SID request to the SRv6 Manager and check the
* result * result
*/ */
ret = srv6_manager_release_sid(bgp_zclient, ctx); ret = srv6_manager_release_sid(zclient, ctx);
if (ret < 0) { if (ret < 0) {
zlog_warn("%s: error releasing SRv6 SID!", __func__); zlog_warn("%s: error releasing SRv6 SID!", __func__);
return; return;
@ -4592,7 +4592,7 @@ void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
znh->labels[i] = out_labels[i]; znh->labels[i] = out_labels[i];
} }
/* vrf_id is DEFAULT_VRF */ /* vrf_id is DEFAULT_VRF */
zebra_send_mpls_labels(bgp_zclient, cmd, &zl); zebra_send_mpls_labels(zclient, cmd, &zl);
} }
bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size, bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
@ -4601,10 +4601,10 @@ bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
int ret; int ret;
uint32_t start, end; uint32_t start, end;
if (!bgp_zclient_sync || !bgp_zebra_label_manager_ready()) if (!zclient_sync || !bgp_zebra_label_manager_ready())
return false; return false;
ret = lm_get_label_chunk(bgp_zclient_sync, 0, base, chunk_size, &start, ret = lm_get_label_chunk(zclient_sync, 0, base, chunk_size, &start,
&end); &end);
if (ret < 0) { if (ret < 0) {
zlog_warn("%s: error getting label range!", __func__); zlog_warn("%s: error getting label range!", __func__);
@ -4633,10 +4633,10 @@ void bgp_zebra_release_label_range(uint32_t start, uint32_t end)
{ {
int ret; int ret;
if (!bgp_zclient_sync || !bgp_zebra_label_manager_ready()) if (!zclient_sync || !bgp_zebra_label_manager_ready())
return; return;
ret = lm_release_label_chunk(bgp_zclient_sync, start, end); ret = lm_release_label_chunk(zclient_sync, start, end);
if (ret < 0) if (ret < 0)
zlog_warn("%s: error releasing label range!", __func__); zlog_warn("%s: error releasing label range!", __func__);
} }
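Note: the label-manager calls in the hunks above go through a second, synchronous zclient session rather than the regular async one. The following is a minimal sketch of that connect/request/release flow, assuming a libfrr build environment; it only uses calls that appear in the hunks above, but the function name, header paths and the omission of error logging are my own simplifications, so treat it as an illustration rather than the bgp_zebra.c implementation.

#include <zebra.h>
#include "lib/zclient.h"
#include "lib/network.h"

/* Sketch: bring up a synchronous zclient session and exercise the label
 * manager. 'zc' is assumed to have been created with
 * zclient_new(..., &zclient_options_sync, NULL, 0) and given a non-zero
 * session_id, as done in the init code above.
 */
static bool label_range_sketch(struct zclient *zc, uint32_t base, uint32_t size)
{
        uint32_t start, end;

        /* Connect the synchronous session and make the socket non-blocking */
        if (zclient_socket_connect(zc) < 0)
                return false;
        set_nonblocking(zc->sock);

        /* Tell zebra this is a synchronous client */
        if (zclient_send_hello(zc) == ZCLIENT_SEND_FAILURE)
                return false;

        /* Register with the label manager, then request and release a chunk */
        if (lm_label_manager_connect(zc, 0) != 0)
                return false;
        if (lm_get_label_chunk(zc, 0, base, size, &start, &end) < 0)
                return false;

        lm_release_label_chunk(zc, start, end);
        return true;
}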

@ -8,9 +8,6 @@
#include "vxlan.h" #include "vxlan.h"
/* The global zapi session handle */
extern struct zclient *bgp_zclient;
/* Macro to update bgp_original based on bpg_path_info */ /* Macro to update bgp_original based on bpg_path_info */
#define BGP_ORIGINAL_UPDATE(_bgp_orig, _mpinfo, _bgp) \ #define BGP_ORIGINAL_UPDATE(_bgp_orig, _mpinfo, _bgp) \
((_mpinfo->extra && _mpinfo->extra->vrfleak && \ ((_mpinfo->extra && _mpinfo->extra->vrfleak && \

@ -88,22 +88,6 @@ DEFINE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp));
DEFINE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp)); DEFINE_HOOK(bgp_instance_state, (struct bgp *bgp), (bgp));
DEFINE_HOOK(bgp_routerid_update, (struct bgp *bgp, bool withdraw), (bgp, withdraw)); DEFINE_HOOK(bgp_routerid_update, (struct bgp *bgp, bool withdraw), (bgp, withdraw));
/* Peers with connection error/failure, per bgp instance */
DECLARE_DLIST(bgp_peer_conn_errlist, struct peer_connection, conn_err_link);
/* List of info about peers that are being cleared from BGP RIBs in a batch */
DECLARE_DLIST(bgp_clearing_info, struct bgp_clearing_info, link);
/* List of dests that need to be processed in a clearing batch */
DECLARE_LIST(bgp_clearing_destlist, struct bgp_clearing_dest, link);
/* Hash of peers in clearing info object */
static int peer_clearing_hash_cmp(const struct peer *p1, const struct peer *p2);
static uint32_t peer_clearing_hashfn(const struct peer *p1);
DECLARE_HASH(bgp_clearing_hash, struct peer, clear_hash_link,
peer_clearing_hash_cmp, peer_clearing_hashfn);
/* BGP process wide configuration. */ /* BGP process wide configuration. */
static struct bgp_master bgp_master; static struct bgp_master bgp_master;
@ -121,7 +105,7 @@ unsigned int bgp_suppress_fib_count;
static void bgp_if_finish(struct bgp *bgp); static void bgp_if_finish(struct bgp *bgp);
static void peer_drop_dynamic_neighbor(struct peer *peer); static void peer_drop_dynamic_neighbor(struct peer *peer);
extern struct zclient *bgp_zclient; extern struct zclient *zclient;
/* handle main socket creation or deletion */ /* handle main socket creation or deletion */
static int bgp_check_main_socket(bool create, struct bgp *bgp) static int bgp_check_main_socket(bool create, struct bgp *bgp)
@ -447,9 +431,9 @@ void bm_wait_for_fib_set(bool set)
send_msg = true; send_msg = true;
} }
if (send_msg && bgp_zclient) if (send_msg && zclient)
zebra_route_notify_send(ZEBRA_ROUTE_NOTIFY_REQUEST, zebra_route_notify_send(ZEBRA_ROUTE_NOTIFY_REQUEST,
bgp_zclient, set); zclient, set);
/* /*
* If this is configed at a time when peers are already set * If this is configed at a time when peers are already set
@ -507,9 +491,9 @@ void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set)
if (BGP_DEBUG(zebra, ZEBRA)) if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("Sending ZEBRA_ROUTE_NOTIFY_REQUEST"); zlog_debug("Sending ZEBRA_ROUTE_NOTIFY_REQUEST");
if (bgp_zclient) if (zclient)
zebra_route_notify_send(ZEBRA_ROUTE_NOTIFY_REQUEST, zebra_route_notify_send(ZEBRA_ROUTE_NOTIFY_REQUEST,
bgp_zclient, set); zclient, set);
} }
/* /*
@ -1196,22 +1180,6 @@ void bgp_peer_connection_free(struct peer_connection **connection)
connection = NULL; connection = NULL;
} }
const char *bgp_peer_get_connection_direction(struct peer_connection *connection)
{
switch (connection->dir) {
case UNKNOWN:
return "Unknown";
case CONNECTION_INCOMING:
return "Incoming";
case CONNECTION_OUTGOING:
return "Outgoing";
case ESTABLISHED:
return "Established";
}
assert(!"DEV Escape: Expected switch to take care of this state");
}
struct peer_connection *bgp_peer_connection_new(struct peer *peer) struct peer_connection *bgp_peer_connection_new(struct peer *peer)
{ {
struct peer_connection *connection; struct peer_connection *connection;
@ -1559,7 +1527,6 @@ struct peer *peer_new(struct bgp *bgp)
/* Create buffers. */ /* Create buffers. */
peer->connection = bgp_peer_connection_new(peer); peer->connection = bgp_peer_connection_new(peer);
peer->connection->dir = CONNECTION_OUTGOING;
/* Set default value. */ /* Set default value. */
peer->v_start = BGP_INIT_START_TIMER; peer->v_start = BGP_INIT_START_TIMER;
@ -1976,7 +1943,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
enum peer_asn_type as_type, struct peer_group *group, enum peer_asn_type as_type, struct peer_group *group,
bool config_node, const char *as_str) bool config_node, const char *as_str)
{ {
enum bgp_peer_active active; int active;
struct peer *peer; struct peer *peer;
char buf[SU_ADDRSTRLEN]; char buf[SU_ADDRSTRLEN];
afi_t afi; afi_t afi;
@ -2030,7 +1997,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
} }
active = peer_active(peer->connection); active = peer_active(peer->connection);
if (active != BGP_PEER_ACTIVE) { if (!active) {
if (peer->connection->su.sa.sa_family == AF_UNSPEC) if (peer->connection->su.sa.sa_family == AF_UNSPEC)
peer->last_reset = PEER_DOWN_NBR_ADDR; peer->last_reset = PEER_DOWN_NBR_ADDR;
else else
@ -2062,7 +2029,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
if (bgp->autoshutdown) if (bgp->autoshutdown)
peer_flag_set(peer, PEER_FLAG_SHUTDOWN); peer_flag_set(peer, PEER_FLAG_SHUTDOWN);
/* Set up peer's events and timers. */ /* Set up peer's events and timers. */
else if (active != BGP_PEER_ACTIVE && peer_active(peer->connection) == BGP_PEER_ACTIVE) { else if (!active && peer_active(peer->connection)) {
if (peer->last_reset == PEER_DOWN_NOAFI_ACTIVATED) if (peer->last_reset == PEER_DOWN_NOAFI_ACTIVATED)
peer->last_reset = 0; peer->last_reset = 0;
bgp_timer_set(peer->connection); bgp_timer_set(peer->connection);
@ -2435,7 +2402,7 @@ static void peer_group2peer_config_copy_af(struct peer_group *group,
static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi) static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi)
{ {
enum bgp_peer_active active; int active;
struct peer *other; struct peer *other;
if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
@ -2463,7 +2430,7 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi)
if (peer->group) if (peer->group)
peer_group2peer_config_copy_af(peer->group, peer, afi, safi); peer_group2peer_config_copy_af(peer->group, peer, afi, safi);
if (active != BGP_PEER_ACTIVE && peer_active(peer->connection) == BGP_PEER_ACTIVE) { if (!active && peer_active(peer->connection)) {
bgp_timer_set(peer->connection); bgp_timer_set(peer->connection);
} else { } else {
peer->last_reset = PEER_DOWN_AF_ACTIVATE; peer->last_reset = PEER_DOWN_AF_ACTIVATE;
@ -2713,9 +2680,6 @@ int peer_delete(struct peer *peer)
assert(peer->connection->status != Deleted); assert(peer->connection->status != Deleted);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s: peer %pBP", __func__, peer);
bgp = peer->bgp; bgp = peer->bgp;
accept_peer = CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER); accept_peer = CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
@ -2731,13 +2695,6 @@ int peer_delete(struct peer *peer)
PEER_THREAD_READS_ON)); PEER_THREAD_READS_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON)); assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
/* Ensure the peer is removed from the connection error list */
frr_with_mutex (&bgp->peer_errs_mtx) {
if (bgp_peer_conn_errlist_anywhere(peer->connection))
bgp_peer_conn_errlist_del(&bgp->peer_conn_errlist,
peer->connection);
}
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT))
peer_nsf_stop(peer); peer_nsf_stop(peer);
@ -3417,7 +3374,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer,
} }
/* Set up peer's events and timers. */ /* Set up peer's events and timers. */
if (peer_active(peer->connection) == BGP_PEER_ACTIVE) if (peer_active(peer->connection))
bgp_timer_set(peer->connection); bgp_timer_set(peer->connection);
} }
@ -3589,7 +3546,6 @@ peer_init:
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent =
MPLS_LABEL_NONE; MPLS_LABEL_NONE;
if (!bgp->vpn_policy[afi].import_vrf)
bgp->vpn_policy[afi].import_vrf = list_new(); bgp->vpn_policy[afi].import_vrf = list_new();
bgp->vpn_policy[afi].import_vrf->del = bgp->vpn_policy[afi].import_vrf->del =
bgp_vrf_string_name_delete; bgp_vrf_string_name_delete;
@ -3608,7 +3564,7 @@ peer_init:
bgp_mplsvpn_nh_label_bind_cache_init(&bgp->mplsvpn_nh_label_bind); bgp_mplsvpn_nh_label_bind_cache_init(&bgp->mplsvpn_nh_label_bind);
if (name && !bgp->name) if (name)
bgp->name = XSTRDUP(MTYPE_BGP_NAME, name); bgp->name = XSTRDUP(MTYPE_BGP_NAME, name);
event_add_timer(bm->master, bgp_startup_timer_expire, bgp, event_add_timer(bm->master, bgp_startup_timer_expire, bgp,
@ -3664,11 +3620,6 @@ peer_init:
memset(&bgp->ebgprequirespolicywarning, 0, memset(&bgp->ebgprequirespolicywarning, 0,
sizeof(bgp->ebgprequirespolicywarning)); sizeof(bgp->ebgprequirespolicywarning));
/* Init peer connection error info */
pthread_mutex_init(&bgp->peer_errs_mtx, NULL);
bgp_peer_conn_errlist_init(&bgp->peer_conn_errlist);
bgp_clearing_info_init(&bgp->clearing_list);
return bgp; return bgp;
} }
@ -3831,7 +3782,6 @@ int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, const char *as_pr
hidden); hidden);
UNSET_FLAG(bgp->flags, UNSET_FLAG(bgp->flags,
BGP_FLAG_INSTANCE_HIDDEN); BGP_FLAG_INSTANCE_HIDDEN);
UNSET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS);
} else { } else {
bgp->as = *as; bgp->as = *as;
if (force_config == false) if (force_config == false)
@ -3929,16 +3879,16 @@ static void bgp_zclient_set_redist(afi_t afi, int type, unsigned short instance,
{ {
if (instance) { if (instance) {
if (set) if (set)
redist_add_instance(&bgp_zclient->mi_redist[afi][type], redist_add_instance(&zclient->mi_redist[afi][type],
instance); instance);
else else
redist_del_instance(&bgp_zclient->mi_redist[afi][type], redist_del_instance(&zclient->mi_redist[afi][type],
instance); instance);
} else { } else {
if (set) if (set)
vrf_bitmap_set(&bgp_zclient->redist[afi][type], vrf_id); vrf_bitmap_set(&zclient->redist[afi][type], vrf_id);
else else
vrf_bitmap_unset(&bgp_zclient->redist[afi][type], vrf_id); vrf_bitmap_unset(&zclient->redist[afi][type], vrf_id);
} }
} }
@ -4052,13 +4002,11 @@ int bgp_delete(struct bgp *bgp)
struct bgp *bgp_to_proc = NULL; struct bgp *bgp_to_proc = NULL;
struct bgp *bgp_to_proc_next = NULL; struct bgp *bgp_to_proc_next = NULL;
struct bgp *bgp_default = bgp_get_default(); struct bgp *bgp_default = bgp_get_default();
struct bgp_clearing_info *cinfo;
struct peer_connection *connection;
assert(bgp); assert(bgp);
/* /*
* Iterate the pending dest list and remove all the dest pertaining to * Iterate the pending dest list and remove all the dest pertaininig to
* the bgp under delete. * the bgp under delete.
*/ */
b_ann_cnt = zebra_announce_count(&bm->zebra_announce_head); b_ann_cnt = zebra_announce_count(&bm->zebra_announce_head);
@ -4104,10 +4052,6 @@ int bgp_delete(struct bgp *bgp)
a_l3_cnt); a_l3_cnt);
} }
/* Cleanup for peer connection batching */
while ((cinfo = bgp_clearing_info_first(&bgp->clearing_list)) != NULL)
bgp_clearing_batch_completed(cinfo);
bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL); bgp_soft_reconfig_table_task_cancel(bgp, NULL, NULL);
/* make sure we withdraw any exported routes */ /* make sure we withdraw any exported routes */
@ -4154,7 +4098,6 @@ int bgp_delete(struct bgp *bgp)
EVENT_OFF(bgp->t_maxmed_onstartup); EVENT_OFF(bgp->t_maxmed_onstartup);
EVENT_OFF(bgp->t_update_delay); EVENT_OFF(bgp->t_update_delay);
EVENT_OFF(bgp->t_establish_wait); EVENT_OFF(bgp->t_establish_wait);
EVENT_OFF(bgp->clearing_end);
/* Set flag indicating bgp instance delete in progress */ /* Set flag indicating bgp instance delete in progress */
SET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS); SET_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS);
@ -4233,56 +4176,26 @@ int bgp_delete(struct bgp *bgp)
if (i != ZEBRA_ROUTE_BGP) if (i != ZEBRA_ROUTE_BGP)
bgp_redistribute_unset(bgp, afi, i, 0); bgp_redistribute_unset(bgp, afi, i, 0);
/* Clear list of peers with connection errors - each
* peer will need to check again, in case the io pthread is racing
* with us, but this batch cleanup should make the per-peer check
* cheaper.
*/
frr_with_mutex (&bgp->peer_errs_mtx) {
do {
connection = bgp_peer_conn_errlist_pop(
&bgp->peer_conn_errlist);
} while (connection != NULL);
}
/* Free peers and peer-groups. */ /* Free peers and peer-groups. */
for (ALL_LIST_ELEMENTS(bgp->group, node, next, group)) for (ALL_LIST_ELEMENTS(bgp->group, node, next, group))
peer_group_delete(group); peer_group_delete(group);
while (listcount(bgp->peer)) { while (listcount(bgp->peer)) {
peer = listnode_head(bgp->peer); peer = listnode_head(bgp->peer);
if (peer->ifp || CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_ENHE))
bgp_zebra_terminate_radv(peer->bgp, peer);
if (BGP_PEER_GRACEFUL_RESTART_CAPABLE(peer)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP configured Graceful-Restart, skipping unconfig notification",
peer);
peer_delete(peer);
} else {
peer_notify_unconfig(peer->connection);
peer_delete(peer); peer_delete(peer);
} }
}
if (bgp->peer_self && (!IS_BGP_INSTANCE_HIDDEN(bgp) || bm->terminating)) { if (bgp->peer_self && !IS_BGP_INSTANCE_HIDDEN(bgp)) {
peer_delete(bgp->peer_self); peer_delete(bgp->peer_self);
bgp->peer_self = NULL; bgp->peer_self = NULL;
} }
update_bgp_group_free(bgp); update_bgp_group_free(bgp);
/* Cancel peer connection errors event */
EVENT_OFF(bgp->t_conn_errors);
/* Cleanup for peer connection batching */
while ((cinfo = bgp_clearing_info_pop(&bgp->clearing_list)) != NULL)
bgp_clearing_batch_completed(cinfo);
/* TODO - Other memory may need to be freed - e.g., NHT */ /* TODO - Other memory may need to be freed - e.g., NHT */
#ifdef ENABLE_BGP_VNC #ifdef ENABLE_BGP_VNC
if (!IS_BGP_INSTANCE_HIDDEN(bgp) || bm->terminating) if (!IS_BGP_INSTANCE_HIDDEN(bgp))
rfapi_delete(bgp); rfapi_delete(bgp);
#endif #endif
@ -4290,7 +4203,8 @@ int bgp_delete(struct bgp *bgp)
FOREACH_AFI_SAFI (afi, safi) { FOREACH_AFI_SAFI (afi, safi) {
struct bgp_aggregate *aggregate = NULL; struct bgp_aggregate *aggregate = NULL;
for (dest = bgp_table_top(bgp->aggregate[afi][safi]); for (struct bgp_dest *dest =
bgp_table_top(bgp->aggregate[afi][safi]);
dest; dest = bgp_route_next(dest)) { dest; dest = bgp_route_next(dest)) {
aggregate = bgp_dest_get_bgp_aggregate_info(dest); aggregate = bgp_dest_get_bgp_aggregate_info(dest);
if (aggregate == NULL) if (aggregate == NULL)
@ -4332,7 +4246,7 @@ int bgp_delete(struct bgp *bgp)
bgp_zebra_instance_deregister(bgp); bgp_zebra_instance_deregister(bgp);
} }
if (!IS_BGP_INSTANCE_HIDDEN(bgp) || bm->terminating) { if (!IS_BGP_INSTANCE_HIDDEN(bgp)) {
/* Remove visibility via the master list - /* Remove visibility via the master list -
* there may however still be routes to be processed * there may however still be routes to be processed
* still referencing the struct bgp. * still referencing the struct bgp.
@ -4344,7 +4258,7 @@ int bgp_delete(struct bgp *bgp)
vrf = bgp_vrf_lookup_by_instance_type(bgp); vrf = bgp_vrf_lookup_by_instance_type(bgp);
bgp_handle_socket(bgp, vrf, VRF_UNKNOWN, false); bgp_handle_socket(bgp, vrf, VRF_UNKNOWN, false);
if (vrf && (!IS_BGP_INSTANCE_HIDDEN(bgp) || bm->terminating)) if (vrf && !IS_BGP_INSTANCE_HIDDEN(bgp))
bgp_vrf_unlink(bgp, vrf); bgp_vrf_unlink(bgp, vrf);
/* Update EVPN VRF pointer */ /* Update EVPN VRF pointer */
@ -4355,7 +4269,7 @@ int bgp_delete(struct bgp *bgp)
bgp_set_evpn(bgp_get_default()); bgp_set_evpn(bgp_get_default());
} }
if (!IS_BGP_INSTANCE_HIDDEN(bgp) || bm->terminating) { if (!IS_BGP_INSTANCE_HIDDEN(bgp)) {
if (bgp->process_queue) if (bgp->process_queue)
work_queue_free_and_null(&bgp->process_queue); work_queue_free_and_null(&bgp->process_queue);
bgp_unlock(bgp); /* initial reference */ bgp_unlock(bgp); /* initial reference */
@ -4453,9 +4367,6 @@ void bgp_free(struct bgp *bgp)
bgp_srv6_cleanup(bgp); bgp_srv6_cleanup(bgp);
bgp_confederation_id_unset(bgp); bgp_confederation_id_unset(bgp);
bgp_peer_conn_errlist_init(&bgp->peer_conn_errlist);
pthread_mutex_destroy(&bgp->peer_errs_mtx);
for (int i = 0; i < bgp->confed_peers_cnt; i++) for (int i = 0; i < bgp->confed_peers_cnt; i++)
XFREE(MTYPE_BGP_NAME, bgp->confed_peers[i].as_pretty); XFREE(MTYPE_BGP_NAME, bgp->confed_peers[i].as_pretty);
@ -4781,16 +4692,16 @@ bool bgp_path_attribute_treat_as_withdraw(struct peer *peer, char *buf,
} }
/* If peer is configured at least one address family return 1. */ /* If peer is configured at least one address family return 1. */
enum bgp_peer_active peer_active(struct peer_connection *connection) bool peer_active(struct peer_connection *connection)
{ {
struct peer *peer = connection->peer; struct peer *peer = connection->peer;
if (BGP_CONNECTION_SU_UNSPEC(connection)) if (BGP_CONNECTION_SU_UNSPEC(connection))
return BGP_PEER_CONNECTION_UNSPECIFIED; return false;
if (peer->bfd_config) { if (peer->bfd_config) {
if (peer_established(connection) && bfd_session_is_down(peer->bfd_config->session)) if (bfd_session_is_down(peer->bfd_config->session))
return BGP_PEER_BFD_DOWN; return false;
} }
if (peer->afc[AFI_IP][SAFI_UNICAST] || peer->afc[AFI_IP][SAFI_MULTICAST] if (peer->afc[AFI_IP][SAFI_UNICAST] || peer->afc[AFI_IP][SAFI_MULTICAST]
@ -4804,9 +4715,8 @@ enum bgp_peer_active peer_active(struct peer_connection *connection)
|| peer->afc[AFI_IP6][SAFI_ENCAP] || peer->afc[AFI_IP6][SAFI_ENCAP]
|| peer->afc[AFI_IP6][SAFI_FLOWSPEC] || peer->afc[AFI_IP6][SAFI_FLOWSPEC]
|| peer->afc[AFI_L2VPN][SAFI_EVPN]) || peer->afc[AFI_L2VPN][SAFI_EVPN])
return BGP_PEER_ACTIVE; return true;
return false;
return BGP_PEER_AF_UNCONFIGURED;
} }
/* If peer is negotiated at least one address family return 1. */ /* If peer is negotiated at least one address family return 1. */
@ -6490,7 +6400,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
/* Skip peer-group mechanics for regular peers. */ /* Skip peer-group mechanics for regular peers. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
if (!peer_established(peer->connection)) { if (!peer_established(peer->connection)) {
if (peer_active(peer->connection) == BGP_PEER_ACTIVE) if (peer_active(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start); BGP_EVENT_ADD(peer->connection, BGP_Start);
} }
@ -6511,7 +6421,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect)
member->v_connect = connect; member->v_connect = connect;
if (!peer_established(member->connection)) { if (!peer_established(member->connection)) {
if (peer_active(member->connection) == BGP_PEER_ACTIVE) if (peer_active(member->connection))
BGP_EVENT_ADD(member->connection, BGP_Stop); BGP_EVENT_ADD(member->connection, BGP_Stop);
BGP_EVENT_ADD(member->connection, BGP_Start); BGP_EVENT_ADD(member->connection, BGP_Start);
} }
@ -6544,7 +6454,7 @@ int peer_timers_connect_unset(struct peer *peer)
/* Skip peer-group mechanics for regular peers. */ /* Skip peer-group mechanics for regular peers. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
if (!peer_established(peer->connection)) { if (!peer_established(peer->connection)) {
if (peer_active(peer->connection) == BGP_PEER_ACTIVE) if (peer_active(peer->connection))
BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start); BGP_EVENT_ADD(peer->connection, BGP_Start);
} }
@ -6565,7 +6475,7 @@ int peer_timers_connect_unset(struct peer *peer)
member->v_connect = peer->bgp->default_connect_retry; member->v_connect = peer->bgp->default_connect_retry;
if (!peer_established(member->connection)) { if (!peer_established(member->connection)) {
if (peer_active(member->connection) == BGP_PEER_ACTIVE) if (peer_active(member->connection))
BGP_EVENT_ADD(member->connection, BGP_Stop); BGP_EVENT_ADD(member->connection, BGP_Stop);
BGP_EVENT_ADD(member->connection, BGP_Start); BGP_EVENT_ADD(member->connection, BGP_Start);
} }
@ -8683,10 +8593,6 @@ void bgp_master_init(struct event_loop *master, const int buffer_size,
bm = &bgp_master; bm = &bgp_master;
/* Initialize the peer connection FIFO list */
peer_connection_fifo_init(&bm->connection_fifo);
pthread_mutex_init(&bm->peer_connection_mtx, NULL);
zebra_announce_init(&bm->zebra_announce_head); zebra_announce_init(&bm->zebra_announce_head);
zebra_l2_vni_init(&bm->zebra_l2_vni_head); zebra_l2_vni_init(&bm->zebra_l2_vni_head);
zebra_l3_vni_init(&bm->zebra_l3_vni_head); zebra_l3_vni_init(&bm->zebra_l3_vni_head);
@ -8716,11 +8622,6 @@ void bgp_master_init(struct event_loop *master, const int buffer_size,
bm->t_bgp_zebra_l2_vni = NULL; bm->t_bgp_zebra_l2_vni = NULL;
bm->t_bgp_zebra_l3_vni = NULL; bm->t_bgp_zebra_l3_vni = NULL;
bm->peer_clearing_batch_id = 1;
/* TODO -- make these configurable */
bm->peer_conn_errs_dequeue_limit = BGP_CONN_ERROR_DEQUEUE_MAX;
bm->peer_clearing_batch_max_dests = BGP_CLEARING_BATCH_MAX_DESTS;
bgp_mac_init(); bgp_mac_init();
/* init the rd id space. /* init the rd id space.
assign 0th index in the bitfield, assign 0th index in the bitfield,
@ -8853,8 +8754,7 @@ static int peer_unshut_after_cfg(struct bgp *bgp)
peer->host); peer->host);
peer->shut_during_cfg = false; peer->shut_during_cfg = false;
if (peer_active(peer->connection) == BGP_PEER_ACTIVE && if (peer_active(peer->connection) && peer->connection->status != Established) {
peer->connection->status != Established) {
if (peer->connection->status != Idle) if (peer->connection->status != Idle)
BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Stop);
BGP_EVENT_ADD(peer->connection, BGP_Start); BGP_EVENT_ADD(peer->connection, BGP_Start);
@ -8972,9 +8872,6 @@ void bgp_terminate(void)
EVENT_OFF(bm->t_bgp_zebra_l3_vni); EVENT_OFF(bm->t_bgp_zebra_l3_vni);
bgp_mac_finish(); bgp_mac_finish();
#ifdef ENABLE_BGP_VNC
rfapi_terminate();
#endif
} }
struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp, struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp,
@ -9063,373 +8960,6 @@ void bgp_gr_apply_running_config(void)
} }
} }
/* Hash of peers in clearing info object */
static int peer_clearing_hash_cmp(const struct peer *p1, const struct peer *p2)
{
if (p1 == p2)
return 0;
else if (p1 < p2)
return -1;
else
return 1;
}
static uint32_t peer_clearing_hashfn(const struct peer *p1)
{
return (uint32_t)((intptr_t)p1 & 0xffffffffULL);
}
/*
* Free a clearing batch: this really just does the memory cleanup; the
* clearing code is expected to manage the peer, dest, table, etc refcounts
*/
static void bgp_clearing_batch_free(struct bgp *bgp,
struct bgp_clearing_info **pinfo)
{
struct bgp_clearing_info *cinfo = *pinfo;
struct bgp_clearing_dest *destinfo;
if (bgp_clearing_info_anywhere(cinfo))
bgp_clearing_info_del(&bgp->clearing_list, cinfo);
while ((destinfo = bgp_clearing_destlist_pop(&cinfo->destlist)) != NULL)
XFREE(MTYPE_CLEARING_BATCH, destinfo);
bgp_clearing_hash_fini(&cinfo->peers);
XFREE(MTYPE_CLEARING_BATCH, *pinfo);
}
/*
* Done with a peer that was part of a clearing batch
*/
static void bgp_clearing_peer_done(struct peer *peer)
{
UNSET_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH);
/* Tickle FSM to start moving again */
BGP_EVENT_ADD(peer->connection, Clearing_Completed);
peer_unlock(peer); /* bgp_clear_route */
}
/*
* Initialize a new batch struct for clearing peer(s) from the RIB
*/
void bgp_clearing_batch_begin(struct bgp *bgp)
{
struct bgp_clearing_info *cinfo;
if (event_is_scheduled(bgp->clearing_end))
return;
cinfo = XCALLOC(MTYPE_CLEARING_BATCH, sizeof(struct bgp_clearing_info));
cinfo->bgp = bgp;
cinfo->id = bm->peer_clearing_batch_id++;
/* Init hash of peers and list of dests */
bgp_clearing_hash_init(&cinfo->peers);
bgp_clearing_destlist_init(&cinfo->destlist);
/* Batch is open for more peers */
SET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN);
bgp_clearing_info_add_head(&bgp->clearing_list, cinfo);
}
/*
* Close a batch of clearing peers, and begin working on the RIB
*/
static void bgp_clearing_batch_end(struct bgp *bgp)
{
struct bgp_clearing_info *cinfo;
if (event_is_scheduled(bgp->clearing_end))
return;
cinfo = bgp_clearing_info_first(&bgp->clearing_list);
assert(cinfo != NULL);
assert(CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN));
/* Batch is closed */
UNSET_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN);
/* If we have no peers to examine, just discard the batch info */
if (bgp_clearing_hash_count(&cinfo->peers) == 0) {
bgp_clearing_batch_free(bgp, &cinfo);
return;
}
/* Do a RIB walk for the current batch. If it finds dests/prefixes
* to work on, this will schedule a task to process
* the dests/prefixes in the batch.
* NB this will free the batch if it finishes, or if there was no work
* to do.
*/
bgp_clear_route_batch(cinfo);
}
static void bgp_clearing_batch_end_event(struct event *event)
{
struct bgp *bgp = EVENT_ARG(event);
bgp_clearing_batch_end(bgp);
bgp_unlock(bgp);
}
void bgp_clearing_batch_end_event_start(struct bgp *bgp)
{
if (!event_is_scheduled(bgp->clearing_end))
bgp_lock(bgp);
EVENT_OFF(bgp->clearing_end);
event_add_timer_msec(bm->master, bgp_clearing_batch_end_event, bgp, 100, &bgp->clearing_end);
}
/* Check whether a dest's peer is relevant to a clearing batch */
bool bgp_clearing_batch_check_peer(struct bgp_clearing_info *cinfo,
const struct peer *peer)
{
struct peer *p;
p = bgp_clearing_hash_find(&cinfo->peers, peer);
return (p != NULL);
}
/*
* Check whether a clearing batch has any dests to process
*/
bool bgp_clearing_batch_dests_present(struct bgp_clearing_info *cinfo)
{
return (bgp_clearing_destlist_count(&cinfo->destlist) > 0);
}
/*
* Done with a peer clearing batch; deal with refcounts, free memory
*/
void bgp_clearing_batch_completed(struct bgp_clearing_info *cinfo)
{
struct peer *peer;
struct bgp_dest *dest;
struct bgp_clearing_dest *destinfo;
struct bgp_table *table;
/* Ensure event is not scheduled */
event_cancel_event(bm->master, &cinfo->t_sched);
/* Remove all peers and un-ref */
while ((peer = bgp_clearing_hash_pop(&cinfo->peers)) != NULL)
bgp_clearing_peer_done(peer);
/* Remove any dests/prefixes and unlock */
destinfo = bgp_clearing_destlist_pop(&cinfo->destlist);
while (destinfo) {
dest = destinfo->dest;
XFREE(MTYPE_CLEARING_BATCH, destinfo);
table = bgp_dest_table(dest);
bgp_dest_unlock_node(dest);
bgp_table_unlock(table);
destinfo = bgp_clearing_destlist_pop(&cinfo->destlist);
}
/* Free memory */
bgp_clearing_batch_free(cinfo->bgp, &cinfo);
}
/*
* Add a prefix/dest to a clearing batch
*/
void bgp_clearing_batch_add_dest(struct bgp_clearing_info *cinfo,
struct bgp_dest *dest)
{
struct bgp_clearing_dest *destinfo;
destinfo = XCALLOC(MTYPE_CLEARING_BATCH,
sizeof(struct bgp_clearing_dest));
destinfo->dest = dest;
bgp_clearing_destlist_add_tail(&cinfo->destlist, destinfo);
}
/*
* Return the next dest for batch clear processing
*/
struct bgp_dest *bgp_clearing_batch_next_dest(struct bgp_clearing_info *cinfo)
{
struct bgp_clearing_dest *destinfo;
struct bgp_dest *dest = NULL;
destinfo = bgp_clearing_destlist_pop(&cinfo->destlist);
if (destinfo) {
dest = destinfo->dest;
XFREE(MTYPE_CLEARING_BATCH, destinfo);
}
return dest;
}
/* If a clearing batch is available for 'peer', add it and return 'true',
* else return 'false'.
*/
bool bgp_clearing_batch_add_peer(struct bgp *bgp, struct peer *peer)
{
struct bgp_clearing_info *cinfo;
cinfo = bgp_clearing_info_first(&bgp->clearing_list);
if (cinfo && CHECK_FLAG(cinfo->flags, BGP_CLEARING_INFO_FLAG_OPEN)) {
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH)) {
/* Add a peer ref */
peer_lock(peer);
bgp_clearing_hash_add(&cinfo->peers, peer);
SET_FLAG(peer->flags, PEER_FLAG_CLEARING_BATCH);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s: peer %pBP batched in %#x", __func__,
peer, cinfo->id);
}
return true;
}
return false;
}
/*
* Task callback in the main pthread to handle socket errors
* encountered in the io pthread. We avoid having the io pthread try
* to enqueue fsm events or mess with the peer struct.
*/
static void bgp_process_conn_error(struct event *event)
{
struct bgp *bgp;
struct peer *peer;
struct peer_connection *connection;
uint32_t counter = 0;
size_t list_count = 0;
bool more_p = false;
bgp = EVENT_ARG(event);
frr_with_mutex (&bgp->peer_errs_mtx) {
connection = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist);
list_count =
bgp_peer_conn_errlist_count(&bgp->peer_conn_errlist);
}
/* If we have multiple peers with errors, try to batch some
* clearing work.
*/
if (list_count > 0)
bgp_clearing_batch_begin(bgp);
/* Dequeue peers from the error list */
while (connection != NULL) {
peer = connection->peer;
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [Event] BGP error %d on fd %d",
peer->host, connection->connection_errcode,
connection->fd);
/* Closed connection or error on the socket */
if (peer_established(connection)) {
if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART)
|| CHECK_FLAG(peer->flags,
PEER_FLAG_GRACEFUL_RESTART_HELPER))
&& CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) {
peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION;
SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
} else
peer->last_reset = PEER_DOWN_CLOSE_SESSION;
}
/* No need for keepalives, if enabled */
bgp_keepalives_off(peer->connection);
/* Drive into state-machine changes */
bgp_event_update(connection, connection->connection_errcode);
counter++;
if (counter >= bm->peer_conn_errs_dequeue_limit)
break;
connection = bgp_dequeue_conn_err(bgp, &more_p);
}
/* Reschedule event if necessary */
if (more_p)
bgp_conn_err_reschedule(bgp);
/* Done with a clearing batch */
if (list_count > 0)
bgp_clearing_batch_end(bgp);
if (bgp_debug_neighbor_events(NULL))
zlog_debug("%s: dequeued and processed %d peers", __func__,
counter);
}
/*
* Enqueue a connection with an error to be handled in the main pthread;
* this is called from the io pthread.
*/
int bgp_enqueue_conn_err(struct bgp *bgp, struct peer_connection *connection,
int errcode)
{
frr_with_mutex (&bgp->peer_errs_mtx) {
connection->connection_errcode = errcode;
/* Careful not to double-enqueue */
if (!bgp_peer_conn_errlist_anywhere(connection)) {
bgp_peer_conn_errlist_add_tail(&bgp->peer_conn_errlist,
connection);
}
}
/* Ensure an event is scheduled */
event_add_event(bm->master, bgp_process_conn_error, bgp, 0,
&bgp->t_conn_errors);
return 0;
}
/*
* Dequeue a connection that encountered a connection error; signal whether there
* are more queued peers.
*/
struct peer_connection *bgp_dequeue_conn_err(struct bgp *bgp, bool *more_p)
{
struct peer_connection *connection = NULL;
bool more = false;
frr_with_mutex (&bgp->peer_errs_mtx) {
connection = bgp_peer_conn_errlist_pop(&bgp->peer_conn_errlist);
if (bgp_peer_conn_errlist_const_first(
&bgp->peer_conn_errlist) != NULL)
more = true;
}
if (more_p)
*more_p = more;
return connection;
}
/*
* Reschedule the connection error event - probably after processing
* some of the peers on the list.
*/
void bgp_conn_err_reschedule(struct bgp *bgp)
{
event_add_event(bm->master, bgp_process_conn_error, bgp, 0,
&bgp->t_conn_errors);
}
printfrr_ext_autoreg_p("BP", printfrr_bp); printfrr_ext_autoreg_p("BP", printfrr_bp);
static ssize_t printfrr_bp(struct fbuf *buf, struct printfrr_eargs *ea, static ssize_t printfrr_bp(struct fbuf *buf, struct printfrr_eargs *ea,
const void *ptr) const void *ptr)
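Note: the large bgpd.c hunk above also touches the machinery that hands socket errors from the I/O pthread to the main pthread (bgp_enqueue_conn_err / bgp_process_conn_error). Below is a small standalone model of that handoff pattern, written without any FRR types or event loop; all names in it (conn_err, enqueue_conn_err, process_conn_errors) are illustrative, not FRR APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn_err {
        int fd;
        int errcode;
        struct conn_err *next;
};

static pthread_mutex_t errs_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct conn_err *err_list;

/* Called from the I/O thread: record the error, don't touch peer state */
static void enqueue_conn_err(int fd, int errcode)
{
        struct conn_err *e = calloc(1, sizeof(*e));

        e->fd = fd;
        e->errcode = errcode;
        pthread_mutex_lock(&errs_mtx);
        e->next = err_list;
        err_list = e;
        pthread_mutex_unlock(&errs_mtx);
}

/* Called from the main thread: drain the list and drive state changes */
static void process_conn_errors(void)
{
        struct conn_err *e;

        pthread_mutex_lock(&errs_mtx);
        e = err_list;
        err_list = NULL;
        pthread_mutex_unlock(&errs_mtx);

        while (e) {
                struct conn_err *next = e->next;

                printf("handling error %d on fd %d in main thread\n",
                       e->errcode, e->fd);
                free(e);
                e = next;
        }
}

static void *io_thread(void *arg)
{
        (void)arg;
        enqueue_conn_err(11, 104); /* e.g. ECONNRESET */
        enqueue_conn_err(12, 110); /* e.g. ETIMEDOUT */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, io_thread, NULL);
        pthread_join(t, NULL);
        process_conn_errors();
        return 0;
}

The point of the pattern is that the I/O thread only records the failure under a mutex; all FSM and peer-state changes stay in the main thread, which drains the list later.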

@ -48,11 +48,6 @@ DECLARE_HOOK(bgp_hook_config_write_vrf, (struct vty *vty, struct vrf *vrf),
/* Default interval for IPv6 RAs when triggered by BGP unnumbered neighbor. */ /* Default interval for IPv6 RAs when triggered by BGP unnumbered neighbor. */
#define BGP_UNNUM_DEFAULT_RA_INTERVAL 10 #define BGP_UNNUM_DEFAULT_RA_INTERVAL 10
/* Max number of peers to process without rescheduling */
#define BGP_CONN_ERROR_DEQUEUE_MAX 10
/* Limit the number of clearing dests we'll process per callback */
#define BGP_CLEARING_BATCH_MAX_DESTS 100
struct update_subgroup; struct update_subgroup;
struct bpacket; struct bpacket;
struct bgp_pbr_config; struct bgp_pbr_config;
@ -107,9 +102,6 @@ enum bgp_af_index {
extern struct frr_pthread *bgp_pth_io; extern struct frr_pthread *bgp_pth_io;
extern struct frr_pthread *bgp_pth_ka; extern struct frr_pthread *bgp_pth_ka;
/* FIFO list for peer connections */
PREDECL_LIST(peer_connection_fifo);
/* BGP master for system wide configurations and variables. */ /* BGP master for system wide configurations and variables. */
struct bgp_master { struct bgp_master {
/* BGP instance list. */ /* BGP instance list. */
@ -124,11 +116,6 @@ struct bgp_master {
/* BGP port number. */ /* BGP port number. */
uint16_t port; uint16_t port;
/* FIFO list head for peer connections */
struct peer_connection_fifo_head connection_fifo;
struct event *e_process_packet;
pthread_mutex_t peer_connection_mtx;
/* Listener addresses */ /* Listener addresses */
struct list *addresses; struct list *addresses;
@ -227,16 +214,6 @@ struct bgp_master {
/* To preserve ordering of processing of BGP-VRFs for L3 VNIs */ /* To preserve ordering of processing of BGP-VRFs for L3 VNIs */
struct zebra_l3_vni_head zebra_l3_vni_head; struct zebra_l3_vni_head zebra_l3_vni_head;
/* ID value for peer clearing batches */
uint32_t peer_clearing_batch_id;
/* Limits for batched peer clearing code:
* Max number of errored peers to process without rescheduling
*/
uint32_t peer_conn_errs_dequeue_limit;
/* Limit the number of clearing dests we'll process per callback */
uint32_t peer_clearing_batch_max_dests;
QOBJ_FIELDS; QOBJ_FIELDS;
}; };
DECLARE_QOBJ_TYPE(bgp_master); DECLARE_QOBJ_TYPE(bgp_master);
@ -408,81 +385,6 @@ struct as_confed {
struct bgp_mplsvpn_nh_label_bind_cache; struct bgp_mplsvpn_nh_label_bind_cache;
PREDECL_RBTREE_UNIQ(bgp_mplsvpn_nh_label_bind_cache); PREDECL_RBTREE_UNIQ(bgp_mplsvpn_nh_label_bind_cache);
/* List of peers that have connection errors in the io pthread */
PREDECL_DLIST(bgp_peer_conn_errlist);
/* List of info about peers that are being cleared from BGP RIBs in a batch */
PREDECL_DLIST(bgp_clearing_info);
/* Hash of peers in clearing info object */
PREDECL_HASH(bgp_clearing_hash);
/* List of dests that need to be processed in a clearing batch */
PREDECL_LIST(bgp_clearing_destlist);
struct bgp_clearing_dest {
struct bgp_dest *dest;
struct bgp_clearing_destlist_item link;
};
/* Info about a batch of peers that need to be cleared from the RIB.
* If many peers need to be cleared, we process them in batches, taking
* one walk through the RIB for each batch. This is only used for "all"
* afi/safis, typically when processing peer connection errors.
*/
struct bgp_clearing_info {
/* Owning bgp instance */
struct bgp *bgp;
/* Hash of peers */
struct bgp_clearing_hash_head peers;
/* Batch ID, for debugging/logging */
uint32_t id;
/* Flags */
uint32_t flags;
/* List of dests - wrapped by a small wrapper struct */
struct bgp_clearing_destlist_head destlist;
/* Event to schedule/reschedule processing */
struct event *t_sched;
/* Info for rescheduling the RIB walk */
afi_t last_afi;
safi_t last_safi;
struct prefix last_pfx;
/* For some afi/safi (vpn/evpn e.g.), bgp may do an inner walk
* for a related table; the 'last' info represents the outer walk,
* and this info represents the inner walk.
*/
afi_t inner_afi;
safi_t inner_safi;
struct prefix inner_pfx;
/* Map of afi/safi so we don't re-walk any tables */
uint8_t table_map[AFI_MAX][SAFI_MAX];
/* Counters: current iteration, overall total, and processed count. */
uint32_t curr_counter;
uint32_t total_counter;
uint32_t total_processed;
/* TODO -- id, serial number, for debugging/logging? */
/* Linkage for list of batches per bgp */
struct bgp_clearing_info_item link;
};
/* Batch is open, new peers can be added */
#define BGP_CLEARING_INFO_FLAG_OPEN (1 << 0)
/* Batch is resuming iteration after yielding */
#define BGP_CLEARING_INFO_FLAG_RESUME (1 << 1)
/* Batch has 'inner' resume info set */
#define BGP_CLEARING_INFO_FLAG_INNER (1 << 2)
/* BGP instance structure. */ /* BGP instance structure. */
struct bgp { struct bgp {
/* AS number of this BGP instance. */ /* AS number of this BGP instance. */
@ -562,8 +464,6 @@ struct bgp {
/* start-up timer on only once at the beginning */ /* start-up timer on only once at the beginning */
struct event *t_startup; struct event *t_startup;
struct event *clearing_end;
uint32_t v_maxmed_onstartup; /* Duration of max-med on start-up */ uint32_t v_maxmed_onstartup; /* Duration of max-med on start-up */
#define BGP_MAXMED_ONSTARTUP_UNCONFIGURED 0 /* 0 means off, its the default */ #define BGP_MAXMED_ONSTARTUP_UNCONFIGURED 0 /* 0 means off, its the default */
uint32_t maxmed_onstartup_value; /* Max-med value when active on uint32_t maxmed_onstartup_value; /* Max-med value when active on
@ -970,21 +870,6 @@ struct bgp {
uint16_t tcp_keepalive_intvl; uint16_t tcp_keepalive_intvl;
uint16_t tcp_keepalive_probes; uint16_t tcp_keepalive_probes;
/* List of peers that have connection errors in the IO pthread */
struct bgp_peer_conn_errlist_head peer_conn_errlist;
/* Mutex that guards the connection-errors list */
pthread_mutex_t peer_errs_mtx;
/* Event indicating that there have been connection errors; this
* is typically signalled in the IO pthread; it's handled in the
* main pthread.
*/
struct event *t_conn_errors;
/* List of batches of peers being cleared from BGP RIBs */
struct bgp_clearing_info_head clearing_list;
struct timeval ebgprequirespolicywarning; struct timeval ebgprequirespolicywarning;
#define FIFTEENMINUTE2USEC (int64_t)15 * 60 * 1000000 #define FIFTEENMINUTE2USEC (int64_t)15 * 60 * 1000000
@ -1328,28 +1213,8 @@ struct addpath_paths_limit {
uint16_t receive; uint16_t receive;
}; };
/*
 * The peer data structure has incoming and outgoing peer connection
 * variables. In the early stage of the FSM, it is possible to have
 * both an incoming and an outgoing connection at the same time. These
* connections both have events scheduled to happen that both produce
* logs. It is very hard to tell these debugs apart when looking at
* the log files so the debugs are now adding direction strings to
* help figure out what is going on. At a later stage in the FSM
* one of the connections will be closed and the other one kept.
* The one being kept is moved to the ESTABLISHED connection direction
* so that debugs can be figured out.
*/
enum connection_direction {
UNKNOWN,
CONNECTION_INCOMING,
CONNECTION_OUTGOING,
ESTABLISHED,
};
struct peer_connection { struct peer_connection {
struct peer *peer; struct peer *peer;
enum connection_direction dir;
/* Status of the peer connection. */ /* Status of the peer connection. */
enum bgp_fsm_status status; enum bgp_fsm_status status;
@ -1386,30 +1251,18 @@ struct peer_connection {
struct event *t_pmax_restart; struct event *t_pmax_restart;
struct event *t_routeadv; struct event *t_routeadv;
struct event *t_process_packet;
struct event *t_process_packet_error;
struct event *t_stop_with_notify; struct event *t_stop_with_notify;
/* Linkage for list connections with errors, from IO pthread */
struct bgp_peer_conn_errlist_item conn_err_link;
/* Connection error code */
uint16_t connection_errcode;
union sockunion su; union sockunion su;
#define BGP_CONNECTION_SU_UNSPEC(connection) \ #define BGP_CONNECTION_SU_UNSPEC(connection) \
(connection->su.sa.sa_family == AF_UNSPEC) (connection->su.sa.sa_family == AF_UNSPEC)
union sockunion *su_local; /* Sockunion of local address. */ union sockunion *su_local; /* Sockunion of local address. */
union sockunion *su_remote; /* Sockunion of remote address. */ union sockunion *su_remote; /* Sockunion of remote address. */
/* For FIFO list */
struct peer_connection_fifo_item fifo_item;
}; };
/* Declare the FIFO list implementation */
DECLARE_LIST(peer_connection_fifo, struct peer_connection, fifo_item);
const char *bgp_peer_get_connection_direction(struct peer_connection *connection);
extern struct peer_connection *bgp_peer_connection_new(struct peer *peer); extern struct peer_connection *bgp_peer_connection_new(struct peer *peer);
extern void bgp_peer_connection_free(struct peer_connection **connection); extern void bgp_peer_connection_free(struct peer_connection **connection);
extern void bgp_peer_connection_buffers_free(struct peer_connection *connection); extern void bgp_peer_connection_buffers_free(struct peer_connection *connection);
@ -1694,8 +1547,6 @@ struct peer {
#define PEER_FLAG_EXTENDED_LINK_BANDWIDTH (1ULL << 39) #define PEER_FLAG_EXTENDED_LINK_BANDWIDTH (1ULL << 39)
#define PEER_FLAG_DUAL_AS (1ULL << 40) #define PEER_FLAG_DUAL_AS (1ULL << 40)
#define PEER_FLAG_CAPABILITY_LINK_LOCAL (1ULL << 41) #define PEER_FLAG_CAPABILITY_LINK_LOCAL (1ULL << 41)
/* Peer is part of a batch clearing its routes */
#define PEER_FLAG_CLEARING_BATCH (1ULL << 42)
/* /*
*GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART *GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART
@ -2093,9 +1944,6 @@ struct peer {
/* Add-Path Paths-Limit */ /* Add-Path Paths-Limit */
struct addpath_paths_limit addpath_paths_limit[AFI_MAX][SAFI_MAX]; struct addpath_paths_limit addpath_paths_limit[AFI_MAX][SAFI_MAX];
/* Linkage for hash of clearing peers being cleared in a batch */
struct bgp_clearing_hash_item clear_hash_link;
QOBJ_FIELDS; QOBJ_FIELDS;
}; };
DECLARE_QOBJ_TYPE(peer); DECLARE_QOBJ_TYPE(peer);
@ -2430,14 +2278,6 @@ enum bgp_martian_type {
BGP_MARTIAN_SOO, /* bgp->evpn_info->macvrf_soo */ BGP_MARTIAN_SOO, /* bgp->evpn_info->macvrf_soo */
}; };
/* Distinguish the reason why the peer is not active. */
enum bgp_peer_active {
BGP_PEER_ACTIVE,
BGP_PEER_CONNECTION_UNSPECIFIED,
BGP_PEER_BFD_DOWN,
BGP_PEER_AF_UNCONFIGURED,
};
extern const struct message bgp_martian_type_str[]; extern const struct message bgp_martian_type_str[];
extern const char *bgp_martian_type2str(enum bgp_martian_type mt); extern const char *bgp_martian_type2str(enum bgp_martian_type mt);
@ -2486,7 +2326,7 @@ extern struct peer *peer_unlock_with_caller(const char *, struct peer *);
extern enum bgp_peer_sort peer_sort(struct peer *peer); extern enum bgp_peer_sort peer_sort(struct peer *peer);
extern enum bgp_peer_sort peer_sort_lookup(struct peer *peer); extern enum bgp_peer_sort peer_sort_lookup(struct peer *peer);
extern enum bgp_peer_active peer_active(struct peer_connection *connection); extern bool peer_active(struct peer_connection *connection);
extern bool peer_active_nego(struct peer *); extern bool peer_active_nego(struct peer *);
extern bool peer_afc_received(struct peer *peer); extern bool peer_afc_received(struct peer *peer);
extern bool peer_afc_advertised(struct peer *peer); extern bool peer_afc_advertised(struct peer *peer);
@ -2744,11 +2584,6 @@ void bgp_gr_apply_running_config(void);
int bgp_global_gr_init(struct bgp *bgp); int bgp_global_gr_init(struct bgp *bgp);
int bgp_peer_gr_init(struct peer *peer); int bgp_peer_gr_init(struct peer *peer);
/* APIs for the per-bgp peer connection error list */
int bgp_enqueue_conn_err(struct bgp *bgp, struct peer_connection *connection,
int errcode);
struct peer_connection *bgp_dequeue_conn_err(struct bgp *bgp, bool *more_p);
void bgp_conn_err_reschedule(struct bgp *bgp);
#define BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(_bgp, _peer_list) \ #define BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(_bgp, _peer_list) \
do { \ do { \
@ -3067,27 +2902,6 @@ extern void srv6_function_free(struct bgp_srv6_function *func);
extern void bgp_session_reset_safe(struct peer *peer, struct listnode **nnode); extern void bgp_session_reset_safe(struct peer *peer, struct listnode **nnode);
/* If a clearing batch is available for 'peer', add it and return 'true',
* else return 'false'.
*/
bool bgp_clearing_batch_add_peer(struct bgp *bgp, struct peer *peer);
/* Add a prefix/dest to a clearing batch */
void bgp_clearing_batch_add_dest(struct bgp_clearing_info *cinfo,
struct bgp_dest *dest);
/* Check whether a dest's peer is relevant to a clearing batch */
bool bgp_clearing_batch_check_peer(struct bgp_clearing_info *cinfo,
const struct peer *peer);
/* Check whether a clearing batch has any dests to process */
bool bgp_clearing_batch_dests_present(struct bgp_clearing_info *cinfo);
/* Returns the next dest for batch clear processing */
struct bgp_dest *bgp_clearing_batch_next_dest(struct bgp_clearing_info *cinfo);
/* Done with a peer clearing batch; deal with refcounts, free memory */
void bgp_clearing_batch_completed(struct bgp_clearing_info *cinfo);
/* Start a new batch of peers to clear */
void bgp_clearing_batch_begin(struct bgp *bgp);
/* End a new batch of peers to clear */
void bgp_clearing_batch_end_event_start(struct bgp *bgp);
#ifdef _FRR_ATTRIBUTE_PRINTFRR #ifdef _FRR_ATTRIBUTE_PRINTFRR
/* clang-format off */ /* clang-format off */
#pragma FRR printfrr_ext "%pBP" (struct peer *) #pragma FRR printfrr_ext "%pBP" (struct peer *)

View file

@ -946,7 +946,8 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
} }
} }
if (!CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED) && attrhash_cmp(bpi->attr, new_attr)) { if (attrhash_cmp(bpi->attr, new_attr)
&& !CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)) {
bgp_attr_unintern(&new_attr); bgp_attr_unintern(&new_attr);
bgp_dest_unlock_node(bn); bgp_dest_unlock_node(bn);
@ -3545,8 +3546,6 @@ DEFUN (skiplist_debug_cli,
void rfapi_init(void) void rfapi_init(void)
{ {
rfapi_rib_init();
rfapi_import_init();
bgp_rfapi_cfg_init(); bgp_rfapi_cfg_init();
vnc_debug_init(); vnc_debug_init();
@ -3577,12 +3576,6 @@ void rfapi_init(void)
rfapi_vty_init(); rfapi_vty_init();
} }
void rfapi_terminate(void)
{
rfapi_import_terminate();
rfapi_rib_terminate();
}
#ifdef DEBUG_RFAPI #ifdef DEBUG_RFAPI
static void rfapi_print_exported(struct bgp *bgp) static void rfapi_print_exported(struct bgp *bgp)
{ {

View file

@ -14,7 +14,6 @@
#include "bgpd/bgp_nexthop.h" #include "bgpd/bgp_nexthop.h"
extern void rfapi_init(void); extern void rfapi_init(void);
extern void rfapi_terminate(void);
extern void vnc_zebra_init(struct event_loop *master); extern void vnc_zebra_init(struct event_loop *master);
extern void vnc_zebra_destroy(void); extern void vnc_zebra_destroy(void);

View file

@ -52,23 +52,14 @@
#undef DEBUG_IT_NODES #undef DEBUG_IT_NODES
#undef DEBUG_BI_SEARCH #undef DEBUG_BI_SEARCH
/*
* Hash to keep track of outstanding timers so we can force them to
* expire at shutdown time, thus freeing their allocated memory.
*/
PREDECL_HASH(rwcb);
/* /*
* Allocated for each withdraw timer instance; freed when the timer * Allocated for each withdraw timer instance; freed when the timer
* expires or is canceled * expires or is canceled
*/ */
struct rfapi_withdraw { struct rfapi_withdraw {
struct rwcb_item rwcbi;
struct rfapi_import_table *import_table; struct rfapi_import_table *import_table;
struct agg_node *node; struct agg_node *node;
struct bgp_path_info *info; struct bgp_path_info *info;
void (*timer_service_func)(struct event *t); /* for cleanup */
safi_t safi; /* used only for bulk operations */ safi_t safi; /* used only for bulk operations */
/* /*
* For import table node reference count checking (i.e., debugging). * For import table node reference count checking (i.e., debugging).
@ -81,19 +72,6 @@ struct rfapi_withdraw {
int lockoffset; int lockoffset;
}; };
static int _rwcb_cmp(const struct rfapi_withdraw *w1, const struct rfapi_withdraw *w2)
{
return (w1 != w2);
}
static uint32_t _rwcb_hash(const struct rfapi_withdraw *w)
{
return (uintptr_t)w & 0xffffffff;
}
DECLARE_HASH(rwcb, struct rfapi_withdraw, rwcbi, _rwcb_cmp, _rwcb_hash);
static struct rwcb_head _rwcbhash;
/* /*
* DEBUG FUNCTION * DEBUG FUNCTION
* Count remote routes and compare with actively-maintained values. * Count remote routes and compare with actively-maintained values.
@ -848,7 +826,6 @@ static void rfapiBgpInfoChainFree(struct bgp_path_info *bpi)
struct rfapi_withdraw *wcb = struct rfapi_withdraw *wcb =
EVENT_ARG(bpi->extra->vnc->vnc.import.timer); EVENT_ARG(bpi->extra->vnc->vnc.import.timer);
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
EVENT_OFF(bpi->extra->vnc->vnc.import.timer); EVENT_OFF(bpi->extra->vnc->vnc.import.timer);
} }
@ -1352,11 +1329,11 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix,
bgp_attr_extcom_tunnel_type(bpi->attr, &tun_type); bgp_attr_extcom_tunnel_type(bpi->attr, &tun_type);
if (tun_type == BGP_ENCAP_TYPE_MPLS) { if (tun_type == BGP_ENCAP_TYPE_MPLS) {
struct prefix pfx; struct prefix p;
/* MPLS carries UN address in next hop */ /* MPLS carries UN address in next hop */
rfapiNexthop2Prefix(bpi->attr, &pfx); rfapiNexthop2Prefix(bpi->attr, &p);
if (pfx.family != AF_UNSPEC) { if (p.family != AF_UNSPEC) {
rfapiQprefix2Raddr(&pfx, &new->un_address); rfapiQprefix2Raddr(&p, &new->un_address);
have_vnc_tunnel_un = 1; have_vnc_tunnel_un = 1;
} }
} }
@ -1773,7 +1750,7 @@ struct rfapi_next_hop_entry *rfapiRouteNode2NextHopList(
* Add non-withdrawn routes from less-specific prefix * Add non-withdrawn routes from less-specific prefix
*/ */
if (parent) { if (parent) {
p = agg_node_get_prefix(parent); const struct prefix *p = agg_node_get_prefix(parent);
rib_rn = rfd_rib_table ? agg_node_get(rfd_rib_table, p) : NULL; rib_rn = rfd_rib_table ? agg_node_get(rfd_rib_table, p) : NULL;
rfapiQprefix2Rprefix(p, &rprefix); rfapiQprefix2Rprefix(p, &rprefix);
@ -2372,7 +2349,6 @@ static void rfapiWithdrawTimerVPN(struct event *t)
/* This callback is responsible for the withdraw object's memory */ /* This callback is responsible for the withdraw object's memory */
if (early_exit) { if (early_exit) {
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
return; return;
} }
@ -2486,7 +2462,6 @@ done:
RFAPI_CHECK_REFCOUNT(wcb->node, SAFI_MPLS_VPN, 1 + wcb->lockoffset); RFAPI_CHECK_REFCOUNT(wcb->node, SAFI_MPLS_VPN, 1 + wcb->lockoffset);
agg_unlock_node(wcb->node); /* decr ref count */ agg_unlock_node(wcb->node); /* decr ref count */
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
} }
@ -2730,7 +2705,6 @@ static void rfapiWithdrawTimerEncap(struct event *t)
done: done:
RFAPI_CHECK_REFCOUNT(wcb->node, SAFI_ENCAP, 1); RFAPI_CHECK_REFCOUNT(wcb->node, SAFI_ENCAP, 1);
agg_unlock_node(wcb->node); /* decr ref count */ agg_unlock_node(wcb->node); /* decr ref count */
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
skiplist_free(vpn_node_sl); skiplist_free(vpn_node_sl);
} }
@ -2780,8 +2754,6 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
wcb->node = rn; wcb->node = rn;
wcb->info = bpi; wcb->info = bpi;
wcb->import_table = import_table; wcb->import_table = import_table;
wcb->timer_service_func = timer_service_func;
rwcb_add(&_rwcbhash, wcb);
bgp_attr_intern(bpi->attr); bgp_attr_intern(bpi->attr);
if (VNC_DEBUG(VERBOSE)) { if (VNC_DEBUG(VERBOSE)) {
@ -2847,7 +2819,6 @@ static void rfapiExpireEncapNow(struct rfapi_import_table *it,
wcb->info = bpi; wcb->info = bpi;
wcb->node = rn; wcb->node = rn;
wcb->import_table = it; wcb->import_table = it;
rwcb_add(&_rwcbhash, wcb);
memset(&t, 0, sizeof(t)); memset(&t, 0, sizeof(t));
t.arg = wcb; t.arg = wcb;
rfapiWithdrawTimerEncap(&t); /* frees wcb */ rfapiWithdrawTimerEncap(&t); /* frees wcb */
@ -3086,7 +3057,6 @@ static void rfapiBgpInfoFilteredImportEncap(
struct rfapi_withdraw *wcb = EVENT_ARG( struct rfapi_withdraw *wcb = EVENT_ARG(
bpi->extra->vnc->vnc.import.timer); bpi->extra->vnc->vnc.import.timer);
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
EVENT_OFF(bpi->extra->vnc->vnc.import EVENT_OFF(bpi->extra->vnc->vnc.import
.timer); .timer);
@ -3113,7 +3083,6 @@ static void rfapiBgpInfoFilteredImportEncap(
wcb->info = bpi; wcb->info = bpi;
wcb->node = rn; wcb->node = rn;
wcb->import_table = import_table; wcb->import_table = import_table;
rwcb_add(&_rwcbhash, wcb);
memset(&t, 0, sizeof(t)); memset(&t, 0, sizeof(t));
t.arg = wcb; t.arg = wcb;
rfapiWithdrawTimerEncap( rfapiWithdrawTimerEncap(
@ -3180,7 +3149,6 @@ static void rfapiBgpInfoFilteredImportEncap(
struct rfapi_withdraw *wcb = struct rfapi_withdraw *wcb =
EVENT_ARG(bpi->extra->vnc->vnc.import.timer); EVENT_ARG(bpi->extra->vnc->vnc.import.timer);
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
EVENT_OFF(bpi->extra->vnc->vnc.import.timer); EVENT_OFF(bpi->extra->vnc->vnc.import.timer);
} }
@ -3224,7 +3192,7 @@ static void rfapiBgpInfoFilteredImportEncap(
__func__, rn); __func__, rn);
#endif #endif
for (m = RFAPI_MONITOR_ENCAP(rn); m; m = m->next) { for (m = RFAPI_MONITOR_ENCAP(rn); m; m = m->next) {
const struct prefix *pfx; const struct prefix *p;
/* /*
* For each referenced bpi/route, copy the ENCAP route's * For each referenced bpi/route, copy the ENCAP route's
@ -3252,9 +3220,9 @@ static void rfapiBgpInfoFilteredImportEncap(
* list * list
* per prefix. * per prefix.
*/ */
pfx = agg_node_get_prefix(m->node); p = agg_node_get_prefix(m->node);
referenced_vpn_prefix = referenced_vpn_prefix =
agg_node_get(referenced_vpn_table, pfx); agg_node_get(referenced_vpn_table, p);
assert(referenced_vpn_prefix); assert(referenced_vpn_prefix);
for (mnext = referenced_vpn_prefix->info; mnext; for (mnext = referenced_vpn_prefix->info; mnext;
mnext = mnext->next) { mnext = mnext->next) {
@ -3325,7 +3293,6 @@ static void rfapiExpireVpnNow(struct rfapi_import_table *it,
wcb->node = rn; wcb->node = rn;
wcb->import_table = it; wcb->import_table = it;
wcb->lockoffset = lockoffset; wcb->lockoffset = lockoffset;
rwcb_add(&_rwcbhash, wcb);
memset(&t, 0, sizeof(t)); memset(&t, 0, sizeof(t));
t.arg = wcb; t.arg = wcb;
rfapiWithdrawTimerVPN(&t); /* frees wcb */ rfapiWithdrawTimerVPN(&t); /* frees wcb */
@ -3543,7 +3510,6 @@ void rfapiBgpInfoFilteredImportVPN(
struct rfapi_withdraw *wcb = EVENT_ARG( struct rfapi_withdraw *wcb = EVENT_ARG(
bpi->extra->vnc->vnc.import.timer); bpi->extra->vnc->vnc.import.timer);
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
EVENT_OFF(bpi->extra->vnc->vnc.import EVENT_OFF(bpi->extra->vnc->vnc.import
.timer); .timer);
@ -3763,7 +3729,6 @@ void rfapiBgpInfoFilteredImportVPN(
struct rfapi_withdraw *wcb = struct rfapi_withdraw *wcb =
EVENT_ARG(bpi->extra->vnc->vnc.import.timer); EVENT_ARG(bpi->extra->vnc->vnc.import.timer);
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb); XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
EVENT_OFF(bpi->extra->vnc->vnc.import.timer); EVENT_OFF(bpi->extra->vnc->vnc.import.timer);
} }
@ -4515,7 +4480,6 @@ static void rfapiDeleteRemotePrefixesIt(
RFAPI_UPDATE_ITABLE_COUNT( RFAPI_UPDATE_ITABLE_COUNT(
bpi, wcb->import_table, bpi, wcb->import_table,
afi, 1); afi, 1);
rwcb_del(&_rwcbhash, wcb);
XFREE(MTYPE_RFAPI_WITHDRAW, XFREE(MTYPE_RFAPI_WITHDRAW,
wcb); wcb);
EVENT_OFF(bpi->extra->vnc->vnc EVENT_OFF(bpi->extra->vnc->vnc
@ -4840,33 +4804,3 @@ uint32_t rfapiGetHolddownFromLifetime(uint32_t lifetime)
else else
return RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY; return RFAPI_LIFETIME_INFINITE_WITHDRAW_DELAY;
} }
void rfapi_import_init(void)
{
rwcb_init(&_rwcbhash);
}
void rfapi_import_terminate(void)
{
struct rfapi_withdraw *wcb;
struct bgp_path_info *bpi;
void (*timer_service_func)(struct event *t);
struct event t;
vnc_zlog_debug_verbose("%s: cleaning up %zu pending timers", __func__,
rwcb_count(&_rwcbhash));
/*
* clean up memory allocations stored in pending timers
*/
while ((wcb = rwcb_pop(&_rwcbhash))) {
bpi = wcb->info;
assert(wcb == EVENT_ARG(bpi->extra->vnc->vnc.import.timer));
EVENT_OFF(bpi->extra->vnc->vnc.import.timer);
timer_service_func = wcb->timer_service_func;
memset(&t, 0, sizeof(t));
t.arg = wcb;
(*timer_service_func)(&t); /* frees wcb */
}
}

View file

@ -225,7 +225,4 @@ extern void rfapiCountAllItRoutes(int *pALRcount, /* active local routes */
--------------------------------------------*/ --------------------------------------------*/
extern uint32_t rfapiGetHolddownFromLifetime(uint32_t lifetime); extern uint32_t rfapiGetHolddownFromLifetime(uint32_t lifetime);
extern void rfapi_import_init(void);
extern void rfapi_import_terminate(void);
#endif /* QUAGGA_HGP_RFAPI_IMPORT_H */ #endif /* QUAGGA_HGP_RFAPI_IMPORT_H */

View file

@ -18,7 +18,6 @@
#include "lib/log.h" #include "lib/log.h"
#include "lib/skiplist.h" #include "lib/skiplist.h"
#include "lib/workqueue.h" #include "lib/workqueue.h"
#include <typesafe.h>
#include "bgpd/bgpd.h" #include "bgpd/bgpd.h"
#include "bgpd/bgp_route.h" #include "bgpd/bgp_route.h"
@ -45,7 +44,8 @@
/* forward decl */ /* forward decl */
#if DEBUG_NHL #if DEBUG_NHL
static void rfapiRibShowRibSl(void *stream, const struct prefix *pfx, struct skiplist *sl); static void rfapiRibShowRibSl(void *stream, struct prefix *pfx,
struct skiplist *sl);
#endif #endif
/* /*
@ -234,45 +234,9 @@ void rfapiFreeRfapiVnOptionChain(struct rfapi_vn_option *p)
} }
/*
* Hash to keep track of outstanding timers so we can force them to
* expire at shutdown time, thus freeing their allocated memory.
*/
PREDECL_HASH(rrtcb);
/*
* Timer control block for recently-deleted and expired routes
*/
struct rfapi_rib_tcb {
struct rrtcb_item tcbi;
struct rfapi_descriptor *rfd;
struct skiplist *sl;
struct rfapi_info *ri;
struct agg_node *rn;
int flags;
#define RFAPI_RIB_TCB_FLAG_DELETED 0x00000001
};
static int _rrtcb_cmp(const struct rfapi_rib_tcb *t1, const struct rfapi_rib_tcb *t2)
{
return (t1 != t2);
}
static uint32_t _rrtcb_hash(const struct rfapi_rib_tcb *t)
{
return (uintptr_t)t & 0xffffffff;
}
DECLARE_HASH(rrtcb, struct rfapi_rib_tcb, tcbi, _rrtcb_cmp, _rrtcb_hash);
static struct rrtcb_head _rrtcbhash;
static void rfapi_info_free(struct rfapi_info *goner) static void rfapi_info_free(struct rfapi_info *goner)
{ {
if (goner) { if (goner) {
#if DEBUG_CLEANUP
zlog_debug("%s: ri %p, timer %p", __func__, goner, goner->timer);
#endif
if (goner->tea_options) { if (goner->tea_options) {
rfapiFreeBgpTeaOptionChain(goner->tea_options); rfapiFreeBgpTeaOptionChain(goner->tea_options);
goner->tea_options = NULL; goner->tea_options = NULL;
@ -289,19 +253,32 @@ static void rfapi_info_free(struct rfapi_info *goner)
struct rfapi_rib_tcb *tcb; struct rfapi_rib_tcb *tcb;
tcb = EVENT_ARG(goner->timer); tcb = EVENT_ARG(goner->timer);
#if DEBUG_CLEANUP
zlog_debug("%s: ri %p, tcb %p", __func__, goner, tcb);
#endif
EVENT_OFF(goner->timer); EVENT_OFF(goner->timer);
rrtcb_del(&_rrtcbhash, tcb);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
} }
XFREE(MTYPE_RFAPI_INFO, goner); XFREE(MTYPE_RFAPI_INFO, goner);
} }
} }
static void _rfapiRibExpireTimer(struct rfapi_rib_tcb *tcb) /*
* Timer control block for recently-deleted and expired routes
*/
struct rfapi_rib_tcb {
struct rfapi_descriptor *rfd;
struct skiplist *sl;
struct rfapi_info *ri;
struct agg_node *rn;
int flags;
#define RFAPI_RIB_TCB_FLAG_DELETED 0x00000001
};
/*
* remove route from rib
*/
static void rfapiRibExpireTimer(struct event *t)
{ {
struct rfapi_rib_tcb *tcb = EVENT_ARG(t);
RFAPI_RIB_CHECK_COUNTS(1, 0); RFAPI_RIB_CHECK_COUNTS(1, 0);
/* /*
@ -332,22 +309,11 @@ static void _rfapiRibExpireTimer(struct rfapi_rib_tcb *tcb)
agg_unlock_node(tcb->rn); agg_unlock_node(tcb->rn);
} }
rrtcb_del(&_rrtcbhash, tcb);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
RFAPI_RIB_CHECK_COUNTS(1, 0); RFAPI_RIB_CHECK_COUNTS(1, 0);
} }
/*
* remove route from rib
*/
static void rfapiRibExpireTimer(struct event *t)
{
struct rfapi_rib_tcb *tcb = EVENT_ARG(t);
_rfapiRibExpireTimer(tcb);
}
static void rfapiRibStartTimer(struct rfapi_descriptor *rfd, static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
struct rfapi_info *ri, struct rfapi_info *ri,
struct agg_node *rn, /* route node attached to */ struct agg_node *rn, /* route node attached to */
@ -383,8 +349,6 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
event_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime, event_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
&ri->timer); &ri->timer);
rrtcb_add(&_rrtcbhash, tcb);
} }
extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */ extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */
@ -555,7 +519,6 @@ void rfapiRibClear(struct rfapi_descriptor *rfd)
tcb = EVENT_ARG( tcb = EVENT_ARG(
ri->timer); ri->timer);
EVENT_OFF(ri->timer); EVENT_OFF(ri->timer);
rrtcb_del(&_rrtcbhash, tcb);
XFREE(MTYPE_RFAPI_RECENT_DELETE, XFREE(MTYPE_RFAPI_RECENT_DELETE,
tcb); tcb);
} }
@ -889,6 +852,11 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
int rib_node_started_nonempty = 0; int rib_node_started_nonempty = 0;
int sendingsomeroutes = 0; int sendingsomeroutes = 0;
const struct prefix *p; const struct prefix *p;
#if DEBUG_PROCESS_PENDING_NODE
unsigned int count_rib_initial = 0;
unsigned int count_pend_vn_initial = 0;
unsigned int count_pend_cost_initial = 0;
#endif
assert(pn); assert(pn);
p = agg_node_get_prefix(pn); p = agg_node_get_prefix(pn);
@ -917,6 +885,19 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
slPendPt = (struct skiplist *)(pn->aggregate); slPendPt = (struct skiplist *)(pn->aggregate);
lPendCost = (struct list *)(pn->info); lPendCost = (struct list *)(pn->info);
#if DEBUG_PROCESS_PENDING_NODE
/* debugging */
if (slRibPt)
count_rib_initial = skiplist_count(slRibPt);
if (slPendPt)
count_pend_vn_initial = skiplist_count(slPendPt);
if (lPendCost && lPendCost != (struct list *)1)
count_pend_cost_initial = lPendCost->count;
#endif
/* /*
* Handle special case: delete all routes at prefix * Handle special case: delete all routes at prefix
*/ */
@ -939,7 +920,6 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
tcb = EVENT_ARG(ri->timer); tcb = EVENT_ARG(ri->timer);
EVENT_OFF(ri->timer); EVENT_OFF(ri->timer);
rrtcb_del(&_rrtcbhash, tcb);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
} }
@ -1025,7 +1005,6 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
tcb = EVENT_ARG(ori->timer); tcb = EVENT_ARG(ori->timer);
EVENT_OFF(ori->timer); EVENT_OFF(ori->timer);
rrtcb_del(&_rrtcbhash, tcb);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
} }
@ -1038,11 +1017,6 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
#endif #endif
} else { } else {
#if DEBUG_PROCESS_PENDING_NODE
vnc_zlog_debug_verbose("%s: slRibPt ri %p matched in pending list",
__func__, ori);
#endif
/* /*
* Found in pending list. If same lifetime, * Found in pending list. If same lifetime,
* cost, options, * cost, options,
@ -1066,10 +1040,14 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapi_info_free( rfapi_info_free(
ri); /* grr... */ ri); /* grr... */
} }
#if DEBUG_PROCESS_PENDING_NODE
vnc_zlog_debug_verbose("%s: same info", __func__);
#endif
} }
#if DEBUG_PROCESS_PENDING_NODE
vnc_zlog_debug_verbose(
"%s: slRibPt ri %p matched in pending list, %s",
__func__, ori,
(same ? "same info"
: "different info"));
#endif
} }
} }
/* /*
@ -1361,7 +1339,6 @@ callback:
tcb = EVENT_ARG(ri->timer); tcb = EVENT_ARG(ri->timer);
EVENT_OFF(ri->timer); EVENT_OFF(ri->timer);
rrtcb_del(&_rrtcbhash, tcb);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb); XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
} }
RFAPI_RIB_CHECK_COUNTS(0, delete_list->count); RFAPI_RIB_CHECK_COUNTS(0, delete_list->count);
@ -2308,7 +2285,8 @@ static int print_rib_sl(int (*fp)(void *, const char *, ...), struct vty *vty,
/* /*
* This one is for debugging (set stream to NULL to send output to log) * This one is for debugging (set stream to NULL to send output to log)
*/ */
static void rfapiRibShowRibSl(void *stream, const struct prefix *pfx, struct skiplist *sl) static void rfapiRibShowRibSl(void *stream, struct prefix *pfx,
struct skiplist *sl)
{ {
int (*fp)(void *, const char *, ...); int (*fp)(void *, const char *, ...);
struct vty *vty; struct vty *vty;
@ -2448,25 +2426,3 @@ void rfapiRibShowResponses(void *stream, struct prefix *pfx_match,
fp(out, "\n"); fp(out, "\n");
} }
} }
void rfapi_rib_init(void)
{
rrtcb_init(&_rrtcbhash);
}
void rfapi_rib_terminate(void)
{
struct rfapi_rib_tcb *tcb;
vnc_zlog_debug_verbose("%s: cleaning up %zu pending timers", __func__,
rrtcb_count(&_rrtcbhash));
/*
* Clean up memory allocations stored in pending timers
*/
while ((tcb = rrtcb_pop(&_rrtcbhash))) {
assert(tcb == EVENT_ARG(tcb->ri->timer));
EVENT_OFF(tcb->ri->timer);
_rfapiRibExpireTimer(tcb); /* deletes hash entry, frees tcb */
}
}

View file

@ -138,7 +138,4 @@ extern int rfapi_rib_key_cmp(const void *k1, const void *k2);
extern void rfapiAdbFree(struct rfapi_adb *adb); extern void rfapiAdbFree(struct rfapi_adb *adb);
extern void rfapi_rib_init(void);
extern void rfapi_rib_terminate(void);
#endif /* QUAGGA_HGP_RFAPI_RIB_H */ #endif /* QUAGGA_HGP_RFAPI_RIB_H */

View file

@ -338,12 +338,13 @@ static int process_unicast_route(struct bgp *bgp, /* in */
hattr = *attr; hattr = *attr;
if (rmap) { if (rmap) {
struct bgp_path_info pinfo = {}; struct bgp_path_info info;
route_map_result_t ret; route_map_result_t ret;
pinfo.peer = peer; memset(&info, 0, sizeof(info));
pinfo.attr = &hattr; info.peer = peer;
ret = route_map_apply(rmap, prefix, &pinfo); info.attr = &hattr;
ret = route_map_apply(rmap, prefix, &info);
if (ret == RMAP_DENYMATCH) { if (ret == RMAP_DENYMATCH) {
bgp_attr_flush(&hattr); bgp_attr_flush(&hattr);
vnc_zlog_debug_verbose( vnc_zlog_debug_verbose(
@ -767,12 +768,13 @@ static void vnc_import_bgp_add_route_mode_plain(struct bgp *bgp,
hattr = *attr; hattr = *attr;
if (rmap) { if (rmap) {
struct bgp_path_info pinfo = {}; struct bgp_path_info info;
route_map_result_t ret; route_map_result_t ret;
pinfo.peer = peer; memset(&info, 0, sizeof(info));
pinfo.attr = &hattr; info.peer = peer;
ret = route_map_apply(rmap, prefix, &pinfo); info.attr = &hattr;
ret = route_map_apply(rmap, prefix, &info);
if (ret == RMAP_DENYMATCH) { if (ret == RMAP_DENYMATCH) {
bgp_attr_flush(&hattr); bgp_attr_flush(&hattr);
vnc_zlog_debug_verbose( vnc_zlog_debug_verbose(

View file

@ -467,7 +467,6 @@ AC_C_FLAG([-Wbad-function-cast])
AC_C_FLAG([-Wwrite-strings]) AC_C_FLAG([-Wwrite-strings])
AC_C_FLAG([-Wundef]) AC_C_FLAG([-Wundef])
AC_C_FLAG([-Wimplicit-fallthrough]) AC_C_FLAG([-Wimplicit-fallthrough])
AC_C_FLAG([-Wshadow])
if test "$enable_gcc_ultra_verbose" = "yes" ; then if test "$enable_gcc_ultra_verbose" = "yes" ; then
AC_C_FLAG([-Wcast-qual]) AC_C_FLAG([-Wcast-qual])
AC_C_FLAG([-Wmissing-noreturn]) AC_C_FLAG([-Wmissing-noreturn])
@ -475,6 +474,7 @@ if test "$enable_gcc_ultra_verbose" = "yes" ; then
AC_C_FLAG([-Wunreachable-code]) AC_C_FLAG([-Wunreachable-code])
AC_C_FLAG([-Wpacked]) AC_C_FLAG([-Wpacked])
AC_C_FLAG([-Wpadded]) AC_C_FLAG([-Wpadded])
AC_C_FLAG([-Wshadow])
else else
AC_C_FLAG([-Wno-unused-result]) AC_C_FLAG([-Wno-unused-result])
fi fi
@ -732,6 +732,8 @@ AC_ARG_ENABLE([mgmtd_local_validations],
AS_HELP_STRING([--enable-mgmtd-local-validations], [dev: unimplemented local validation])) AS_HELP_STRING([--enable-mgmtd-local-validations], [dev: unimplemented local validation]))
AC_ARG_ENABLE([mgmtd_test_be_client], AC_ARG_ENABLE([mgmtd_test_be_client],
AS_HELP_STRING([--enable-mgmtd-test-be-client], [build test backend client])) AS_HELP_STRING([--enable-mgmtd-test-be-client], [build test backend client]))
AC_ARG_ENABLE([rustlibd],
AS_HELP_STRING([--enable-rustlibd], [enable rust library based daemon template]))
AC_ARG_ENABLE([fpm_listener], AC_ARG_ENABLE([fpm_listener],
AS_HELP_STRING([--enable-fpm-listener], [build fpm listener test program])) AS_HELP_STRING([--enable-fpm-listener], [build fpm listener test program]))
AC_ARG_ENABLE([ripd], AC_ARG_ENABLE([ripd],
@ -1054,11 +1056,6 @@ AC_MSG_FAILURE([Please specify a number from 0-12 for log precision ARG])
;; ;;
esac esac
with_log_timestamp_precision=${with_log_timestamp_precision:-0} with_log_timestamp_precision=${with_log_timestamp_precision:-0}
if test "${with_log_timestamp_precision}" != 0; then
AC_SUBST([LOG_TIMESTAMP_PRECISION_CLI], ["
log timestamp precision ${with_log_timestamp_precision}"])
AM_SUBST_NOTMAKE([LOG_TIMESTAMP_PRECISION_CLI])
fi
AC_DEFINE_UNQUOTED([LOG_TIMESTAMP_PRECISION], [${with_log_timestamp_precision}], [Startup zlog timestamp precision]) AC_DEFINE_UNQUOTED([LOG_TIMESTAMP_PRECISION], [${with_log_timestamp_precision}], [Startup zlog timestamp precision])
AC_DEFINE_UNQUOTED([VTYSH_PAGER], ["$VTYSH_PAGER"], [What pager to use]) AC_DEFINE_UNQUOTED([VTYSH_PAGER], ["$VTYSH_PAGER"], [What pager to use])
@ -1877,6 +1874,10 @@ AS_IF([test "$enable_ripngd" != "no"], [
AC_DEFINE([HAVE_RIPNGD], [1], [ripngd]) AC_DEFINE([HAVE_RIPNGD], [1], [ripngd])
]) ])
AS_IF([test "$enable_rustlibd" != "no"], [
AC_DEFINE([HAVE_RUSTLIBD], [1], [rustlibd])
])
AS_IF([test "$enable_ospfd" != "no"], [ AS_IF([test "$enable_ospfd" != "no"], [
AC_DEFINE([HAVE_OSPFD], [1], [ospfd]) AC_DEFINE([HAVE_OSPFD], [1], [ospfd])
]) ])
@ -2051,7 +2052,7 @@ if test "$enable_snmp" != "" -a "$enable_snmp" != "no"; then
# net-snmp lists all of its own dependencies. we absolutely do not want that # net-snmp lists all of its own dependencies. we absolutely do not want that
# among other things we avoid a GPL vs. OpenSSL license conflict here # among other things we avoid a GPL vs. OpenSSL license conflict here
for removelib in crypto ssl sensors pci wrap; do for removelib in crypto ssl sensors pci wrap; do
SNMP_LIBS="`echo $SNMP_LIBS | sed -e 's/-l'$removelib'/ /g'`" SNMP_LIBS="`echo $SNMP_LIBS | sed -e 's/\(^\|\s\)-l'$removelib'\b/ /g' -e 's/\(^\|\s\)\([^\s]*\/\)\?lib'$removelib'\.[^\s]\+\b/ /g'`"
done done
AC_MSG_CHECKING([whether we can link to Net-SNMP]) AC_MSG_CHECKING([whether we can link to Net-SNMP])
AC_LINK_IFELSE_FLAGS([$SNMP_CFLAGS], [$SNMP_LIBS], [AC_LANG_PROGRAM([ AC_LINK_IFELSE_FLAGS([$SNMP_CFLAGS], [$SNMP_LIBS], [AC_LANG_PROGRAM([
@ -2118,6 +2119,40 @@ if test "$enable_config_rollbacks" = "yes"; then
]) ])
fi fi
dnl ------------------------------------------------------
dnl rust general (add to conditional any new rust daemons)
dnl ------------------------------------------------------
if test "$enable_rustlibd" = "yes"; then
AC_PATH_PROG([CARGO], [cargo], [notfound])
AS_IF([test "$CARGO" = "notfound"], [AC_MSG_ERROR([cargo is required])])
AC_PATH_PROG([RUSTC], [rustc], [notfound])
AS_IF([test "$RUSTC" = "notfound"], [AC_MSG_ERROR([rustc is required])])
if test "$enable_dev_build" = "yes"; then
CARGO_TARGET_DIR=debug
else
CARGO_TARGET_DIR=release
fi
AC_SUBST(CARGO_TARGET_DIR)
fi
dnl ---------------
dnl rustlibd
dnl ---------------
if test "$enable_rustlibd" = "yes"; then
AC_CONFIG_FILES([rustlibd/build.rs rustlibd/wrapper.h rustlibd/Cargo.toml])
AC_CONFIG_COMMANDS([gen-dot-cargo-config], [
if test "$ac_abs_top_builddir" != "$ac_abs_top_srcdir"; then
mkdir -p ${srcdir}/rustlibd/.cargo
if ! test -e "${srcdir}/rustlibd/.cargo/config.toml"; then
printf '[[build]]\ntarget-dir = "%s"\n' "${ac_abs_top_builddir}/rustlibd/target" > "${srcdir}/rustlibd/.cargo/config.toml"
fi
fi]
)
fi
dnl --------------- dnl ---------------
dnl sysrepo dnl sysrepo
dnl --------------- dnl ---------------
@ -2787,6 +2822,7 @@ AM_CONDITIONAL([ENABLE_BGP_VNC], [test "$enable_bgp_vnc" != "no"])
AM_CONDITIONAL([BGP_BMP], [$bgpd_bmp]) AM_CONDITIONAL([BGP_BMP], [$bgpd_bmp])
dnl northbound dnl northbound
AM_CONDITIONAL([SQLITE3], [$SQLITE3]) AM_CONDITIONAL([SQLITE3], [$SQLITE3])
AM_CONDITIONAL([RUSTLIBD], [test "$enable_rustlibd" = "yes"])
AM_CONDITIONAL([SYSREPO], [test "$enable_sysrepo" = "yes"]) AM_CONDITIONAL([SYSREPO], [test "$enable_sysrepo" = "yes"])
AM_CONDITIONAL([GRPC], [test "$enable_grpc" = "yes"]) AM_CONDITIONAL([GRPC], [test "$enable_grpc" = "yes"])
AM_CONDITIONAL([ZEROMQ], [test "$ZEROMQ" = "true"]) AM_CONDITIONAL([ZEROMQ], [test "$ZEROMQ" = "true"])

View file

@ -32,7 +32,5 @@ Building FRR
building-frr-for-ubuntu1804 building-frr-for-ubuntu1804
building-frr-for-ubuntu2004 building-frr-for-ubuntu2004
building-frr-for-ubuntu2204 building-frr-for-ubuntu2204
building-frr-for-ubuntu2404
building-docker building-docker
cross-compiling cross-compiling
building-doc

View file

@ -23,5 +23,5 @@ FRRouting Developer's Guide
path path
pceplib pceplib
link-state link-state
rust-dev
northbound/northbound northbound/northbound
sbfd

View file

@ -187,17 +187,6 @@ To switch between compatible data structures, only these two lines need to be
changes. To switch to a data structure with a different API, some source changes. To switch to a data structure with a different API, some source
changes are necessary. changes are necessary.
As an example for the developer, here are some commits that convert
over to using the typesafe data structures:
+------------------------------------------------------+------------------------------------+
| Commit Message | SHA |
+======================================================+====================================+
| bgpd: Convert the bgp_advertise_attr->adv to a fifo | b2e0c12d723a6464f67491ceb9 |
+------------------------------------------------------+------------------------------------+
| zebra: convert LSP nhlfe lists to use typesafe lists | ee70f629792b90f92ea7e6bece |
+------------------------------------------------------+------------------------------------+
Common iteration macros Common iteration macros
----------------------- -----------------------
@ -773,20 +762,6 @@ Why is it ``PREDECL`` + ``DECLARE`` instead of ``DECLARE`` + ``DEFINE``?
2 ``.c`` files, but only **if the macro arguments are identical.** Maybe 2 ``.c`` files, but only **if the macro arguments are identical.** Maybe
don't do that unless you really need it. don't do that unless you really need it.
COMMON PROBLEMS
---------------
The ``fini`` call of the various typesafe structures actually closes the data
structure off, and attempts to use the data structure after that introduce
intentional crashes. This leads to situations, when converting from an older
data structure to the new typesafe ones, where on shutdown the older data
structure would still be accessed. Such access used to be ignored or result
in benign code running; with the new typesafe data structures it will crash.
Be aware when modifying the code base that this sort of change might end up
with crashes on shutdown, and work must be done to ensure that the newly
changed code does not use the data structure after the ``fini`` call.
FRR lists FRR lists
--------- ---------

View file

@ -429,8 +429,3 @@ The client and server sides of oper-state query
.. figure:: ../figures/cli-oper-state.svg .. figure:: ../figures/cli-oper-state.svg
:align: center :align: center
Config datastore cleanup for non-implicit commits (i.e., file reads currently)
.. figure:: ../figures/datastores.svg
:align: center

177
doc/developer/rust-dev.rst Normal file
View file

@ -0,0 +1,177 @@
.. -*- coding: utf-8 -*-
..
.. SPDX-License-Identifier: GPL-2.0-or-later
..
.. February 26 2025, Christian Hopps <chopps@labn.net>
..
.. Copyright (c) 2025, LabN Consulting, L.L.C.
..
.. _rust_dev:
Rust Development
================
Overview
--------
The FRR project has started adding support for daemons written in Rust. The
following sections document the supporting infrastructure added to date. This is
the initial approach to Rust integration; we expect it to change as best
practices within the community evolve.
General Structure
-----------------
An example template of the general structure of a Rust-based daemon can be found
in the ``rustlibd/`` sub-directory. The recommended structure so far is to use a
C main file and main function to drive initialization of the daemon, calling out
to Rust at 3 critical points. The Rust code is then built as a static library and
linked into the daemon. Rust bindings are built for ``libfrr`` and accessed
through a c_shim sub-module. Here are the files as of the time of this writing:
.. code-block:: make
rustlibd/
.gitignore
Cargo.toml.in
Makefile
README.org
build.rs.in
c_shim.rs
frrutil.rs (symlink)
rustlib_lib.rs
rustlib_main.c
sandbox.rs
subdir.am
wrapper.h.in
:file:`frrutil.rs` is a symlink to :file:`../lib/frrutil.rs` kept here to keep
various rust tools happy about files being inside or below the main source
directory.
NOTE: if you use a separate build dir (named `build` in the example below) and
you want your development environment to properly analyze the code (e.g.,
vs-code/emacs LSP mode), you should create two additional symlinks and create a
local :file:`Cargo.toml` file like so:
.. code-block:: sh
cd frr/rustlibd
sed -e 's,@srcdir@/,,g' < Cargo.toml.in > Cargo.toml
ln -s ../build/rustlibd/build.rs .
ln -s ../build/rustlibd/wrapper.h .
Logging
-------
FRR logging is transparently supported using some bridging code that connects
the native rust ``tracing`` calls directly to the ``zlog`` functionality in FRR.
The only thing you have to do is call the function :func:`bridge_rust_logging`
at startup. This is already done for you in the `rustlibd` template :func:`main`
if you started with that code.
.. code-block:: rust
use tracing::{debug, info};
fn myrustfunc(sval: &str, uval: u32) {
debug!("Some DEBUG level output of str value: {}", sval);
info!("Some INFO level output of uint value: {}", uval);
}
Northbound Integration
----------------------
Support for the FRR northbound callback system is handled through Rust macros.
These macros define C shims which then call your Rust functions using natural
Rust types; the macros hide the unsafe and tricky conversion code. You put
pointers to the generated C shim functions into the
:struct:`frr_yang_module_info` structure.
NOTE: Locking will probably be important, as your callbacks will be called in the
FRR event loop main thread while your Rust code is probably running in its own
thread (perhaps using the tokio async runtime as set up in the
:file:`rustlibd` template).
Here's an example of defining handlers for the config leaf `bvalue`:
.. code-block:: C
const struct frr_yang_module_info frr_my_module_nb_info = {
.name = "frr-my-module",
.nodes = {
{
.xpath = "/frr-my-module:lib/bvalue",
.cbs = {
.modify = my_module_bvalue_modify_shim,
.destroy = my_module_bvalue_destroy_shim
}
},
...
.. code-block:: rust
use crate::{define_nb_destroy_shim, define_nb_modify_shim};
pub(crate) fn my_module_bvalue_modify(
event: NbEvent,
_node: &DataNodeRef,
) -> Result<(), nb_error> {
debug!("RUST: bvalue modify: {}", event);
match event {
NbEvent::APPLY(_) => {
// handle the change to the `bvalue` leaf.
Ok(())
},
_ => Ok(()), // All other events just return Ok.
}
}
pub(crate) fn my_module_bvalue_destroy(
event: NbEvent,
_node: &DataNodeRef,
) -> Result<(), nb_error> {
// handle the removal of the `bvalue` leaf.
// ...
}
define_nb_modify_shim!(
my_module_bvalue_modify_shim,
my_module_bvalue_modify);
define_nb_destroy_shim!(
my_module_bvalue_destroy_shim,
my_module_bvalue_destroy);
CLI commands
~~~~~~~~~~~~
For CLI commands you should continue to write the DEFPY_YANG() handlers in C,
which simply set your YANG config data based on the args to DEFPY_YANG(). The
actual configuration change is then handled by the Rust-based callbacks you
defined for your YANG model, as described above.
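A minimal sketch of such a handler is shown below. It is not taken from the
``rustlibd`` template; the command name, help strings, xpath, and the
``frr-my-module``/``bvalue`` names are the same hypothetical ones used in the
examples above.

.. code-block:: C

   DEFPY_YANG (my_module_bvalue, my_module_bvalue_cmd,
               "[no] bvalue [WORD$val]",
               NO_STR
               "Set the bvalue leaf\n"
               "Value to set\n")
   {
           /* Only queue the config change here; the actual work happens in
            * the Rust northbound callbacks registered for this xpath.
            */
           if (no)
                   nb_cli_enqueue_change(vty, "/frr-my-module:lib/bvalue",
                                         NB_OP_DESTROY, NULL);
           else
                   nb_cli_enqueue_change(vty, "/frr-my-module:lib/bvalue",
                                         NB_OP_MODIFY, val);

           return nb_cli_apply_changes(vty, NULL);
   }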
Operational State
~~~~~~~~~~~~~~~~~
You have two choices for operational state. You can implement the operational
state callbacks in Rust and use the Rust macros to bridge these to the
:struct:`frr_yang_module_info` definition as you did with your config handlers,
or you can keep your operational state in a ``yang-rs`` (i.e., ``libyang``)
based tree. A sketch of using the macros for the latter approach follows the
next paragraph.
If you choose to do the latter and save all your operational state in a
``libyang`` :struct:`DataTree`, you only need to define two callback functions:
a :func:`get_tree_locked()` function which returns the :struct:`DataTree` in a
:struct:`MutexGuard` (i.e., a held lock), and an :func:`unlock_tree()` function
which is passed back the :struct:`MutexGuard` object for unlocking. You then use
two macros, :func:`define_nb_get_tree_locked` and :func:`define_nb_unlock_tree`,
to create the C-based shims to plug into your :struct:`frr_yang_module_info`
structure.
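A rough sketch of that approach follows. It assumes the two tree macros take a
``(shim_name, rust_function)`` pair like the config macros above (their exact
signatures may differ), and the ``yang-rs`` import path, the static state
holder, and the ``my_module`` names are placeholders, not verified API.

.. code-block:: rust

   use std::sync::{Mutex, MutexGuard, OnceLock};
   // Assumed crate/path for the yang-rs DataTree; adjust to your daemon.
   use yang3::data::DataTree;
   use crate::{define_nb_get_tree_locked, define_nb_unlock_tree};

   // Hypothetical global holding this daemon's operational state tree;
   // how the tree is created and updated is up to your daemon.
   static OPER_STATE: OnceLock<Mutex<DataTree<'static>>> = OnceLock::new();

   pub(crate) fn my_module_get_tree_locked() -> MutexGuard<'static, DataTree<'static>> {
       // Hand the northbound code the tree with its lock held.
       OPER_STATE.get().expect("oper state initialized").lock().unwrap()
   }

   pub(crate) fn my_module_unlock_tree(guard: MutexGuard<'static, DataTree<'static>>) {
       // Dropping the guard releases the lock taken in get_tree_locked().
       drop(guard);
   }

   define_nb_get_tree_locked!(my_module_get_tree_locked_shim, my_module_get_tree_locked);
   define_nb_unlock_tree!(my_module_unlock_tree_shim, my_module_unlock_tree);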
NOTE: As with config, locking will probably be important, as your callbacks will
be called in the FRR event loop main thread while your Rust code is probably
running in its own thread.

View file

@ -1,291 +0,0 @@
<mxfile host="Electron" agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/26.2.2 Chrome/134.0.6998.178 Electron/35.1.2 Safari/537.36" version="26.2.2">
<diagram name="Page-1" id="i24xzCYeKZV1rkTH0XTW">
<mxGraphModel dx="1667" dy="1191" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="1100" pageHeight="850" math="0" shadow="0">
<root>
<mxCell id="0" />
<mxCell id="1" parent="0" />
<mxCell id="U9ftda_CDvz5WDsUi4ve-36" value="nb_candidate_commit_apply()" style="whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;rounded=1;fillStyle=auto;strokeWidth=1;verticalAlign=top;" vertex="1" parent="1">
<mxGeometry x="890" y="670" width="180" height="136.87" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-29" value="&lt;i&gt;&lt;font style=&quot;font-size: 16px;&quot;&gt;Daemon CLI Parsing (lib/vty.c)&lt;/font&gt;&lt;/i&gt;" style="rounded=1;whiteSpace=wrap;html=1;dashed=1;fillColor=#dae8fc;strokeColor=default;fillStyle=solid;strokeWidth=1;perimeterSpacing=0;dashPattern=1 2;gradientColor=none;gradientDirection=radial;glass=0;shadow=0;opacity=50;verticalAlign=bottom;spacingBottom=30;" parent="1" vertex="1">
<mxGeometry x="50" y="220" width="660" height="170" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-7" value="&lt;div style=&quot;font-size: 12px;&quot;&gt;mgmtd&lt;/div&gt;&lt;div style=&quot;font-size: 12px;&quot;&gt;(new config path)&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;arcSize=24;fillColor=#dae8fc;strokeColor=#6c8ebf;shadow=1;comic=0;labelBackgroundColor=none;fontFamily=Verdana;fontSize=12;align=center;verticalAlign=top;fontStyle=1" parent="1" vertex="1">
<mxGeometry x="230" y="40" width="490" height="270" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-13" value="&lt;div&gt;&lt;font&gt;vty_shared_&lt;/font&gt;&lt;/div&gt;&lt;div&gt;&lt;font&gt;candidate_config&lt;/font&gt;&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="136.25" y="70" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-14" value="&lt;div&gt;running_config&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="260" y="70" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-18" value="" style="group;shadow=0;" parent="1" vertex="1" connectable="0">
<mxGeometry x="80" y="60" width="270" height="210" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-19" value="&lt;div style=&quot;font-size: 12px;&quot;&gt;B daemon&amp;nbsp;&lt;span style=&quot;background-color: transparent; color: light-dark(rgb(0, 0, 0), rgb(255, 255, 255));&quot;&gt;(old direct vty)&lt;/span&gt;&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;arcSize=24;fillColor=#fad9d5;strokeColor=#ae4132;shadow=1;comic=0;labelBackgroundColor=none;fontFamily=Verdana;fontSize=12;align=center;verticalAlign=top;fontStyle=1" parent="QL32OzfzetEIIOdSfswY-18" vertex="1">
<mxGeometry x="-10" width="270" height="190" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-20" value="&lt;div&gt;&lt;font&gt;vty_shared_&lt;/font&gt;&lt;/div&gt;&lt;div&gt;&lt;font&gt;candidate_config&lt;/font&gt;&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="QL32OzfzetEIIOdSfswY-18" vertex="1">
<mxGeometry x="20" y="30" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-21" value="&lt;div&gt;running_config&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#d5e8d4;strokeColor=#82b366;" parent="QL32OzfzetEIIOdSfswY-18" vertex="1">
<mxGeometry x="150" y="30" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-23" value="&lt;div style=&quot;font-size: 12px;&quot;&gt;A daemon (old direct vty)&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;arcSize=24;fillColor=#fad9d5;strokeColor=#ae4132;shadow=1;comic=0;labelBackgroundColor=none;fontFamily=Verdana;fontSize=12;align=center;verticalAlign=top;fontStyle=1" parent="QL32OzfzetEIIOdSfswY-18" vertex="1">
<mxGeometry x="-40" y="20" width="270" height="190" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-25" value="&lt;div&gt;running_config&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#d5e8d4;strokeColor=#82b366;" parent="1" vertex="1">
<mxGeometry x="200" y="110" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" parent="1" target="4hLhriEXD62TuEoW85Ij-1" edge="1">
<mxGeometry relative="1" as="geometry">
<mxPoint x="648.75" y="160" as="sourcePoint" />
<mxPoint x="487.5" y="585" as="targetPoint" />
<Array as="points">
<mxPoint x="790" y="160" />
<mxPoint x="790" y="530" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-8" value="&lt;div&gt;&lt;font&gt;vty_mgmt_&lt;/font&gt;&lt;/div&gt;&lt;div&gt;&lt;font&gt;candidate_config&lt;/font&gt;&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="551.25" y="70" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-26" value="mm-&amp;gt;running" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="370" y="230" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-14" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=1;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="QL32OzfzetEIIOdSfswY-27" target="QL32OzfzetEIIOdSfswY-26">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-27" value="mm-&amp;gt;candidate" style="rounded=0;whiteSpace=wrap;html=1;" parent="1" vertex="1">
<mxGeometry x="540" y="230" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-1" value="vty_config_entry()" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f8cecc;strokeColor=#b85450;fillStyle=auto;strokeWidth=3;" parent="1" vertex="1">
<mxGeometry x="315" y="500" width="130" height="60" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-3" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" parent="1" source="QL32OzfzetEIIOdSfswY-24" target="4hLhriEXD62TuEoW85Ij-1" edge="1">
<mxGeometry relative="1" as="geometry">
<mxPoint x="120" y="260" as="sourcePoint" />
<mxPoint x="320" y="600" as="targetPoint" />
<Array as="points">
<mxPoint x="120" y="530" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-24" value="&lt;div&gt;&lt;font&gt;vty_shared_&lt;/font&gt;&lt;/div&gt;&lt;div&gt;&lt;font&gt;candidate_config&lt;/font&gt;&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="70" y="110" width="97.5" height="130" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-8" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-4" target="4hLhriEXD62TuEoW85Ij-7" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-4" value="CLI: config_exclusive()&lt;div&gt;(northbound_cli.c)&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d0cee2;strokeColor=#56517e;" parent="1" vertex="1">
<mxGeometry x="910" y="40" width="140" height="50" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-5" value="CLI: config_private()&lt;div&gt;(northbound_cli.c)&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d0cee2;strokeColor=#56517e;" parent="1" vertex="1">
<mxGeometry x="760" y="45" width="140" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-6" value="vty_config_entry()" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f8cecc;strokeColor=#b85450;fillStyle=auto;strokeWidth=3;" parent="1" vertex="1">
<mxGeometry x="860" y="230" width="120" height="60" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-10" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-7" target="4hLhriEXD62TuEoW85Ij-6" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-7" value="&lt;div&gt;private_config&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" parent="1" vertex="1">
<mxGeometry x="871.25" y="130" width="97.5" height="70" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-9" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-5" edge="1">
<mxGeometry relative="1" as="geometry">
<mxPoint x="910" y="130" as="targetPoint" />
<Array as="points">
<mxPoint x="850" y="110" />
<mxPoint x="911" y="110" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-15" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;" parent="1" source="4hLhriEXD62TuEoW85Ij-11" target="4hLhriEXD62TuEoW85Ij-1" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-20" value="2" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-11" target="4hLhriEXD62TuEoW85Ij-1" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-16" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;startArrow=classic;startFill=1;strokeWidth=2;" edge="1" parent="1" source="4hLhriEXD62TuEoW85Ij-11" target="U9ftda_CDvz5WDsUi4ve-15">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-17" value="1" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="U9ftda_CDvz5WDsUi4ve-16">
<mxGeometry x="0.0305" y="2" relative="1" as="geometry">
<mxPoint as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-19" value="1: (mgmtd only)" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="U9ftda_CDvz5WDsUi4ve-16">
<mxGeometry x="0.0074" y="1" relative="1" as="geometry">
<mxPoint as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-11" value="CLI: config_terminal()&lt;div&gt;(command.c)&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#d0cee2;strokeColor=#56517e;" parent="1" vertex="1">
<mxGeometry x="315" y="420" width="130" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-31" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-29" target="4hLhriEXD62TuEoW85Ij-11" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-27" value="&lt;div style=&quot;font-size: 12px;&quot;&gt;&lt;br&gt;&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;arcSize=12;fillColor=#dae8fc;strokeColor=#6c8ebf;shadow=1;comic=0;labelBackgroundColor=none;fontFamily=Verdana;fontSize=12;align=center;verticalAlign=top;fontStyle=1;container=0;" parent="1" vertex="1">
<mxGeometry x="50" y="600" width="550" height="190" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-18" value="vty_read_config()" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
<mxGeometry x="260" y="670" width="130" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-21" value="vty_apply_config()" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
<mxGeometry x="260" y="730" width="130" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-22" value="&lt;b&gt;&lt;i&gt;&quot;copy FILE to rrunning&quot;&lt;/i&gt;&lt;/b&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
<mxGeometry x="63.75" y="730" width="150" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-23" value="&lt;b&gt;&lt;i&gt;vtysh_main.c: main()&lt;/i&gt;&lt;/b&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
<mxGeometry x="430" y="730" width="150" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-19" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-18" target="4hLhriEXD62TuEoW85Ij-16" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-26" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-21" target="4hLhriEXD62TuEoW85Ij-18" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-25" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-22" target="4hLhriEXD62TuEoW85Ij-21" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-24" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-23" target="4hLhriEXD62TuEoW85Ij-21" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-34" value="VTYSH" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;strokeColor=default;fontColor=#333333;opacity=50;dashed=1;dashPattern=8 8;" parent="1" vertex="1">
<mxGeometry x="500" y="610" width="90" height="30" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-12" value="" style="curved=0;endArrow=none;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.25;exitY=1;exitDx=0;exitDy=0;dashed=1;startFill=0;" edge="1" parent="1">
<mxGeometry width="50" height="50" relative="1" as="geometry">
<mxPoint x="215" y="400" as="sourcePoint" />
<mxPoint x="380" y="400" as="targetPoint" />
<Array as="points">
<mxPoint x="215" y="370" />
<mxPoint x="380" y="370" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-16" value="vty_read_file()&lt;div&gt;&lt;b&gt;&lt;i&gt;&quot;conf term file-lock&quot;&lt;/i&gt;&lt;/b&gt;&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" parent="1" vertex="1">
<mxGeometry x="260" y="610" width="130" height="40" as="geometry" />
</mxCell>
<mxCell id="4hLhriEXD62TuEoW85Ij-17" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;jumpStyle=line;exitX=0.5;exitY=0;exitDx=0;exitDy=0;shadow=1;" parent="1" source="4hLhriEXD62TuEoW85Ij-16" edge="1">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="325" y="580" />
<mxPoint x="215" y="580" />
</Array>
<mxPoint x="395" y="670" as="sourcePoint" />
<mxPoint x="215" y="390" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-28" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;startArrow=classic;startFill=1;endArrow=oval;endFill=1;" parent="1" source="QL32OzfzetEIIOdSfswY-14" target="QL32OzfzetEIIOdSfswY-26" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="QL32OzfzetEIIOdSfswY-29" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;endArrow=oval;startFill=1;startArrow=classic;endFill=1;" parent="1" source="QL32OzfzetEIIOdSfswY-8" target="QL32OzfzetEIIOdSfswY-27" edge="1">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-15" value="&lt;div&gt;lock mm-&amp;gt;candidate&lt;/div&gt;" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;" vertex="1" parent="1">
<mxGeometry x="580" y="420" width="130" height="40" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-20" value="If we don&#39;t lock for non-mgmtd then&lt;div&gt;multiple vtysh conf t are allowed!&lt;/div&gt;" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;fontStyle=3;fontColor=#FF0000;" vertex="1" parent="1">
<mxGeometry x="425" y="463" width="210" height="40" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-24" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-21" target="U9ftda_CDvz5WDsUi4ve-23">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-21" value="vty_config_node_exit()" style="rounded=1;whiteSpace=wrap;html=1;fillColor=#f8cecc;strokeColor=#b85450;fillStyle=auto;strokeWidth=3;" vertex="1" parent="1">
<mxGeometry x="830" y="340" width="180" height="45" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-26" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-23" target="U9ftda_CDvz5WDsUi4ve-25">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-29" value="pendign == true" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="U9ftda_CDvz5WDsUi4ve-26">
<mxGeometry x="-0.0182" y="-3" relative="1" as="geometry">
<mxPoint as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-23" value="&lt;div&gt;&amp;nbsp; &amp;nbsp;nb_cli_pending_commit_check()&lt;/div&gt;&lt;div&gt;&lt;br&gt;&lt;/div&gt;" style="whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;rounded=1;fillStyle=auto;strokeWidth=1;" vertex="1" parent="1">
<mxGeometry x="830" y="420" width="180" height="35" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-28" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" target="U9ftda_CDvz5WDsUi4ve-27">
<mxGeometry relative="1" as="geometry">
<mxPoint x="920" y="570" as="sourcePoint" />
<Array as="points">
<mxPoint x="920" y="569" />
<mxPoint x="920" y="596" />
<mxPoint x="910" y="596" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-35" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-27" target="U9ftda_CDvz5WDsUi4ve-36">
<mxGeometry relative="1" as="geometry">
<mxPoint x="920" y="574.37" as="sourcePoint" />
<mxPoint x="1000" y="610.0000000000001" as="targetPoint" />
<Array as="points">
<mxPoint x="960" y="635" />
<mxPoint x="980" y="635" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-47" value="success" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="U9ftda_CDvz5WDsUi4ve-35">
<mxGeometry x="-0.275" y="1" relative="1" as="geometry">
<mxPoint as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-51" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-25" target="U9ftda_CDvz5WDsUi4ve-27">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-25" value="nb_cli_classic_commit()" style="whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;rounded=1;fillStyle=auto;strokeWidth=1;" vertex="1" parent="1">
<mxGeometry x="830" y="500" width="180" height="37.5" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-31" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-27" target="U9ftda_CDvz5WDsUi4ve-30">
<mxGeometry relative="1" as="geometry">
<Array as="points">
<mxPoint x="880" y="635" />
<mxPoint x="781" y="635" />
</Array>
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-32" value="fail" style="edgeLabel;html=1;align=center;verticalAlign=middle;resizable=0;points=[];" vertex="1" connectable="0" parent="U9ftda_CDvz5WDsUi4ve-31">
<mxGeometry x="-0.055" y="-3" relative="1" as="geometry">
<mxPoint as="offset" />
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-27" value="nb_candidate_commit_prepare()" style="whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;rounded=1;fillStyle=auto;strokeWidth=1;" vertex="1" parent="1">
<mxGeometry x="830" y="566.25" width="180" height="33.75" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-30" value="" style="whiteSpace=wrap;html=1;fillColor=#e1d5e7;strokeColor=#9673a6;rounded=1;fillStyle=auto;strokeWidth=1;" vertex="1" parent="1">
<mxGeometry x="691.25" y="670.01" width="180" height="99.99" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-40" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-38" target="U9ftda_CDvz5WDsUi4ve-39">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-38" value="&lt;div&gt;running&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
<mxGeometry x="706.25" y="685" width="50" height="70" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-39" value="&lt;div&gt;private or&lt;/div&gt;&lt;div&gt;candidate&lt;/div&gt;&lt;div&gt;&lt;br&gt;&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" vertex="1" parent="1">
<mxGeometry x="796.25" y="685" width="60" height="70" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-42" value="&lt;div&gt;running&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#d5e8d4;strokeColor=#82b366;" vertex="1" parent="1">
<mxGeometry x="990" y="715" width="50" height="70" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-41" value="&lt;div&gt;private or&lt;/div&gt;&lt;div&gt;candidate&lt;/div&gt;&lt;div&gt;&lt;br&gt;&lt;/div&gt;" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;align=center;fillColor=#fff2cc;strokeColor=#d6b656;" vertex="1" parent="1">
<mxGeometry x="900" y="715" width="65" height="70" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-44" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=0.5;exitY=1;exitDx=0;exitDy=0;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-36" target="U9ftda_CDvz5WDsUi4ve-36">
<mxGeometry relative="1" as="geometry" />
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-48" value="" style="edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;" edge="1" parent="1" source="U9ftda_CDvz5WDsUi4ve-41" target="U9ftda_CDvz5WDsUi4ve-42">
<mxGeometry relative="1" as="geometry">
<mxPoint x="960" y="705" as="sourcePoint" />
<mxPoint x="990" y="705" as="targetPoint" />
</mxGeometry>
</mxCell>
<mxCell id="U9ftda_CDvz5WDsUi4ve-52" value="&lt;b&gt;&lt;font style=&quot;font-size: 15px;&quot;&gt;Config Datastore Non-Implicit Commit Cleanup&lt;/font&gt;&lt;/b&gt;" style="text;html=1;align=center;verticalAlign=middle;resizable=0;points=[];autosize=1;strokeColor=none;fillColor=none;" vertex="1" parent="1">
<mxGeometry x="400" y="10" width="360" height="30" as="geometry" />
</mxCell>
</root>
</mxGraphModel>
</diagram>
</mxfile>
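
The diagram above ("Config Datastore Non-Implicit Commit Cleanup") traces how leaving the config node flushes a still-pending commit: vty_config_node_exit() consults nb_cli_pending_commit_check(), which hands off to nb_cli_classic_commit() and nb_candidate_commit_prepare(), syncing the candidate datastore into running on success and leaving it untouched on failure. Below is a minimal C sketch of that control flow; the function names are taken from the diagram, while the stand-in struct vty, the stubbed helpers and the return conventions are illustrative assumptions, not FRR's actual code.

```c
/*
 * Illustrative sketch of the flow in the diagram above -- not FRR's real
 * implementation.  Function names come from the diagram; struct vty and
 * the stubbed helpers below are assumptions made for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct vty { bool pending; };   /* stand-in; the real struct vty is much larger */

/* Stubbed stand-ins for the boxes in the diagram (assumed behaviour). */
static bool nb_cli_pending_commit_check(struct vty *vty) { return vty->pending; }

static int nb_cli_classic_commit(struct vty *vty)
{
	/* In the diagram this calls nb_candidate_commit_prepare() and then
	 * applies or rolls back; here we just report it for the sketch. */
	printf("committing pending candidate config\n");
	vty->pending = false;
	return 0;
}

/* Leaving the config node: flush any still-pending implicit commit so the
 * candidate and running datastores end up consistent. */
static int config_node_exit_sketch(struct vty *vty)
{
	if (nb_cli_pending_commit_check(vty))   /* "pending == true" edge */
		return nb_cli_classic_commit(vty);
	return 0;
}

int main(void)
{
	struct vty v = { .pending = true };

	return config_node_exit_sketch(&v);
}
```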

File diff suppressed because one or more lines are too long


View file

@ -215,11 +215,6 @@ BFD peers and profiles share the same BFD session configuration commands.
The default value is 254 (which means we only expect one hop between The default value is 254 (which means we only expect one hop between
this system and the peer). this system and the peer).
.. clicmd:: log-session-changes
Enables or disables logging of session state transitions into Up
state or when the session transitions from Up state to Down state.
BFD Peer Specific Commands BFD Peer Specific Commands
-------------------------- --------------------------

View file

@ -537,13 +537,6 @@ Reject routes with AS_SET or AS_CONFED_SET types
This command enables rejection of incoming and outgoing routes having AS_SET or AS_CONFED_SET type. This command enables rejection of incoming and outgoing routes having AS_SET or AS_CONFED_SET type.
The aggregated routes are not sent to the contributing neighbors.
.. seealso::
https://datatracker.ietf.org/doc/html/draft-ietf-idr-deprecate-as-set-confed-set
Default: disabled.
Enforce first AS Enforce first AS
---------------- ----------------

View file

@ -25,8 +25,6 @@ There are several options that control the behavior of ``frr-reload``:
* ``--stdout``: print output to stdout * ``--stdout``: print output to stdout
* ``--bindir BINDIR``: path to the vtysh executable * ``--bindir BINDIR``: path to the vtysh executable
* ``--confdir CONFDIR``: path to the existing daemon config files * ``--confdir CONFDIR``: path to the existing daemon config files
* ``--logfile FILENAME``: file (with path) to logfile for the reload operation.
Default is ``/var/log/frr/frr-reload.log``
* ``--rundir RUNDIR``: path to a folder to be used to write the temporary files * ``--rundir RUNDIR``: path to a folder to be used to write the temporary files
needed by the script to do its job. The script should have write access to it needed by the script to do its job. The script should have write access to it
* ``--daemon DAEMON``: by default ``frr-reload.py`` assumes that we are using * ``--daemon DAEMON``: by default ``frr-reload.py`` assumes that we are using

View file

@ -9,7 +9,6 @@ Protocols
zebra zebra
bfd bfd
sbfd
bgp bgp
babeld babeld
fabricd fabricd

View file

@ -46,8 +46,8 @@ a static prefix and gateway, with several possible forms.
NETWORK is destination prefix with a valid v4 or v6 network based upon NETWORK is destination prefix with a valid v4 or v6 network based upon
initial form of the command. initial form of the command.
GATEWAY is the IP address to use as next-hop for the prefix. Routes of type v4 can use v4 and v6 next-hops, GATEWAY is the IP address to use as next-hop for the prefix. Currently, it must match
v6 routes only support v6 next-hops. the v4 or v6 route type specified at the start of the command.
IFNAME is the name of the interface to use as next-hop. If only IFNAME is specified IFNAME is the name of the interface to use as next-hop. If only IFNAME is specified
(without GATEWAY), a connected route will be created. (without GATEWAY), a connected route will be created.

View file

@ -84,9 +84,9 @@ endif
# #
.PHONY: info html pdf .PHONY: info html pdf
info-local: $(USERBUILD)/texinfo/frr.info info: $(USERBUILD)/texinfo/frr.info
html-local: $(USERBUILD)/html/.buildinfo html: $(USERBUILD)/html/.buildinfo
pdf-local: $(USERBUILD)/latexpdf pdf: $(USERBUILD)/latexpdf
# #
# hook-ins for clean / install / doc # hook-ins for clean / install / doc
@ -100,7 +100,7 @@ clean-userdocs:
# INSTALL_INFO=install-info # INSTALL_INFO=install-info
.PHONY: install-info uninstall-info install-html uninstall-html .PHONY: install-info uninstall-info install-html uninstall-html
install-info-local: $(USERBUILD)/texinfo/frr.info install-info: $(USERBUILD)/texinfo/frr.info
$(MKDIR_P) "$(DESTDIR)$(infodir)" $(MKDIR_P) "$(DESTDIR)$(infodir)"
$(INSTALL_DATA) "$<" "$(DESTDIR)$(infodir)" $(INSTALL_DATA) "$<" "$(DESTDIR)$(infodir)"
[ -z "${DESTDIR}" ] && $(INSTALL_INFO) --info-dir="$(DESTDIR)$(infodir)" "$<" || true [ -z "${DESTDIR}" ] && $(INSTALL_INFO) --info-dir="$(DESTDIR)$(infodir)" "$<" || true
@ -108,7 +108,7 @@ uninstall-info: $(USERBUILD)/texinfo/frr.info
-rm -f "$(DESTDIR)$(infodir)/$<" -rm -f "$(DESTDIR)$(infodir)/$<"
[ -z "${DESTDIR}" ] && $(INSTALL_INFO) --delete --info-dir="$(DESTDIR)$(infodir)" "$<" || true [ -z "${DESTDIR}" ] && $(INSTALL_INFO) --delete --info-dir="$(DESTDIR)$(infodir)" "$<" || true
install-html-local: $(USERBUILD)/html/.buildinfo install-html: $(USERBUILD)/html/.buildinfo
$(MKDIR_P) "$(DESTDIR)$(htmldir)" $(MKDIR_P) "$(DESTDIR)$(htmldir)"
cp -r "$(USERBUILD)/html" "$(DESTDIR)$(htmldir)" cp -r "$(USERBUILD)/html" "$(DESTDIR)$(htmldir)"
uninstall-html: uninstall-html:

View file

@ -81,11 +81,11 @@ static int config_write_debug(struct vty *vty)
static int eigrp_neighbor_packet_queue_sum(struct eigrp_interface *ei) static int eigrp_neighbor_packet_queue_sum(struct eigrp_interface *ei)
{ {
struct eigrp_neighbor *nbr; struct eigrp_neighbor *nbr;
struct listnode *node, *nnode;
int sum; int sum;
sum = 0; sum = 0;
frr_each (eigrp_nbr_hash, &ei->nbr_hash_head, nbr) { for (ALL_LIST_ELEMENTS(ei->nbrs, node, nnode, nbr)) {
sum += nbr->retrans_queue->count; sum += nbr->retrans_queue->count;
} }
@ -152,7 +152,7 @@ void show_ip_eigrp_interface_sub(struct vty *vty, struct eigrp *eigrp,
vty_out(vty, "%-16s ", IF_NAME(ei)); vty_out(vty, "%-16s ", IF_NAME(ei));
vty_out(vty, "%-11u", ei->params.bandwidth); vty_out(vty, "%-11u", ei->params.bandwidth);
vty_out(vty, "%-11u", ei->params.delay); vty_out(vty, "%-11u", ei->params.delay);
vty_out(vty, "%-7zu", eigrp_nbr_hash_count(&ei->nbr_hash_head)); vty_out(vty, "%-7u", ei->nbrs->count);
vty_out(vty, "%u %c %-10u", 0, '/', vty_out(vty, "%u %c %-10u", 0, '/',
eigrp_neighbor_packet_queue_sum(ei)); eigrp_neighbor_packet_queue_sum(ei));
vty_out(vty, "%-7u %-14u %-12u %-8u", 0, 0, 0, 0); vty_out(vty, "%-7u %-14u %-12u %-8u", 0, 0, 0, 0);
@ -228,7 +228,7 @@ void show_ip_eigrp_prefix_descriptor(struct vty *vty,
vty_out(vty, "%-3c", (tn->state > 0) ? 'A' : 'P'); vty_out(vty, "%-3c", (tn->state > 0) ? 'A' : 'P');
vty_out(vty, "%pFX, ", &tn->destination); vty_out(vty, "%pFX, ", tn->destination);
vty_out(vty, "%u successors, ", (successors) ? successors->count : 0); vty_out(vty, "%u successors, ", (successors) ? successors->count : 0);
vty_out(vty, "FD is %u, serno: %" PRIu64 " \n", tn->fdistance, vty_out(vty, "FD is %u, serno: %" PRIu64 " \n", tn->fdistance,
tn->serno); tn->serno);

View file

@ -42,7 +42,6 @@
#include "eigrpd/eigrp_const.h" #include "eigrpd/eigrp_const.h"
#include "eigrpd/eigrp_filter.h" #include "eigrpd/eigrp_filter.h"
#include "eigrpd/eigrp_packet.h" #include "eigrpd/eigrp_packet.h"
#include "eigrpd/eigrp_interface.h"
/* /*
* Distribute-list update functions. * Distribute-list update functions.
@ -127,9 +126,10 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
/*struct eigrp_if_info * info = ifp->info; /*struct eigrp_if_info * info = ifp->info;
ei = info->eigrp_interface;*/ ei = info->eigrp_interface;*/
struct listnode *node, *nnode;
struct eigrp_interface *ei2; struct eigrp_interface *ei2;
/* Find proper interface */ /* Find proper interface */
frr_each (eigrp_interface_hash, &e->eifs, ei2) { for (ALL_LIST_ELEMENTS(e->eiflist, node, nnode, ei2)) {
if (strcmp(ei2->ifp->name, ifp->name) == 0) { if (strcmp(ei2->ifp->name, ifp->name) == 0) {
ei = ei2; ei = ei2;
break; break;

View file

@ -403,10 +403,12 @@ int eigrp_fsm_event(struct eigrp_fsm_action_message *msg)
{ {
enum eigrp_fsm_events event = eigrp_get_fsm_event(msg); enum eigrp_fsm_events event = eigrp_get_fsm_event(msg);
zlog_info("EIGRP AS: %d State: %s Event: %s Network: %pFX Packet Type: %s Reply RIJ Count: %d change: %s", zlog_info(
msg->eigrp->AS, prefix_state2str(msg->prefix->state), fsm_state2str(event), "EIGRP AS: %d State: %s Event: %s Network: %pI4 Packet Type: %s Reply RIJ Count: %d change: %s",
&msg->prefix->destination, packet_type2str(msg->packet_type), msg->eigrp->AS, prefix_state2str(msg->prefix->state),
msg->prefix->rij->count, change2str(msg->change)); fsm_state2str(event), &msg->prefix->destination->u.prefix4,
packet_type2str(msg->packet_type), msg->prefix->rij->count,
change2str(msg->change));
(*(NSM[msg->prefix->state][event].func))(msg); (*(NSM[msg->prefix->state][event].func))(msg);
return 1; return 1;

View file

@ -496,6 +496,7 @@ static uint16_t eigrp_sequence_encode(struct eigrp *eigrp, struct stream *s)
{ {
uint16_t length = EIGRP_TLV_SEQ_BASE_LEN; uint16_t length = EIGRP_TLV_SEQ_BASE_LEN;
struct eigrp_interface *ei; struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr; struct eigrp_neighbor *nbr;
size_t backup_end, size_end; size_t backup_end, size_end;
int found; int found;
@ -508,8 +509,8 @@ static uint16_t eigrp_sequence_encode(struct eigrp *eigrp, struct stream *s)
stream_putc(s, IPV4_MAX_BYTELEN); stream_putc(s, IPV4_MAX_BYTELEN);
found = 0; found = 0;
frr_each (eigrp_interface_hash, &eigrp->eifs, ei) { for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
frr_each (eigrp_nbr_hash, &ei->nbr_hash_head, nbr) { for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
if (nbr->multicast_queue->count > 0) { if (nbr->multicast_queue->count > 0) {
length += (uint16_t)stream_put_ipv4( length += (uint16_t)stream_put_ipv4(
s, nbr->src.s_addr); s, nbr->src.s_addr);

View file

@ -45,16 +45,6 @@
DEFINE_MTYPE_STATIC(EIGRPD, EIGRP_IF, "EIGRP interface"); DEFINE_MTYPE_STATIC(EIGRPD, EIGRP_IF, "EIGRP interface");
int eigrp_interface_cmp(const struct eigrp_interface *a, const struct eigrp_interface *b)
{
return if_cmp_func(a->ifp, b->ifp);
}
uint32_t eigrp_interface_hash(const struct eigrp_interface *ei)
{
return ei->ifp->ifindex;
}
struct eigrp_interface *eigrp_if_new(struct eigrp *eigrp, struct interface *ifp, struct eigrp_interface *eigrp_if_new(struct eigrp *eigrp, struct interface *ifp,
struct prefix *p) struct prefix *p)
{ {
@ -71,12 +61,12 @@ struct eigrp_interface *eigrp_if_new(struct eigrp *eigrp, struct interface *ifp,
prefix_copy(&ei->address, p); prefix_copy(&ei->address, p);
ifp->info = ei; ifp->info = ei;
eigrp_interface_hash_add(&eigrp->eifs, ei); listnode_add(eigrp->eiflist, ei);
ei->type = EIGRP_IFTYPE_BROADCAST; ei->type = EIGRP_IFTYPE_BROADCAST;
/* Initialize neighbor list. */ /* Initialize neighbor list. */
eigrp_nbr_hash_init(&ei->nbr_hash_head); ei->nbrs = list_new();
ei->crypt_seqnum = frr_sequence32_next(); ei->crypt_seqnum = frr_sequence32_next();
@ -112,10 +102,10 @@ int eigrp_if_delete_hook(struct interface *ifp)
if (!ei) if (!ei)
return 0; return 0;
eigrp_nbr_hash_fini(&ei->nbr_hash_head); list_delete(&ei->nbrs);
eigrp = ei->eigrp; eigrp = ei->eigrp;
eigrp_interface_hash_del(&eigrp->eifs, ei); listnode_delete(eigrp->eiflist, ei);
eigrp_fifo_free(ei->obuf); eigrp_fifo_free(ei->obuf);
@ -248,6 +238,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
struct eigrp_route_descriptor *ne; struct eigrp_route_descriptor *ne;
struct eigrp_metrics metric; struct eigrp_metrics metric;
struct eigrp_interface *ei2; struct eigrp_interface *ei2;
struct listnode *node, *nnode;
struct eigrp *eigrp; struct eigrp *eigrp;
if (ei == NULL) if (ei == NULL)
@ -294,7 +285,8 @@ int eigrp_if_up(struct eigrp_interface *ei)
if (pe == NULL) { if (pe == NULL) {
pe = eigrp_prefix_descriptor_new(); pe = eigrp_prefix_descriptor_new();
pe->serno = eigrp->serno; pe->serno = eigrp->serno;
prefix_copy(&pe->destination, &dest_addr); pe->destination = (struct prefix *)prefix_ipv4_new();
prefix_copy(pe->destination, &dest_addr);
pe->af = AF_INET; pe->af = AF_INET;
pe->nt = EIGRP_TOPOLOGY_TYPE_CONNECTED; pe->nt = EIGRP_TOPOLOGY_TYPE_CONNECTED;
@ -308,8 +300,9 @@ int eigrp_if_up(struct eigrp_interface *ei)
eigrp_route_descriptor_add(eigrp, pe, ne); eigrp_route_descriptor_add(eigrp, pe, ne);
frr_each (eigrp_interface_hash, &eigrp->eifs, ei2) for (ALL_LIST_ELEMENTS(eigrp->eiflist, node, nnode, ei2)) {
eigrp_update_send(ei2); eigrp_update_send(ei2);
}
pe->req_action &= ~EIGRP_FSM_NEED_UPDATE; pe->req_action &= ~EIGRP_FSM_NEED_UPDATE;
listnode_delete(eigrp->topology_changes_internalIPV4, pe); listnode_delete(eigrp->topology_changes_internalIPV4, pe);
@ -334,6 +327,9 @@ int eigrp_if_up(struct eigrp_interface *ei)
int eigrp_if_down(struct eigrp_interface *ei) int eigrp_if_down(struct eigrp_interface *ei)
{ {
struct listnode *node, *nnode;
struct eigrp_neighbor *nbr;
if (ei == NULL) if (ei == NULL)
return 0; return 0;
@ -344,9 +340,9 @@ int eigrp_if_down(struct eigrp_interface *ei)
/*Set infinite metrics to routes learned by this interface and start /*Set infinite metrics to routes learned by this interface and start
* query process*/ * query process*/
while (eigrp_nbr_hash_count(&ei->nbr_hash_head) > 0) for (ALL_LIST_ELEMENTS(ei->nbrs, node, nnode, nbr)) {
eigrp_nbr_delete(eigrp_nbr_hash_first(&ei->nbr_hash_head)); eigrp_nbr_delete(nbr);
}
return 1; return 1;
} }
@ -440,6 +436,8 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
pe); pe);
eigrp_if_down(ei); eigrp_if_down(ei);
listnode_delete(ei->eigrp->eiflist, ei);
} }
/* Simulate down/up on the interface. This is needed, for example, when /* Simulate down/up on the interface. This is needed, for example, when
@ -459,9 +457,10 @@ struct eigrp_interface *eigrp_if_lookup_by_local_addr(struct eigrp *eigrp,
struct interface *ifp, struct interface *ifp,
struct in_addr address) struct in_addr address)
{ {
struct listnode *node;
struct eigrp_interface *ei; struct eigrp_interface *ei;
frr_each (eigrp_interface_hash, &eigrp->eifs, ei) { for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
if (ifp && ei->ifp != ifp) if (ifp && ei->ifp != ifp)
continue; continue;
@ -487,10 +486,10 @@ struct eigrp_interface *eigrp_if_lookup_by_name(struct eigrp *eigrp,
const char *if_name) const char *if_name)
{ {
struct eigrp_interface *ei; struct eigrp_interface *ei;
struct listnode *node;
/* iterate over all eigrp interfaces */ /* iterate over all eigrp interfaces */
// XXX for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
frr_each (eigrp_interface_hash, &eigrp->eifs, ei) {
/* compare int name with eigrp interface's name */ /* compare int name with eigrp interface's name */
if (strcmp(ei->ifp->name, if_name) == 0) { if (strcmp(ei->ifp->name, if_name) == 0) {
return ei; return ei;

View file

@ -43,10 +43,4 @@ extern struct eigrp_interface *eigrp_if_lookup_by_name(struct eigrp *,
/* Simulate down/up on the interface. */ /* Simulate down/up on the interface. */
extern void eigrp_if_reset(struct interface *); extern void eigrp_if_reset(struct interface *);
extern int eigrp_interface_cmp(const struct eigrp_interface *a, const struct eigrp_interface *b);
extern uint32_t eigrp_interface_hash(const struct eigrp_interface *ei);
DECLARE_HASH(eigrp_interface_hash, struct eigrp_interface, eif_item, eigrp_interface_cmp,
eigrp_interface_hash);
#endif /* ZEBRA_EIGRP_INTERFACE_H_ */ #endif /* ZEBRA_EIGRP_INTERFACE_H_ */

View file

@ -98,9 +98,6 @@ static void sigint(void)
keychain_terminate(); keychain_terminate();
route_map_finish();
prefix_list_reset();
eigrp_terminate(); eigrp_terminate();
exit(0); exit(0);

View file

@ -41,21 +41,6 @@
DEFINE_MTYPE_STATIC(EIGRPD, EIGRP_NEIGHBOR, "EIGRP neighbor"); DEFINE_MTYPE_STATIC(EIGRPD, EIGRP_NEIGHBOR, "EIGRP neighbor");
int eigrp_nbr_comp(const struct eigrp_neighbor *a, const struct eigrp_neighbor *b)
{
if (a->src.s_addr == b->src.s_addr)
return 0;
else if (a->src.s_addr < b->src.s_addr)
return -1;
return 1;
}
uint32_t eigrp_nbr_hash(const struct eigrp_neighbor *a)
{
return a->src.s_addr;
}
struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei) struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei)
{ {
struct eigrp_neighbor *nbr; struct eigrp_neighbor *nbr;
@ -95,18 +80,17 @@ struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *ei,
struct eigrp_header *eigrph, struct eigrp_header *eigrph,
struct ip *iph) struct ip *iph)
{ {
struct eigrp_neighbor lookup, *nbr; struct eigrp_neighbor *nbr;
struct listnode *node, *nnode;
lookup.src = iph->ip_src; for (ALL_LIST_ELEMENTS(ei->nbrs, node, nnode, nbr)) {
lookup.ei = ei; if (iph->ip_src.s_addr == nbr->src.s_addr) {
nbr = eigrp_nbr_hash_find(&ei->nbr_hash_head, &lookup);
if (nbr) {
return nbr; return nbr;
} }
}
nbr = eigrp_nbr_add(ei, eigrph, iph); nbr = eigrp_nbr_add(ei, eigrph, iph);
eigrp_nbr_hash_add(&ei->nbr_hash_head, nbr); listnode_add(ei->nbrs, nbr);
return nbr; return nbr;
} }
@ -126,13 +110,17 @@ struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *ei,
struct eigrp_neighbor *eigrp_nbr_lookup_by_addr(struct eigrp_interface *ei, struct eigrp_neighbor *eigrp_nbr_lookup_by_addr(struct eigrp_interface *ei,
struct in_addr *addr) struct in_addr *addr)
{ {
struct eigrp_neighbor lookup, *nbr; struct eigrp_neighbor *nbr;
struct listnode *node, *nnode;
lookup.src = *addr;
nbr = eigrp_nbr_hash_find(&ei->nbr_hash_head, &lookup);
for (ALL_LIST_ELEMENTS(ei->nbrs, node, nnode, nbr)) {
if (addr->s_addr == nbr->src.s_addr) {
return nbr; return nbr;
} }
}
return NULL;
}
/** /**
* @fn eigrp_nbr_lookup_by_addr_process * @fn eigrp_nbr_lookup_by_addr_process
@ -150,17 +138,19 @@ struct eigrp_neighbor *eigrp_nbr_lookup_by_addr_process(struct eigrp *eigrp,
struct in_addr nbr_addr) struct in_addr nbr_addr)
{ {
struct eigrp_interface *ei; struct eigrp_interface *ei;
struct eigrp_neighbor lookup, *nbr; struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
/* iterate over all eigrp interfaces */ /* iterate over all eigrp interfaces */
frr_each (eigrp_interface_hash, &eigrp->eifs, ei) { for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
/* iterate over all neighbors on eigrp interface */ /* iterate over all neighbors on eigrp interface */
lookup.src = nbr_addr; for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
nbr = eigrp_nbr_hash_find(&ei->nbr_hash_head, &lookup); /* compare if neighbor address is same as arg address */
if (nbr) { if (nbr->src.s_addr == nbr_addr.s_addr) {
return nbr; return nbr;
} }
} }
}
return NULL; return NULL;
} }
@ -180,7 +170,7 @@ void eigrp_nbr_delete(struct eigrp_neighbor *nbr)
EVENT_OFF(nbr->t_holddown); EVENT_OFF(nbr->t_holddown);
if (nbr->ei) if (nbr->ei)
eigrp_nbr_hash_del(&nbr->ei->nbr_hash_head, nbr); listnode_delete(nbr->ei->nbrs, nbr);
XFREE(MTYPE_EIGRP_NEIGHBOR, nbr); XFREE(MTYPE_EIGRP_NEIGHBOR, nbr);
} }
@ -288,12 +278,18 @@ void eigrp_nbr_state_update(struct eigrp_neighbor *nbr)
int eigrp_nbr_count_get(struct eigrp *eigrp) int eigrp_nbr_count_get(struct eigrp *eigrp)
{ {
struct eigrp_interface *iface; struct eigrp_interface *iface;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
uint32_t counter; uint32_t counter;
counter = 0; counter = 0;
frr_each (eigrp_interface_hash, &eigrp->eifs, iface) for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, iface)) {
counter += eigrp_nbr_hash_count(&iface->nbr_hash_head); for (ALL_LIST_ELEMENTS(iface->nbrs, node2, nnode2, nbr)) {
if (nbr->state == EIGRP_NEIGHBOR_UP) {
counter++;
}
}
}
return counter; return counter;
} }

View file

@ -26,6 +26,8 @@ extern void eigrp_nbr_delete(struct eigrp_neighbor *neigh);
extern void holddown_timer_expired(struct event *thread); extern void holddown_timer_expired(struct event *thread);
extern int eigrp_neighborship_check(struct eigrp_neighbor *neigh,
struct TLV_Parameter_Type *tlv);
extern void eigrp_nbr_state_update(struct eigrp_neighbor *neigh); extern void eigrp_nbr_state_update(struct eigrp_neighbor *neigh);
extern void eigrp_nbr_state_set(struct eigrp_neighbor *neigh, uint8_t state); extern void eigrp_nbr_state_set(struct eigrp_neighbor *neigh, uint8_t state);
extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *neigh); extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *neigh);
@ -39,9 +41,4 @@ extern void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty);
extern int eigrp_nbr_split_horizon_check(struct eigrp_route_descriptor *ne, extern int eigrp_nbr_split_horizon_check(struct eigrp_route_descriptor *ne,
struct eigrp_interface *ei); struct eigrp_interface *ei);
extern int eigrp_nbr_comp(const struct eigrp_neighbor *a, const struct eigrp_neighbor *b);
extern uint32_t eigrp_nbr_hash(const struct eigrp_neighbor *a);
DECLARE_HASH(eigrp_nbr_hash, struct eigrp_neighbor, nbr_hash_item, eigrp_nbr_comp, eigrp_nbr_hash);
#endif /* _ZEBRA_EIGRP_NEIGHBOR_H */ #endif /* _ZEBRA_EIGRP_NEIGHBOR_H */
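
The neighbor lookups above are written against FRR's typesafe hash containers on one side of the diff (eigrp_nbr_hash_find(), frr_each(), DECLARE_HASH) and against plain linked lists on the other. The sketch below shows the typesafe-hash pattern in isolation; it assumes FRR's lib/typesafe.h and trims the struct down to just the key and the hash linkage, so it is not eigrpd's real definition.

```c
/*
 * Sketch of the typesafe-hash pattern used in the hunks above, assuming
 * FRR's lib/typesafe.h.  The struct and helpers are reduced to what the
 * example needs and are not eigrpd's real definitions.
 */
#include <stdint.h>
#include <netinet/in.h>
#include "typesafe.h"   /* assumed include path within FRR's lib/ */

PREDECL_HASH(nbr_hash);

struct nbr {
	struct in_addr src;              /* hash key */
	struct nbr_hash_item hash_item;  /* linkage consumed by DECLARE_HASH */
};

static int nbr_cmp(const struct nbr *a, const struct nbr *b)
{
	return (a->src.s_addr > b->src.s_addr) - (a->src.s_addr < b->src.s_addr);
}

static uint32_t nbr_hashfn(const struct nbr *a)
{
	return a->src.s_addr;
}

DECLARE_HASH(nbr_hash, struct nbr, hash_item, nbr_cmp, nbr_hashfn);

/* Key-based lookup plus a full iteration, mirroring eigrp_nbr_hash_find()
 * and the frr_each() loops in the diff. */
static struct nbr *find_nbr(struct nbr_hash_head *head, struct in_addr addr)
{
	struct nbr lookup = { .src = addr }, *nbr;

	nbr = nbr_hash_find(head, &lookup);
	if (nbr)
		return nbr;

	frr_each (nbr_hash, head, nbr)      /* linear walk, shown for comparison */
		if (nbr->src.s_addr == addr.s_addr)
			return nbr;
	return NULL;
}
```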

View file

@ -219,21 +219,6 @@ int eigrp_network_set(struct eigrp *eigrp, struct prefix *p)
return 1; return 1;
} }
static void eigrp_network_delete_all(struct eigrp *eigrp, struct route_table *table)
{
struct route_node *rn;
for (rn = route_top(table); rn; rn = route_next(rn)) {
prefix_free((struct prefix **)&rn->info);
}
}
void eigrp_network_free(struct eigrp *eigrp, struct route_table *table)
{
eigrp_network_delete_all(eigrp, table);
route_table_finish(table);
}
/* Check whether interface matches given network /* Check whether interface matches given network
* returns: 1, true. 0, false * returns: 1, true. 0, false
*/ */
@ -277,6 +262,7 @@ static void eigrp_network_run_interface(struct eigrp *eigrp, struct prefix *p,
void eigrp_if_update(struct interface *ifp) void eigrp_if_update(struct interface *ifp)
{ {
struct listnode *node, *nnode;
struct route_node *rn; struct route_node *rn;
struct eigrp *eigrp; struct eigrp *eigrp;
@ -284,7 +270,7 @@ void eigrp_if_update(struct interface *ifp)
* In the event there are multiple eigrp autonymnous systems running, * In the event there are multiple eigrp autonymnous systems running,
* we need to check eac one and add the interface as approperate * we need to check eac one and add the interface as approperate
*/ */
frr_each (eigrp_master_hash, &eigrp_om->eigrp, eigrp) { for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp)) {
if (ifp->vrf->vrf_id != eigrp->vrf_id) if (ifp->vrf->vrf_id != eigrp->vrf_id)
continue; continue;
@ -303,6 +289,7 @@ void eigrp_if_update(struct interface *ifp)
int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p) int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p)
{ {
struct route_node *rn; struct route_node *rn;
struct listnode *node, *nnode;
struct eigrp_interface *ei; struct eigrp_interface *ei;
struct prefix *pref; struct prefix *pref;
@ -320,7 +307,7 @@ int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p)
route_unlock_node(rn); /* initial reference */ route_unlock_node(rn); /* initial reference */
/* Find interfaces that not configured already. */ /* Find interfaces that not configured already. */
frr_each (eigrp_interface_hash, &eigrp->eifs, ei) { for (ALL_LIST_ELEMENTS(eigrp->eiflist, node, nnode, ei)) {
bool found = false; bool found = false;
for (rn = route_top(eigrp->networks); rn; rn = route_next(rn)) { for (rn = route_top(eigrp->networks); rn; rn = route_next(rn)) {

View file

@ -19,7 +19,6 @@ extern int eigrp_sock_init(struct vrf *vrf);
extern int eigrp_if_ipmulticast(struct eigrp *, struct prefix *, unsigned int); extern int eigrp_if_ipmulticast(struct eigrp *, struct prefix *, unsigned int);
extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p); extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p);
extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p); extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p);
extern void eigrp_network_free(struct eigrp *eigrp, struct route_table *table);
extern void eigrp_hello_timer(struct event *thread); extern void eigrp_hello_timer(struct event *thread);
extern void eigrp_if_update(struct interface *); extern void eigrp_if_update(struct interface *);

View file

@ -43,11 +43,13 @@ static void redistribute_get_metrics(const struct lyd_node *dnode,
em->reliability = yang_dnode_get_uint32(dnode, "reliability"); em->reliability = yang_dnode_get_uint32(dnode, "reliability");
} }
static struct eigrp_interface *eigrp_interface_lookup(struct eigrp *eigrp, const char *ifname) static struct eigrp_interface *eigrp_interface_lookup(const struct eigrp *eigrp,
const char *ifname)
{ {
struct eigrp_interface *eif; struct eigrp_interface *eif;
struct listnode *ln;
frr_each (eigrp_interface_hash, &eigrp->eifs, eif) { for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, ln, eif)) {
if (strcmp(ifname, eif->ifp->name)) if (strcmp(ifname, eif->ifp->name))
continue; continue;
@ -739,7 +741,7 @@ static int eigrpd_instance_redistribute_create(struct nb_cb_create_args *args)
else else
vrfid = VRF_DEFAULT; vrfid = VRF_DEFAULT;
if (vrf_bitmap_check(&eigrp_zclient->redist[AFI_IP][proto], vrfid)) if (vrf_bitmap_check(&zclient->redist[AFI_IP][proto], vrfid))
return NB_ERR_INCONSISTENCY; return NB_ERR_INCONSISTENCY;
break; break;
case NB_EV_PREPARE: case NB_EV_PREPARE:

View file

@ -532,8 +532,8 @@ void eigrp_read(struct event *thread)
return; return;
/* Self-originated packet should be discarded silently. */ /* Self-originated packet should be discarded silently. */
if (eigrp_if_lookup_by_local_addr(eigrp, ifp, iph->ip_src) || if (eigrp_if_lookup_by_local_addr(eigrp, NULL, iph->ip_src)
(IPV4_ADDR_SAME(&srcaddr, &ei->address.u.prefix4))) { || (IPV4_ADDR_SAME(&srcaddr, &ei->address.u.prefix4))) {
if (IS_DEBUG_EIGRP_TRANSMIT(0, RECV)) if (IS_DEBUG_EIGRP_TRANSMIT(0, RECV))
zlog_debug( zlog_debug(
"eigrp_read[%pI4]: Dropping self-originated packet", "eigrp_read[%pI4]: Dropping self-originated packet",
@ -1129,7 +1129,7 @@ uint16_t eigrp_add_internalTLV_to_stream(struct stream *s,
uint16_t length; uint16_t length;
stream_putw(s, EIGRP_TLV_IPv4_INT); stream_putw(s, EIGRP_TLV_IPv4_INT);
switch (pe->destination.prefixlen) { switch (pe->destination->prefixlen) {
case 0: case 0:
case 1: case 1:
case 2: case 2:
@ -1176,8 +1176,8 @@ uint16_t eigrp_add_internalTLV_to_stream(struct stream *s,
stream_putw(s, length); stream_putw(s, length);
break; break;
default: default:
flog_err(EC_LIB_DEVELOPMENT, "%s: Unexpected prefix length: %d", __func__, flog_err(EC_LIB_DEVELOPMENT, "%s: Unexpected prefix length: %d",
pe->destination.prefixlen); __func__, pe->destination->prefixlen);
return 0; return 0;
} }
stream_putl(s, 0x00000000); stream_putl(s, 0x00000000);
@ -1194,15 +1194,15 @@ uint16_t eigrp_add_internalTLV_to_stream(struct stream *s,
stream_putc(s, pe->reported_metric.tag); stream_putc(s, pe->reported_metric.tag);
stream_putc(s, pe->reported_metric.flags); stream_putc(s, pe->reported_metric.flags);
stream_putc(s, pe->destination.prefixlen); stream_putc(s, pe->destination->prefixlen);
stream_putc(s, (ntohl(pe->destination.u.prefix4.s_addr) >> 24) & 0xFF); stream_putc(s, (ntohl(pe->destination->u.prefix4.s_addr) >> 24) & 0xFF);
if (pe->destination.prefixlen > 8) if (pe->destination->prefixlen > 8)
stream_putc(s, (ntohl(pe->destination.u.prefix4.s_addr) >> 16) & 0xFF); stream_putc(s, (ntohl(pe->destination->u.prefix4.s_addr) >> 16) & 0xFF);
if (pe->destination.prefixlen > 16) if (pe->destination->prefixlen > 16)
stream_putc(s, (ntohl(pe->destination.u.prefix4.s_addr) >> 8) & 0xFF); stream_putc(s, (ntohl(pe->destination->u.prefix4.s_addr) >> 8) & 0xFF);
if (pe->destination.prefixlen > 24) if (pe->destination->prefixlen > 24)
stream_putc(s, ntohl(pe->destination.u.prefix4.s_addr) & 0xFF); stream_putc(s, ntohl(pe->destination->u.prefix4.s_addr) & 0xFF);
return length; return length;
} }
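
The TLV writer above stores the destination as a prefix-length byte followed by only the significant bytes of the network address: one byte for /1–/8, two for /9–/16, and so on up to four for /25–/32. A stripped-down sketch of that variable-length encoding follows; it assumes FRR's lib/stream.h include path and omits the length bookkeeping and error handling of the real function.

```c
/*
 * Sketch of the destination encoding used by
 * eigrp_add_internalTLV_to_stream(): prefix length first, then only as
 * many address bytes as that length requires.  Include paths are
 * assumed; length bookkeeping and error handling are omitted.
 */
#include <arpa/inet.h>
#include "stream.h"   /* struct stream, stream_putc(), assumed from lib/ */
#include "prefix.h"   /* struct prefix, assumed from lib/ */

static void put_destination(struct stream *s, const struct prefix *dest)
{
	uint32_t addr = ntohl(dest->u.prefix4.s_addr);

	stream_putc(s, dest->prefixlen);
	stream_putc(s, (addr >> 24) & 0xFF);        /* most significant byte, always sent */
	if (dest->prefixlen > 8)
		stream_putc(s, (addr >> 16) & 0xFF);
	if (dest->prefixlen > 16)
		stream_putc(s, (addr >> 8) & 0xFF);
	if (dest->prefixlen > 24)
		stream_putc(s, addr & 0xFF);
}
```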

View file

@ -41,7 +41,7 @@
uint32_t eigrp_query_send_all(struct eigrp *eigrp) uint32_t eigrp_query_send_all(struct eigrp *eigrp)
{ {
struct eigrp_interface *iface; struct eigrp_interface *iface;
struct listnode *node2, *nnode2; struct listnode *node, *node2, *nnode2;
struct eigrp_prefix_descriptor *pe; struct eigrp_prefix_descriptor *pe;
uint32_t counter; uint32_t counter;
@ -51,7 +51,7 @@ uint32_t eigrp_query_send_all(struct eigrp *eigrp)
} }
counter = 0; counter = 0;
frr_each (eigrp_interface_hash, &eigrp->eifs, iface) { for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, iface)) {
eigrp_send_query(iface); eigrp_send_query(iface);
counter++; counter++;
} }
@ -146,7 +146,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
{ {
struct eigrp_packet *ep = NULL; struct eigrp_packet *ep = NULL;
uint16_t length = EIGRP_HEADER_LEN; uint16_t length = EIGRP_HEADER_LEN;
struct listnode *node, *nnode; struct listnode *node, *nnode, *node2, *nnode2;
struct eigrp_neighbor *nbr; struct eigrp_neighbor *nbr;
struct eigrp_prefix_descriptor *pe; struct eigrp_prefix_descriptor *pe;
bool has_tlv = false; bool has_tlv = false;
@ -177,7 +177,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
length += eigrp_add_internalTLV_to_stream(ep->s, pe); length += eigrp_add_internalTLV_to_stream(ep->s, pe);
has_tlv = true; has_tlv = true;
frr_each (eigrp_nbr_hash, &ei->nbr_hash_head, nbr) { for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
if (nbr->state == EIGRP_NEIGHBOR_UP) if (nbr->state == EIGRP_NEIGHBOR_UP)
listnode_add(pe->rij, nbr); listnode_add(pe->rij, nbr);
} }
@ -197,7 +197,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
ep->sequence_number = ei->eigrp->sequence_number; ep->sequence_number = ei->eigrp->sequence_number;
ei->eigrp->sequence_number++; ei->eigrp->sequence_number++;
frr_each (eigrp_nbr_hash, &ei->nbr_hash_head, nbr) { for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
struct eigrp_packet *dup; struct eigrp_packet *dup;
if (nbr->state != EIGRP_NEIGHBOR_UP) if (nbr->state != EIGRP_NEIGHBOR_UP)
@ -237,7 +237,7 @@ void eigrp_send_query(struct eigrp_interface *ei)
ep->sequence_number = ei->eigrp->sequence_number; ep->sequence_number = ei->eigrp->sequence_number;
ei->eigrp->sequence_number++; ei->eigrp->sequence_number++;
frr_each (eigrp_nbr_hash, &ei->nbr_hash_head, nbr) { for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
struct eigrp_packet *dup; struct eigrp_packet *dup;
if (nbr->state != EIGRP_NEIGHBOR_UP) if (nbr->state != EIGRP_NEIGHBOR_UP)

View file

@ -61,7 +61,8 @@ void eigrp_send_reply(struct eigrp_neighbor *nbr,
sizeof(struct eigrp_prefix_descriptor)); sizeof(struct eigrp_prefix_descriptor));
memcpy(pe2, pe, sizeof(struct eigrp_prefix_descriptor)); memcpy(pe2, pe, sizeof(struct eigrp_prefix_descriptor));
if (eigrp_update_prefix_apply(eigrp, ei, EIGRP_FILTER_OUT, &pe2->destination)) { if (eigrp_update_prefix_apply(eigrp, ei, EIGRP_FILTER_OUT,
pe2->destination)) {
zlog_info("REPLY SEND: Setting Metric to max"); zlog_info("REPLY SEND: Setting Metric to max");
pe2->reported_metric.delay = EIGRP_MAX_METRIC; pe2->reported_metric.delay = EIGRP_MAX_METRIC;
} }

Some files were not shown because too many files have changed in this diff.