// SPDX-License-Identifier: GPL-2.0-or-later
/**
 * bgp_updgrp.c: BGP update group structures
 *
 * @copyright Copyright (C) 2014 Cumulus Networks, Inc.
 *
 * @author Avneesh Sachdev <avneesh@sproute.net>
 * @author Rajesh Varadarajan <rajesh@sproute.net>
 * @author Pradosh Mohapatra <pradosh@sproute.net>
 */
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include "prefix.h"
|
2022-02-28 16:40:31 +01:00
|
|
|
#include "event.h"
|
2015-05-20 03:03:47 +02:00
|
|
|
#include "buffer.h"
|
|
|
|
#include "stream.h"
|
|
|
|
#include "command.h"
|
|
|
|
#include "sockunion.h"
|
|
|
|
#include "network.h"
|
|
|
|
#include "memory.h"
|
|
|
|
#include "filter.h"
|
|
|
|
#include "routemap.h"
|
|
|
|
#include "log.h"
|
|
|
|
#include "plist.h"
|
|
|
|
#include "linklist.h"
|
|
|
|
#include "workqueue.h"
|
|
|
|
#include "hash.h"
|
|
|
|
#include "jhash.h"
|
|
|
|
#include "queue.h"
|
|
|
|
|
|
|
|
#include "bgpd/bgpd.h"
|
|
|
|
#include "bgpd/bgp_table.h"
|
|
|
|
#include "bgpd/bgp_debug.h"
|
2018-06-15 23:08:53 +02:00
|
|
|
#include "bgpd/bgp_errors.h"
|
2015-05-20 03:03:47 +02:00
|
|
|
#include "bgpd/bgp_fsm.h"
|
2022-01-27 09:12:59 +01:00
|
|
|
#include "bgpd/bgp_addpath.h"
|
2015-05-20 03:03:47 +02:00
|
|
|
#include "bgpd/bgp_advertise.h"
|
|
|
|
#include "bgpd/bgp_packet.h"
|
|
|
|
#include "bgpd/bgp_updgrp.h"
|
|
|
|
#include "bgpd/bgp_route.h"
|
|
|
|
#include "bgpd/bgp_filter.h"
|
2017-06-12 22:20:50 +02:00
|
|
|
#include "bgpd/bgp_io.h"
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
/********************
 * PRIVATE FUNCTIONS
 ********************/
/**
|
|
|
|
* assign a unique ID to update group and subgroup. Mostly for display/
|
|
|
|
* debugging purposes. It's a 64-bit space - used leisurely without a
|
|
|
|
* worry about its wrapping and about filling gaps. While at it, timestamp
|
|
|
|
* the creation.
|
|
|
|
*/
|
|
|
|
static void update_group_checkin(struct update_group *updgrp)
|
|
|
|
{
|
|
|
|
updgrp->id = ++bm->updgrp_idspace;
|
2022-08-18 00:27:54 +02:00
|
|
|
updgrp->uptime = monotime(NULL);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void update_subgroup_checkin(struct update_subgroup *subgrp,
|
|
|
|
struct update_group *updgrp)
|
|
|
|
{
|
|
|
|
subgrp->id = ++bm->subgrp_idspace;
|
2022-08-18 00:27:54 +02:00
|
|
|
subgrp->uptime = monotime(NULL);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
2021-02-25 18:46:49 +01:00
|
|
|
static void sync_init(struct update_subgroup *subgrp,
|
|
|
|
struct update_group *updgrp)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
2021-02-25 18:46:49 +01:00
|
|
|
struct peer *peer = UPDGRP_PEER(updgrp);
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
subgrp->sync =
|
|
|
|
XCALLOC(MTYPE_BGP_SYNCHRONISE, sizeof(struct bgp_synchronize));
|
2019-04-21 18:17:45 +02:00
|
|
|
bgp_adv_fifo_init(&subgrp->sync->update);
|
|
|
|
bgp_adv_fifo_init(&subgrp->sync->withdraw);
|
|
|
|
bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
|
2017-09-04 00:57:30 +02:00
|
|
|
subgrp->hash =
|
2022-07-25 15:37:17 +02:00
|
|
|
hash_create(bgp_advertise_attr_hash_key,
|
|
|
|
bgp_advertise_attr_hash_cmp, "BGP SubGroup Hash");
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* We use a larger buffer for subgrp->work in the event that:
|
|
|
|
* - We RX a BGP_UPDATE where the attributes alone are just
|
2021-02-25 18:46:49 +01:00
|
|
|
* under 4096 or 65535 (if Extended Message capability negotiated).
|
2015-05-20 03:03:47 +02:00
|
|
|
* - The user configures an outbound route-map that does many as-path
|
|
|
|
* prepends or adds many communities. At most they can have
|
|
|
|
* CMD_ARGC_MAX
|
|
|
|
* args in a route-map so there is a finite limit on how large they
|
|
|
|
* can
|
|
|
|
* make the attributes.
|
|
|
|
*
|
|
|
|
* Having a buffer with BGP_MAX_PACKET_SIZE_OVERFLOW allows us to avoid
|
|
|
|
* bounds
|
|
|
|
* checking for every single attribute as we construct an UPDATE.
|
|
|
|
*/
|
2021-02-25 18:46:49 +01:00
|
|
|
subgrp->work = stream_new(peer->max_packet_size
|
|
|
|
+ BGP_MAX_PACKET_SIZE_OVERFLOW);
|
|
|
|
subgrp->scratch = stream_new(peer->max_packet_size);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void sync_delete(struct update_subgroup *subgrp)
|
|
|
|
{
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
|
2023-03-21 13:54:21 +01:00
|
|
|
hash_clean_and_free(&subgrp->hash,
|
|
|
|
(void (*)(void *))bgp_advertise_attr_free);
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (subgrp->work)
|
|
|
|
stream_free(subgrp->work);
|
|
|
|
subgrp->work = NULL;
|
|
|
|
if (subgrp->scratch)
|
|
|
|
stream_free(subgrp->scratch);
|
|
|
|
subgrp->scratch = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* conf_copy
|
|
|
|
*
|
|
|
|
* copy only those fields that are relevant to update group match
|
|
|
|
*/
|
|
|
|
static void conf_copy(struct peer *dst, struct peer *src, afi_t afi,
|
|
|
|
safi_t safi)
|
|
|
|
{
|
|
|
|
struct bgp_filter *srcfilter;
|
|
|
|
struct bgp_filter *dstfilter;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
srcfilter = &src->filter[afi][safi];
|
|
|
|
dstfilter = &dst->filter[afi][safi];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
dst->bgp = src->bgp;
|
|
|
|
dst->sort = src->sort;
|
|
|
|
dst->as = src->as;
|
|
|
|
dst->v_routeadv = src->v_routeadv;
|
|
|
|
dst->flags = src->flags;
|
|
|
|
dst->af_flags[afi][safi] = src->af_flags[afi][safi];
|
2020-01-17 15:04:18 +01:00
|
|
|
dst->pmax_out[afi][safi] = src->pmax_out[afi][safi];
|
2021-02-25 18:46:49 +01:00
|
|
|
dst->max_packet_size = src->max_packet_size;
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_PEER_HOST, dst->host);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 16:44:57 +02:00
|
|
|
dst->host = XSTRDUP(MTYPE_BGP_PEER_HOST, src->host);
|
2015-05-20 03:03:47 +02:00
|
|
|
dst->cap = src->cap;
|
|
|
|
dst->af_cap[afi][safi] = src->af_cap[afi][safi];
|
|
|
|
dst->afc_nego[afi][safi] = src->afc_nego[afi][safi];
|
2015-11-04 17:31:33 +01:00
|
|
|
dst->orf_plist[afi][safi] = src->orf_plist[afi][safi];
|
bgpd: Re-use TX Addpath IDs where possible
The motivation for this patch is to address a concerning behavior of
tx-addpath-bestpath-per-AS. Prior to this patch, all paths' TX ID was
pre-determined as the path was received from a peer. However, this meant
that any time the path selected as best from an AS changed, bgpd had no
choice but to withdraw the previous best path, and advertise the new
best-path under a new TX ID. This could cause significant network
disruption, especially for the subset of prefixes coming from only one
AS that were also communicated over a bestpath-per-AS session.
The patch's general approach is best illustrated by
txaddpath_update_ids. After a bestpath run (required for best-per-AS to
know what will and will not be sent as addpaths) ID numbers will be
stripped from paths that no longer need to be sent, and held in a pool.
Then, paths that will be sent as addpaths and do not already have ID
numbers will allocate new ID numbers, pulling first from that pool.
Finally, anything left in the pool will be returned to the allocator.
In order for this to work, ID numbers had to be split by strategy. The
tx-addpath-All strategy would keep every ID number "in use" constantly,
preventing IDs from being transferred to different paths. Rather than
create two variables for ID, this patch create a more generic array that
will easily enable more addpath strategies to be implemented. The
previously described ID manipulations will happen per addpath strategy,
and will only be run for strategies that are enabled on at least one
peer.
Finally, the ID numbers are allocated from an allocator that tracks per
AFI/SAFI/Addpath Strategy which IDs are in use. Though it would be very
improbable, there was the possibility with the free-running counter
approach for rollover to cause two paths on the same prefix to get
assigned the same TX ID. As remote as the possibility is, we prefer to
not leave it to chance.
This ID re-use method is not perfect. In some cases you could still get
withdraw-then-add behaviors where not strictly necessary. In the case of
bestpath-per-AS this requires one AS to advertise a prefix for the first
time, then a second AS withdraws that prefix, all within the space of an
already pending MRAI timer. In those situations a withdraw-then-add is
more forgivable, and fixing it would probably require a much more
significant effort, as IDs would need to be moved to ADVs instead of
paths.
Signed-off-by Mitchell Skiba <mskiba@amazon.com>
2018-05-10 01:10:02 +02:00
|
|
|
dst->addpath_type[afi][safi] = src->addpath_type[afi][safi];
|
2015-05-20 03:03:47 +02:00
|
|
|
dst->local_as = src->local_as;
|
|
|
|
dst->change_local_as = src->change_local_as;
|
|
|
|
dst->shared_network = src->shared_network;
|
2022-06-17 12:14:46 +02:00
|
|
|
dst->local_role = src->local_role;
|
2023-01-05 09:25:38 +01:00
|
|
|
dst->as_path_loop_detection = src->as_path_loop_detection;
|
2022-08-19 12:15:15 +02:00
|
|
|
|
|
|
|
if (src->soo[afi][safi]) {
|
|
|
|
ecommunity_free(&dst->soo[afi][safi]);
|
|
|
|
dst->soo[afi][safi] = ecommunity_dup(src->soo[afi][safi]);
|
|
|
|
}
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
dst->group = src->group;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (src->default_rmap[afi][safi].name) {
|
|
|
|
dst->default_rmap[afi][safi].name =
|
2015-08-26 16:44:57 +02:00
|
|
|
XSTRDUP(MTYPE_ROUTE_MAP_NAME,
|
|
|
|
src->default_rmap[afi][safi].name);
|
2015-05-20 03:03:47 +02:00
|
|
|
dst->default_rmap[afi][safi].map =
|
|
|
|
src->default_rmap[afi][safi].map;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (DISTRIBUTE_OUT_NAME(srcfilter)) {
|
2015-08-26 16:44:57 +02:00
|
|
|
DISTRIBUTE_OUT_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, DISTRIBUTE_OUT_NAME(srcfilter));
|
2015-05-20 03:03:47 +02:00
|
|
|
DISTRIBUTE_OUT(dstfilter) = DISTRIBUTE_OUT(srcfilter);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (PREFIX_LIST_OUT_NAME(srcfilter)) {
|
2015-08-26 16:44:57 +02:00
|
|
|
PREFIX_LIST_OUT_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, PREFIX_LIST_OUT_NAME(srcfilter));
|
2015-05-20 03:03:47 +02:00
|
|
|
PREFIX_LIST_OUT(dstfilter) = PREFIX_LIST_OUT(srcfilter);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (FILTER_LIST_OUT_NAME(srcfilter)) {
|
2015-08-26 16:44:57 +02:00
|
|
|
FILTER_LIST_OUT_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, FILTER_LIST_OUT_NAME(srcfilter));
|
2015-05-20 03:03:47 +02:00
|
|
|
FILTER_LIST_OUT(dstfilter) = FILTER_LIST_OUT(srcfilter);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (ROUTE_MAP_OUT_NAME(srcfilter)) {
|
2015-08-26 16:44:57 +02:00
|
|
|
ROUTE_MAP_OUT_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, ROUTE_MAP_OUT_NAME(srcfilter));
|
2015-05-20 03:03:47 +02:00
|
|
|
ROUTE_MAP_OUT(dstfilter) = ROUTE_MAP_OUT(srcfilter);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (UNSUPPRESS_MAP_NAME(srcfilter)) {
|
2015-08-26 16:44:57 +02:00
|
|
|
UNSUPPRESS_MAP_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, UNSUPPRESS_MAP_NAME(srcfilter));
|
2015-05-20 03:03:47 +02:00
|
|
|
UNSUPPRESS_MAP(dstfilter) = UNSUPPRESS_MAP(srcfilter);
|
|
|
|
}
|
bgpd: conditional advertisement
Implemented as per the feature description given in the source link.
Descriprion:
The BGP conditional advertisement feature uses the non-exist-map or exist-map
and the advertise-map keywords of the neighbor advertise-map command in order
to track routes by the route prefix.
non-exist-map :
If a route prefix is not present in output of the non-exist-map command, then
the route specified by the advertise-map command is announced.
exist-map :
If a route prefix is present in output of the exist-map command, then the route
specified by the advertise-map command is announced.
The conditional BGP announcements are sent in addition to the normal
announcements that a BGP router sends to its peers.
The conditional advertisement process is triggered by the BGP scanner process,
which runs every 60 seconds. This means that the maximum time for the conditional
advertisement to take effect is 60 seconds. The conditional advertisement can take
effect sooner, depending on when the tracked route is removed from the BGP table
and when the next instance of the BGP scanner occurs.
Sample Configuration on DUT
---------------------------
Router2# show running-config
Building configuration...
Current configuration:
!
frr version 7.6-dev-MyOwnFRRVersion
frr defaults traditional
hostname router
log file /var/log/frr/bgpd.log
log syslog informational
hostname Router2
service integrated-vtysh-config
!
debug bgp updates in
debug bgp updates out
!
debug route-map
!
ip route 200.200.0.0/16 blackhole
ipv6 route 2001:db8::200/128 blackhole
!
interface enp0s9
ip address 10.10.10.2/24
!
interface enp0s10
ip address 10.10.20.2/24
!
interface lo
ip address 2.2.2.2/24
ipv6 address 2001:db8::2/128
!
router bgp 2
bgp log-neighbor-changes
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
!
address-family ipv4 unicast
network 2.2.2.0/24
network 200.200.0.0/16
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE non-exist-map CONDITION
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
address-family ipv6 unicast
network 2001:db8::2/128
network 2001:db8::200/128
neighbor 10.10.10.1 activate
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE_6 non-exist-map CONDITION_6
neighbor 10.10.20.3 activate
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
access-list CONDITION seq 5 permit 3.3.3.0/24
access-list ADVERTISE seq 5 permit 2.2.2.0/24
access-list ADVERTISE seq 6 permit 200.200.0.0/16
access-list ADVERTISE seq 7 permit 20.20.0.0/16
!
ipv6 access-list ADVERTISE_6 seq 5 permit 2001:db8::2/128
ipv6 access-list CONDITION_6 seq 5 permit 2001:db8::3/128
!
route-map ADVERTISE permit 10
match ip address ADVERTISE
!
route-map CONDITION permit 10
match ip address CONDITION
!
route-map ADVERTISE_6 permit 10
match ipv6 address ADVERTISE_6
!
route-map CONDITION_6 permit 10
match ipv6 address CONDITION_6
!
line vty
!
end
Router2#
Withdraw when non-exist-map prefixes present in BGP table:
----------------------------------------------------------
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 3.3.3.0/24 10.10.20.3 0 0 3 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 4 routes and 4 total paths
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::3/128 fe80::a00:27ff:fe76:6738 0 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 4 routes and 4 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
BGP neighbor is 10.10.10.1, remote AS 1, local AS 2, external link
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Withdraw
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Withdraw
1 accepted prefixes
!--- Output suppressed.
Router2#
Here 2.2.2.0/24 & 200.200.0.0/16 (prefixes in advertise-map) are withdrawn
by conditional advertisement scanner as the prefix(3.3.3.0/24) specified
by non-exist-map is present in BGP table.
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 3.3.3.0/24 0.0.0.0 0 3 i
Total number of prefixes 2
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::3/128 :: 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Advertise when non-exist-map prefixes not present in BGP table:
---------------------------------------------------------------
After Removing 3.3.3.0/24 (prefix present in non-exist-map),
2.2.2.0/24 & 200.200.0.0/16 (prefixes present in advertise-map) are advertised
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 3 routes and 3 total paths
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 3 routes and 3 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Advertise
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Advertise
1 accepted prefixes
!--- Output suppressed.
Router2#
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Total number of prefixes 3
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-09-29 11:46:04 +02:00
|
|
|
|
|
|
|
if (ADVERTISE_MAP_NAME(srcfilter)) {
|
|
|
|
ADVERTISE_MAP_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, ADVERTISE_MAP_NAME(srcfilter));
|
|
|
|
ADVERTISE_MAP(dstfilter) = ADVERTISE_MAP(srcfilter);
|
|
|
|
ADVERTISE_CONDITION(dstfilter) = ADVERTISE_CONDITION(srcfilter);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (CONDITION_MAP_NAME(srcfilter)) {
|
|
|
|
CONDITION_MAP_NAME(dstfilter) = XSTRDUP(
|
|
|
|
MTYPE_BGP_FILTER_NAME, CONDITION_MAP_NAME(srcfilter));
|
|
|
|
CONDITION_MAP(dstfilter) = CONDITION_MAP(srcfilter);
|
|
|
|
}
|
2021-06-16 01:49:19 +02:00
|
|
|
|
|
|
|
dstfilter->advmap.update_type = srcfilter->advmap.update_type;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2015-08-26 16:44:57 +02:00
|
|
|
* since we did a bunch of XSTRDUP's in conf_copy, time to free them up
|
2015-05-20 03:03:47 +02:00
|
|
|
*/
|
|
|
|
static void conf_release(struct peer *src, afi_t afi, safi_t safi)
|
|
|
|
{
|
|
|
|
struct bgp_filter *srcfilter;
|
|
|
|
|
|
|
|
srcfilter = &src->filter[afi][safi];
|
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_ROUTE_MAP_NAME, src->default_rmap[afi][safi].name);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->dlist[FILTER_OUT].name);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->plist[FILTER_OUT].name);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->aslist[FILTER_OUT].name);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->map[RMAP_OUT].name);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->usmap.name);
|
2015-09-02 14:19:44 +02:00
|
|
|
|
bgpd: conditional advertisement
Implemented as per the feature description given in the source link.
Descriprion:
The BGP conditional advertisement feature uses the non-exist-map or exist-map
and the advertise-map keywords of the neighbor advertise-map command in order
to track routes by the route prefix.
non-exist-map :
If a route prefix is not present in output of the non-exist-map command, then
the route specified by the advertise-map command is announced.
exist-map :
If a route prefix is present in output of the exist-map command, then the route
specified by the advertise-map command is announced.
The conditional BGP announcements are sent in addition to the normal
announcements that a BGP router sends to its peers.
The conditional advertisement process is triggered by the BGP scanner process,
which runs every 60 seconds. This means that the maximum time for the conditional
advertisement to take effect is 60 seconds. The conditional advertisement can take
effect sooner, depending on when the tracked route is removed from the BGP table
and when the next instance of the BGP scanner occurs.
Sample Configuration on DUT
---------------------------
Router2# show running-config
Building configuration...
Current configuration:
!
frr version 7.6-dev-MyOwnFRRVersion
frr defaults traditional
hostname router
log file /var/log/frr/bgpd.log
log syslog informational
hostname Router2
service integrated-vtysh-config
!
debug bgp updates in
debug bgp updates out
!
debug route-map
!
ip route 200.200.0.0/16 blackhole
ipv6 route 2001:db8::200/128 blackhole
!
interface enp0s9
ip address 10.10.10.2/24
!
interface enp0s10
ip address 10.10.20.2/24
!
interface lo
ip address 2.2.2.2/24
ipv6 address 2001:db8::2/128
!
router bgp 2
bgp log-neighbor-changes
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
!
address-family ipv4 unicast
network 2.2.2.0/24
network 200.200.0.0/16
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE non-exist-map CONDITION
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
address-family ipv6 unicast
network 2001:db8::2/128
network 2001:db8::200/128
neighbor 10.10.10.1 activate
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE_6 non-exist-map CONDITION_6
neighbor 10.10.20.3 activate
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
access-list CONDITION seq 5 permit 3.3.3.0/24
access-list ADVERTISE seq 5 permit 2.2.2.0/24
access-list ADVERTISE seq 6 permit 200.200.0.0/16
access-list ADVERTISE seq 7 permit 20.20.0.0/16
!
ipv6 access-list ADVERTISE_6 seq 5 permit 2001:db8::2/128
ipv6 access-list CONDITION_6 seq 5 permit 2001:db8::3/128
!
route-map ADVERTISE permit 10
match ip address ADVERTISE
!
route-map CONDITION permit 10
match ip address CONDITION
!
route-map ADVERTISE_6 permit 10
match ipv6 address ADVERTISE_6
!
route-map CONDITION_6 permit 10
match ipv6 address CONDITION_6
!
line vty
!
end
Router2#
Withdraw when non-exist-map prefixes present in BGP table:
----------------------------------------------------------
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 3.3.3.0/24 10.10.20.3 0 0 3 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 4 routes and 4 total paths
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::3/128 fe80::a00:27ff:fe76:6738 0 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 4 routes and 4 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
BGP neighbor is 10.10.10.1, remote AS 1, local AS 2, external link
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Withdraw
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Withdraw
1 accepted prefixes
!--- Output suppressed.
Router2#
Here 2.2.2.0/24 & 200.200.0.0/16 (prefixes in advertise-map) are withdrawn
by conditional advertisement scanner as the prefix(3.3.3.0/24) specified
by non-exist-map is present in BGP table.
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 3.3.3.0/24 0.0.0.0 0 3 i
Total number of prefixes 2
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::3/128 :: 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Advertise when non-exist-map prefixes not present in BGP table:
---------------------------------------------------------------
After Removing 3.3.3.0/24 (prefix present in non-exist-map),
2.2.2.0/24 & 200.200.0.0/16 (prefixes present in advertise-map) are advertised
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 3 routes and 3 total paths
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 3 routes and 3 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Advertise
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Advertise
1 accepted prefixes
!--- Output suppressed.
Router2#
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Total number of prefixes 3
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-09-29 11:46:04 +02:00
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.aname);
|
|
|
|
|
|
|
|
XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname);
|
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_PEER_HOST, src->host);
|
2022-09-10 01:28:07 +02:00
|
|
|
|
|
|
|
ecommunity_free(&src->soo[afi][safi]);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf)
|
|
|
|
{
|
|
|
|
struct peer *src;
|
|
|
|
struct peer *dst;
|
|
|
|
|
|
|
|
if (!updgrp || !paf)
|
|
|
|
return;
|
|
|
|
|
|
|
|
src = paf->peer;
|
|
|
|
dst = updgrp->conf;
|
|
|
|
if (!src || !dst)
|
|
|
|
return;
|
|
|
|
|
|
|
|
updgrp->afi = paf->afi;
|
|
|
|
updgrp->safi = paf->safi;
|
|
|
|
updgrp->afid = paf->afid;
|
|
|
|
updgrp->bgp = src->bgp;
|
|
|
|
|
|
|
|
conf_copy(dst, src, paf->afi, paf->safi);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* auxiliary functions to maintain the hash table.
|
|
|
|
* - updgrp_hash_alloc - to create a new entry, passed to hash_get
|
|
|
|
* - updgrp_hash_key_make - makes the key for update group search
|
|
|
|
* - updgrp_hash_cmp - compare two update groups.
|
|
|
|
*/
|
|
|
|
static void *updgrp_hash_alloc(void *p)
|
|
|
|
{
|
|
|
|
struct update_group *updgrp;
|
2015-05-20 03:12:17 +02:00
|
|
|
const struct update_group *in;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:12:17 +02:00
|
|
|
in = (const struct update_group *)p;
|
2015-05-20 03:03:47 +02:00
|
|
|
updgrp = XCALLOC(MTYPE_BGP_UPDGRP, sizeof(struct update_group));
|
|
|
|
memcpy(updgrp, in, sizeof(struct update_group));
|
|
|
|
updgrp->conf = XCALLOC(MTYPE_BGP_PEER, sizeof(struct peer));
|
|
|
|
conf_copy(updgrp->conf, in->conf, in->afi, in->safi);
|
|
|
|
return updgrp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* The hash value for a peer is computed from the following variables:
|
|
|
|
* v = f(
|
|
|
|
* 1. IBGP (1) or EBGP (2)
|
|
|
|
* 2. FLAGS based on configuration:
|
|
|
|
* LOCAL_AS_NO_PREPEND
|
|
|
|
* LOCAL_AS_REPLACE_AS
|
|
|
|
* 3. AF_FLAGS based on configuration:
|
|
|
|
* Refer to definition in bgp_updgrp.h
|
|
|
|
* 4. (AF-independent) Capability flags:
|
|
|
|
* AS4_RCV capability
|
|
|
|
* 5. (AF-dependent) Capability flags:
|
|
|
|
* ORF_PREFIX_SM_RCV (peer can send prefix ORF)
|
|
|
|
* 6. MRAI
|
|
|
|
* 7. peer-group name
|
|
|
|
* 8. Outbound route-map name (neighbor route-map <> out)
|
|
|
|
* 9. Outbound distribute-list name (neighbor distribute-list <> out)
|
|
|
|
* 10. Outbound prefix-list name (neighbor prefix-list <> out)
|
|
|
|
* 11. Outbound as-list name (neighbor filter-list <> out)
|
|
|
|
* 12. Unsuppress map name (neighbor unsuppress-map <>)
|
|
|
|
* 13. default rmap name (neighbor default-originate route-map <>)
|
|
|
|
* 14. encoding both global and link-local nexthop?
|
|
|
|
* 15. If peer is configured to be a lonesoul, peer ip address
|
|
|
|
* 16. Local-as should match, if configured.
|
2021-10-20 16:15:22 +02:00
|
|
|
* 17. maximum-prefix-out
|
2022-06-17 12:14:46 +02:00
|
|
|
* 18. Local-role should also match, if configured.
|
2015-05-20 03:03:47 +02:00
|
|
|
* )
|
|
|
|
*/
|
2019-05-14 22:19:07 +02:00
|
|
|
static unsigned int updgrp_hash_key_make(const void *p)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
const struct update_group *updgrp;
|
|
|
|
const struct peer *peer;
|
|
|
|
const struct bgp_filter *filter;
|
2023-02-23 22:02:35 +01:00
|
|
|
uint64_t flags;
|
2015-05-20 03:03:47 +02:00
|
|
|
uint32_t key;
|
|
|
|
afi_t afi;
|
|
|
|
safi_t safi;
|
|
|
|
|
2023-03-01 21:43:42 +01:00
|
|
|
/*
|
|
|
|
* IF YOU ADD AN ADDITION TO THE HASH KEY TO ENSURE
|
|
|
|
* THAT THE UPDATE GROUP CALCULATION IS CORRECT THEN
|
|
|
|
* PLEASE ADD IT TO THE DEBUG OUTPUT TOO AT THE BOTTOM
|
|
|
|
*/
|
2015-05-20 03:03:47 +02:00
|
|
|
#define SEED1 999331
|
|
|
|
#define SEED2 2147483647
|
|
|
|
|
|
|
|
updgrp = p;
|
|
|
|
peer = updgrp->conf;
|
|
|
|
afi = updgrp->afi;
|
|
|
|
safi = updgrp->safi;
|
|
|
|
flags = peer->af_flags[afi][safi];
|
|
|
|
filter = &peer->filter[afi][safi];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
key = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
key = jhash_1word(peer->sort, key); /* EBGP or IBGP */
|
|
|
|
key = jhash_1word((peer->flags & PEER_UPDGRP_FLAGS), key);
|
|
|
|
key = jhash_1word((flags & PEER_UPDGRP_AF_FLAGS), key);
|
bgpd: Re-use TX Addpath IDs where possible
The motivation for this patch is to address a concerning behavior of
tx-addpath-bestpath-per-AS. Prior to this patch, all paths' TX ID was
pre-determined as the path was received from a peer. However, this meant
that any time the path selected as best from an AS changed, bgpd had no
choice but to withdraw the previous best path, and advertise the new
best-path under a new TX ID. This could cause significant network
disruption, especially for the subset of prefixes coming from only one
AS that were also communicated over a bestpath-per-AS session.
The patch's general approach is best illustrated by
txaddpath_update_ids. After a bestpath run (required for best-per-AS to
know what will and will not be sent as addpaths) ID numbers will be
stripped from paths that no longer need to be sent, and held in a pool.
Then, paths that will be sent as addpaths and do not already have ID
numbers will allocate new ID numbers, pulling first from that pool.
Finally, anything left in the pool will be returned to the allocator.
In order for this to work, ID numbers had to be split by strategy. The
tx-addpath-All strategy would keep every ID number "in use" constantly,
preventing IDs from being transferred to different paths. Rather than
create two variables for ID, this patch create a more generic array that
will easily enable more addpath strategies to be implemented. The
previously described ID manipulations will happen per addpath strategy,
and will only be run for strategies that are enabled on at least one
peer.
Finally, the ID numbers are allocated from an allocator that tracks per
AFI/SAFI/Addpath Strategy which IDs are in use. Though it would be very
improbable, there was the possibility with the free-running counter
approach for rollover to cause two paths on the same prefix to get
assigned the same TX ID. As remote as the possibility is, we prefer to
not leave it to chance.
This ID re-use method is not perfect. In some cases you could still get
withdraw-then-add behaviors where not strictly necessary. In the case of
bestpath-per-AS this requires one AS to advertise a prefix for the first
time, then a second AS withdraws that prefix, all within the space of an
already pending MRAI timer. In those situations a withdraw-then-add is
more forgivable, and fixing it would probably require a much more
significant effort, as IDs would need to be moved to ADVs instead of
paths.
Signed-off-by Mitchell Skiba <mskiba@amazon.com>
2018-05-10 01:10:02 +02:00
|
|
|
key = jhash_1word((uint32_t)peer->addpath_type[afi][safi], key);
|
2015-05-20 03:03:47 +02:00
|
|
|
key = jhash_1word((peer->cap & PEER_UPDGRP_CAP_FLAGS), key);
|
|
|
|
key = jhash_1word((peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS),
|
|
|
|
key);
|
|
|
|
key = jhash_1word(peer->v_routeadv, key);
|
|
|
|
key = jhash_1word(peer->change_local_as, key);
|
2021-07-02 15:48:11 +02:00
|
|
|
key = jhash_1word(peer->max_packet_size, key);
|
2021-10-20 16:15:22 +02:00
|
|
|
key = jhash_1word(peer->pmax_out[afi][safi], key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2023-01-05 09:25:38 +01:00
|
|
|
if (peer->as_path_loop_detection)
|
|
|
|
key = jhash_2words(peer->as, peer->as_path_loop_detection, key);
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (peer->group)
|
|
|
|
key = jhash_1word(jhash(peer->group->name,
|
|
|
|
strlen(peer->group->name), SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (filter->map[RMAP_OUT].name)
|
|
|
|
key = jhash_1word(jhash(filter->map[RMAP_OUT].name,
|
|
|
|
strlen(filter->map[RMAP_OUT].name),
|
|
|
|
SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (filter->dlist[FILTER_OUT].name)
|
|
|
|
key = jhash_1word(jhash(filter->dlist[FILTER_OUT].name,
|
|
|
|
strlen(filter->dlist[FILTER_OUT].name),
|
|
|
|
SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (filter->plist[FILTER_OUT].name)
|
|
|
|
key = jhash_1word(jhash(filter->plist[FILTER_OUT].name,
|
|
|
|
strlen(filter->plist[FILTER_OUT].name),
|
|
|
|
SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (filter->aslist[FILTER_OUT].name)
|
|
|
|
key = jhash_1word(jhash(filter->aslist[FILTER_OUT].name,
|
|
|
|
strlen(filter->aslist[FILTER_OUT].name),
|
|
|
|
SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (filter->usmap.name)
|
|
|
|
key = jhash_1word(jhash(filter->usmap.name,
|
|
|
|
strlen(filter->usmap.name), SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: conditional advertisement
Implemented as per the feature description given in the source link.
Descriprion:
The BGP conditional advertisement feature uses the non-exist-map or exist-map
and the advertise-map keywords of the neighbor advertise-map command in order
to track routes by the route prefix.
non-exist-map :
If a route prefix is not present in output of the non-exist-map command, then
the route specified by the advertise-map command is announced.
exist-map :
If a route prefix is present in output of the exist-map command, then the route
specified by the advertise-map command is announced.
The conditional BGP announcements are sent in addition to the normal
announcements that a BGP router sends to its peers.
The conditional advertisement process is triggered by the BGP scanner process,
which runs every 60 seconds. This means that the maximum time for the conditional
advertisement to take effect is 60 seconds. The conditional advertisement can take
effect sooner, depending on when the tracked route is removed from the BGP table
and when the next instance of the BGP scanner occurs.
Sample Configuration on DUT
---------------------------
Router2# show running-config
Building configuration...
Current configuration:
!
frr version 7.6-dev-MyOwnFRRVersion
frr defaults traditional
hostname router
log file /var/log/frr/bgpd.log
log syslog informational
hostname Router2
service integrated-vtysh-config
!
debug bgp updates in
debug bgp updates out
!
debug route-map
!
ip route 200.200.0.0/16 blackhole
ipv6 route 2001:db8::200/128 blackhole
!
interface enp0s9
ip address 10.10.10.2/24
!
interface enp0s10
ip address 10.10.20.2/24
!
interface lo
ip address 2.2.2.2/24
ipv6 address 2001:db8::2/128
!
router bgp 2
bgp log-neighbor-changes
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
!
address-family ipv4 unicast
network 2.2.2.0/24
network 200.200.0.0/16
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE non-exist-map CONDITION
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
address-family ipv6 unicast
network 2001:db8::2/128
network 2001:db8::200/128
neighbor 10.10.10.1 activate
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE_6 non-exist-map CONDITION_6
neighbor 10.10.20.3 activate
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
access-list CONDITION seq 5 permit 3.3.3.0/24
access-list ADVERTISE seq 5 permit 2.2.2.0/24
access-list ADVERTISE seq 6 permit 200.200.0.0/16
access-list ADVERTISE seq 7 permit 20.20.0.0/16
!
ipv6 access-list ADVERTISE_6 seq 5 permit 2001:db8::2/128
ipv6 access-list CONDITION_6 seq 5 permit 2001:db8::3/128
!
route-map ADVERTISE permit 10
match ip address ADVERTISE
!
route-map CONDITION permit 10
match ip address CONDITION
!
route-map ADVERTISE_6 permit 10
match ipv6 address ADVERTISE_6
!
route-map CONDITION_6 permit 10
match ipv6 address CONDITION_6
!
line vty
!
end
Router2#
Withdraw when non-exist-map prefixes present in BGP table:
----------------------------------------------------------
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 3.3.3.0/24 10.10.20.3 0 0 3 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 4 routes and 4 total paths
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::3/128 fe80::a00:27ff:fe76:6738 0 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 4 routes and 4 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
BGP neighbor is 10.10.10.1, remote AS 1, local AS 2, external link
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Withdraw
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Withdraw
1 accepted prefixes
!--- Output suppressed.
Router2#
Here 2.2.2.0/24 & 200.200.0.0/16 (prefixes in advertise-map) are withdrawn
by conditional advertisement scanner as the prefix(3.3.3.0/24) specified
by non-exist-map is present in BGP table.
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 3.3.3.0/24 0.0.0.0 0 3 i
Total number of prefixes 2
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::3/128 :: 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Advertise when non-exist-map prefixes not present in BGP table:
---------------------------------------------------------------
After Removing 3.3.3.0/24 (prefix present in non-exist-map),
2.2.2.0/24 & 200.200.0.0/16 (prefixes present in advertise-map) are advertised
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 3 routes and 3 total paths
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 3 routes and 3 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Advertise
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Advertise
1 accepted prefixes
!--- Output suppressed.
Router2#
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Total number of prefixes 3
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-09-29 11:46:04 +02:00
|
|
|
if (filter->advmap.aname)
|
|
|
|
key = jhash_1word(jhash(filter->advmap.aname,
|
|
|
|
strlen(filter->advmap.aname), SEED1),
|
|
|
|
key);
|
|
|
|
|
2021-06-16 01:49:19 +02:00
|
|
|
if (filter->advmap.update_type)
|
|
|
|
key = jhash_1word(filter->advmap.update_type, key);
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (peer->default_rmap[afi][safi].name)
|
|
|
|
key = jhash_1word(
|
|
|
|
jhash(peer->default_rmap[afi][safi].name,
|
|
|
|
strlen(peer->default_rmap[afi][safi].name),
|
|
|
|
SEED1),
|
|
|
|
key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* If peer is on a shared network and is exchanging IPv6 prefixes,
|
|
|
|
* it needs to include link-local address. That's different from
|
|
|
|
* non-shared-network peers (nexthop encoded with 32 bytes vs 16
|
|
|
|
* bytes). We create different update groups to take care of that.
|
|
|
|
*/
|
|
|
|
key = jhash_1word(
|
|
|
|
(peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
|
|
|
|
key);
|
|
|
|
/*
|
2015-11-04 17:31:33 +01:00
|
|
|
* There are certain peers that must get their own update-group:
|
|
|
|
* - lonesoul peers
|
|
|
|
* - peers that negotiated ORF
|
2020-09-02 07:44:27 +02:00
|
|
|
* - maximum-prefix-out is set
|
2015-05-20 03:03:47 +02:00
|
|
|
*/
|
|
|
|
if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)
|
2015-11-04 17:31:33 +01:00
|
|
|
|| CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
|
2015-11-10 16:29:12 +01:00
|
|
|
|| CHECK_FLAG(peer->af_cap[afi][safi],
|
2020-09-02 07:44:27 +02:00
|
|
|
PEER_CAP_ORF_PREFIX_SM_OLD_RCV)
|
|
|
|
|| CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT))
|
2015-05-20 03:03:47 +02:00
|
|
|
key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
|
|
|
|
key);
|
2022-06-17 12:14:46 +02:00
|
|
|
/*
|
|
|
|
* Multiple sessions with the same neighbor should get their own
|
|
|
|
* update-group if they have different roles.
|
|
|
|
*/
|
|
|
|
key = jhash_1word(peer->local_role, key);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-10-12 20:06:47 +02:00
|
|
|
/* Neighbors configured with the AIGP attribute are put in a separate
|
|
|
|
* update group from other neighbors.
|
|
|
|
*/
|
|
|
|
key = jhash_1word((peer->flags & PEER_FLAG_AIGP), key);
|
|
|
|
|
2022-08-19 12:15:15 +02:00
|
|
|
if (peer->soo[afi][safi]) {
|
|
|
|
char *soo_str = ecommunity_str(peer->soo[afi][safi]);
|
|
|
|
|
|
|
|
key = jhash_1word(jhash(soo_str, strlen(soo_str), SEED1), key);
|
|
|
|
}
|
|
|
|
|
2023-03-01 21:43:42 +01:00
|
|
|
/*
|
|
|
|
* ANY NEW ITEMS THAT ARE ADDED TO THE key, ENSURE DEBUG
|
|
|
|
* STATEMENT STAYS UP TO DATE
|
|
|
|
*/
|
2022-05-05 17:21:20 +02:00
|
|
|
if (bgp_debug_neighbor_events(peer)) {
|
|
|
|
zlog_debug(
|
2022-08-31 13:23:23 +02:00
|
|
|
"%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %ju",
|
2022-06-20 10:46:15 +02:00
|
|
|
peer, peer->sort,
|
2022-08-31 13:23:23 +02:00
|
|
|
(intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS),
|
|
|
|
(intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS));
|
2022-05-05 17:21:20 +02:00
|
|
|
zlog_debug(
|
2023-01-05 09:25:38 +01:00
|
|
|
"%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u, as_path_loop_detection: %d",
|
2022-05-05 17:21:20 +02:00
|
|
|
peer, (uint32_t)peer->addpath_type[afi][safi],
|
2022-08-31 13:23:23 +02:00
|
|
|
CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS),
|
|
|
|
CHECK_FLAG(peer->af_cap[afi][safi],
|
|
|
|
PEER_UPDGRP_AF_CAP_FLAGS),
|
2023-01-05 09:25:38 +01:00
|
|
|
peer->v_routeadv, peer->change_local_as,
|
|
|
|
peer->as_path_loop_detection);
|
2022-05-05 17:21:20 +02:00
|
|
|
zlog_debug(
|
|
|
|
"%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s",
|
|
|
|
peer, peer->max_packet_size, peer->pmax_out[afi][safi],
|
|
|
|
peer->group ? peer->group->name : "(NONE)",
|
|
|
|
ROUTE_MAP_OUT_NAME(filter) ? ROUTE_MAP_OUT_NAME(filter)
|
|
|
|
: "(NONE)");
|
|
|
|
zlog_debug(
|
2023-03-01 21:43:42 +01:00
|
|
|
"%pBP Update Group Hash: dlist out: %s plist out: %s aslist out: %s usmap out: %s advmap: %s %d",
|
2022-05-05 17:21:20 +02:00
|
|
|
peer,
|
|
|
|
DISTRIBUTE_OUT_NAME(filter)
|
|
|
|
? DISTRIBUTE_OUT_NAME(filter)
|
|
|
|
: "(NONE)",
|
|
|
|
PREFIX_LIST_OUT_NAME(filter)
|
|
|
|
? PREFIX_LIST_OUT_NAME(filter)
|
|
|
|
: "(NONE)",
|
|
|
|
FILTER_LIST_OUT_NAME(filter)
|
|
|
|
? FILTER_LIST_OUT_NAME(filter)
|
|
|
|
: "(NONE)",
|
|
|
|
UNSUPPRESS_MAP_NAME(filter)
|
|
|
|
? UNSUPPRESS_MAP_NAME(filter)
|
|
|
|
: "(NONE)",
|
|
|
|
ADVERTISE_MAP_NAME(filter) ? ADVERTISE_MAP_NAME(filter)
|
2023-03-01 21:43:42 +01:00
|
|
|
: "(NONE)",
|
|
|
|
filter->advmap.update_type);
|
2022-05-05 17:21:20 +02:00
|
|
|
zlog_debug(
|
|
|
|
"%pBP Update Group Hash: default rmap: %s shared network and afi active network: %d",
|
|
|
|
peer,
|
|
|
|
peer->default_rmap[afi][safi].name
|
|
|
|
? peer->default_rmap[afi][safi].name
|
|
|
|
: "(NONE)",
|
|
|
|
peer->shared_network &&
|
|
|
|
peer_afi_active_nego(peer, AFI_IP6));
|
|
|
|
zlog_debug(
|
2022-08-31 13:23:23 +02:00
|
|
|
"%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %ju",
|
2022-07-08 13:59:10 +02:00
|
|
|
peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
|
2022-05-05 17:21:20 +02:00
|
|
|
CHECK_FLAG(peer->af_cap[afi][safi],
|
|
|
|
PEER_CAP_ORF_PREFIX_SM_RCV),
|
|
|
|
CHECK_FLAG(peer->af_cap[afi][safi],
|
|
|
|
PEER_CAP_ORF_PREFIX_SM_OLD_RCV),
|
2022-08-31 13:23:23 +02:00
|
|
|
(intmax_t)CHECK_FLAG(peer->af_flags[afi][safi],
|
|
|
|
PEER_FLAG_MAX_PREFIX_OUT));
|
2023-03-01 21:43:42 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%pBP Update Group Hash: local role: %u AIGP: %d SOO: %s",
|
|
|
|
peer, peer->local_role,
|
|
|
|
!!CHECK_FLAG(peer->flags, PEER_FLAG_AIGP),
|
|
|
|
peer->soo[afi][safi]
|
|
|
|
? ecommunity_str(peer->soo[afi][safi])
|
|
|
|
: "(NONE)");
|
2022-05-05 17:21:20 +02:00
|
|
|
zlog_debug("%pBP Update Group Hash key: %u", peer, key);
|
|
|
|
}
|
2015-05-20 03:03:47 +02:00
|
|
|
return key;
|
|
|
|
}
|
|
|
|
|
2018-10-17 21:27:12 +02:00
|
|
|
static bool updgrp_hash_cmp(const void *p1, const void *p2)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
const struct update_group *grp1;
|
|
|
|
const struct update_group *grp2;
|
|
|
|
const struct peer *pe1;
|
|
|
|
const struct peer *pe2;
|
2023-02-23 22:02:35 +01:00
|
|
|
uint64_t flags1;
|
|
|
|
uint64_t flags2;
|
2015-05-20 03:03:47 +02:00
|
|
|
const struct bgp_filter *fl1;
|
|
|
|
const struct bgp_filter *fl2;
|
|
|
|
afi_t afi;
|
|
|
|
safi_t safi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (!p1 || !p2)
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
grp1 = p1;
|
|
|
|
grp2 = p2;
|
|
|
|
pe1 = grp1->conf;
|
|
|
|
pe2 = grp2->conf;
|
|
|
|
afi = grp1->afi;
|
|
|
|
safi = grp1->safi;
|
|
|
|
flags1 = pe1->af_flags[afi][safi];
|
|
|
|
flags2 = pe2->af_flags[afi][safi];
|
|
|
|
fl1 = &pe1->filter[afi][safi];
|
|
|
|
fl2 = &pe2->filter[afi][safi];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* put EBGP and IBGP peers in different update groups */
|
|
|
|
if (pe1->sort != pe2->sort)
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* check peer flags */
|
|
|
|
if ((pe1->flags & PEER_UPDGRP_FLAGS)
|
|
|
|
!= (pe2->flags & PEER_UPDGRP_FLAGS))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* If there is 'local-as' configured, it should match. */
|
|
|
|
if (pe1->change_local_as != pe2->change_local_as)
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-10-20 16:15:22 +02:00
|
|
|
if (pe1->pmax_out[afi][safi] != pe2->pmax_out[afi][safi])
|
|
|
|
return false;
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* flags like route reflector client */
|
|
|
|
if ((flags1 & PEER_UPDGRP_AF_FLAGS) != (flags2 & PEER_UPDGRP_AF_FLAGS))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: Re-use TX Addpath IDs where possible
The motivation for this patch is to address a concerning behavior of
tx-addpath-bestpath-per-AS. Prior to this patch, all paths' TX ID was
pre-determined as the path was received from a peer. However, this meant
that any time the path selected as best from an AS changed, bgpd had no
choice but to withdraw the previous best path, and advertise the new
best-path under a new TX ID. This could cause significant network
disruption, especially for the subset of prefixes coming from only one
AS that were also communicated over a bestpath-per-AS session.
The patch's general approach is best illustrated by
txaddpath_update_ids. After a bestpath run (required for best-per-AS to
know what will and will not be sent as addpaths) ID numbers will be
stripped from paths that no longer need to be sent, and held in a pool.
Then, paths that will be sent as addpaths and do not already have ID
numbers will allocate new ID numbers, pulling first from that pool.
Finally, anything left in the pool will be returned to the allocator.
In order for this to work, ID numbers had to be split by strategy. The
tx-addpath-All strategy would keep every ID number "in use" constantly,
preventing IDs from being transferred to different paths. Rather than
create two variables for ID, this patch create a more generic array that
will easily enable more addpath strategies to be implemented. The
previously described ID manipulations will happen per addpath strategy,
and will only be run for strategies that are enabled on at least one
peer.
Finally, the ID numbers are allocated from an allocator that tracks per
AFI/SAFI/Addpath Strategy which IDs are in use. Though it would be very
improbable, there was the possibility with the free-running counter
approach for rollover to cause two paths on the same prefix to get
assigned the same TX ID. As remote as the possibility is, we prefer to
not leave it to chance.
This ID re-use method is not perfect. In some cases you could still get
withdraw-then-add behaviors where not strictly necessary. In the case of
bestpath-per-AS this requires one AS to advertise a prefix for the first
time, then a second AS withdraws that prefix, all within the space of an
already pending MRAI timer. In those situations a withdraw-then-add is
more forgivable, and fixing it would probably require a much more
significant effort, as IDs would need to be moved to ADVs instead of
paths.
Signed-off-by: Mitchell Skiba <mskiba@amazon.com>
2018-05-10 01:10:02 +02:00
|
|
|
if (pe1->addpath_type[afi][safi] != pe2->addpath_type[afi][safi])
|
2019-02-25 19:55:37 +01:00
|
|
|
return false;
|
bgpd: Re-use TX Addpath IDs where possible
The motivation for this patch is to address a concerning behavior of
tx-addpath-bestpath-per-AS. Prior to this patch, all paths' TX ID was
pre-determined as the path was received from a peer. However, this meant
that any time the path selected as best from an AS changed, bgpd had no
choice but to withdraw the previous best path, and advertise the new
best-path under a new TX ID. This could cause significant network
disruption, especially for the subset of prefixes coming from only one
AS that were also communicated over a bestpath-per-AS session.
The patch's general approach is best illustrated by
txaddpath_update_ids. After a bestpath run (required for best-per-AS to
know what will and will not be sent as addpaths) ID numbers will be
stripped from paths that no longer need to be sent, and held in a pool.
Then, paths that will be sent as addpaths and do not already have ID
numbers will allocate new ID numbers, pulling first from that pool.
Finally, anything left in the pool will be returned to the allocator.
In order for this to work, ID numbers had to be split by strategy. The
tx-addpath-All strategy would keep every ID number "in use" constantly,
preventing IDs from being transferred to different paths. Rather than
create two variables for ID, this patch create a more generic array that
will easily enable more addpath strategies to be implemented. The
previously described ID manipulations will happen per addpath strategy,
and will only be run for strategies that are enabled on at least one
peer.
Finally, the ID numbers are allocated from an allocator that tracks per
AFI/SAFI/Addpath Strategy which IDs are in use. Though it would be very
improbable, there was the possibility with the free-running counter
approach for rollover to cause two paths on the same prefix to get
assigned the same TX ID. As remote as the possibility is, we prefer to
not leave it to chance.
This ID re-use method is not perfect. In some cases you could still get
withdraw-then-add behaviors where not strictly necessary. In the case of
bestpath-per-AS this requires one AS to advertise a prefix for the first
time, then a second AS withdraws that prefix, all within the space of an
already pending MRAI timer. In those situations a withdraw-then-add is
more forgivable, and fixing it would probably require a much more
significant effort, as IDs would need to be moved to ADVs instead of
paths.
Signed-off-by: Mitchell Skiba <mskiba@amazon.com>
2018-05-10 01:10:02 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((pe1->cap & PEER_UPDGRP_CAP_FLAGS)
|
|
|
|
!= (pe2->cap & PEER_UPDGRP_CAP_FLAGS))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((pe1->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS)
|
|
|
|
!= (pe2->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (pe1->v_routeadv != pe2->v_routeadv)
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (pe1->group != pe2->group)
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-06-17 12:14:46 +02:00
|
|
|
/* Roles can affect filtering */
|
|
|
|
if (pe1->local_role != pe2->local_role)
|
|
|
|
return false;
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* route-map names should be the same */
|
|
|
|
if ((fl1->map[RMAP_OUT].name && !fl2->map[RMAP_OUT].name)
|
|
|
|
|| (!fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name)
|
|
|
|
|| (fl1->map[RMAP_OUT].name && fl2->map[RMAP_OUT].name
|
|
|
|
&& strcmp(fl1->map[RMAP_OUT].name, fl2->map[RMAP_OUT].name)))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((fl1->dlist[FILTER_OUT].name && !fl2->dlist[FILTER_OUT].name)
|
|
|
|
|| (!fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name)
|
|
|
|
|| (fl1->dlist[FILTER_OUT].name && fl2->dlist[FILTER_OUT].name
|
|
|
|
&& strcmp(fl1->dlist[FILTER_OUT].name,
|
|
|
|
fl2->dlist[FILTER_OUT].name)))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((fl1->plist[FILTER_OUT].name && !fl2->plist[FILTER_OUT].name)
|
|
|
|
|| (!fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name)
|
|
|
|
|| (fl1->plist[FILTER_OUT].name && fl2->plist[FILTER_OUT].name
|
|
|
|
&& strcmp(fl1->plist[FILTER_OUT].name,
|
|
|
|
fl2->plist[FILTER_OUT].name)))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((fl1->aslist[FILTER_OUT].name && !fl2->aslist[FILTER_OUT].name)
|
|
|
|
|| (!fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name)
|
|
|
|
|| (fl1->aslist[FILTER_OUT].name && fl2->aslist[FILTER_OUT].name
|
|
|
|
&& strcmp(fl1->aslist[FILTER_OUT].name,
|
|
|
|
fl2->aslist[FILTER_OUT].name)))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((fl1->usmap.name && !fl2->usmap.name)
|
|
|
|
|| (!fl1->usmap.name && fl2->usmap.name)
|
|
|
|
|| (fl1->usmap.name && fl2->usmap.name
|
|
|
|
&& strcmp(fl1->usmap.name, fl2->usmap.name)))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: conditional advertisement
Implemented as per the feature description given in the source link.
Description:
The BGP conditional advertisement feature uses the non-exist-map or exist-map
and the advertise-map keywords of the neighbor advertise-map command in order
to track routes by the route prefix.
non-exist-map :
If a route prefix is not present in output of the non-exist-map command, then
the route specified by the advertise-map command is announced.
exist-map :
If a route prefix is present in output of the exist-map command, then the route
specified by the advertise-map command is announced.
The conditional BGP announcements are sent in addition to the normal
announcements that a BGP router sends to its peers.
The conditional advertisement process is triggered by the BGP scanner process,
which runs every 60 seconds. This means that the maximum time for the conditional
advertisement to take effect is 60 seconds. The conditional advertisement can take
effect sooner, depending on when the tracked route is removed from the BGP table
and when the next instance of the BGP scanner occurs.
Sample Configuration on DUT
---------------------------
Router2# show running-config
Building configuration...
Current configuration:
!
frr version 7.6-dev-MyOwnFRRVersion
frr defaults traditional
hostname router
log file /var/log/frr/bgpd.log
log syslog informational
hostname Router2
service integrated-vtysh-config
!
debug bgp updates in
debug bgp updates out
!
debug route-map
!
ip route 200.200.0.0/16 blackhole
ipv6 route 2001:db8::200/128 blackhole
!
interface enp0s9
ip address 10.10.10.2/24
!
interface enp0s10
ip address 10.10.20.2/24
!
interface lo
ip address 2.2.2.2/24
ipv6 address 2001:db8::2/128
!
router bgp 2
bgp log-neighbor-changes
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
!
address-family ipv4 unicast
network 2.2.2.0/24
network 200.200.0.0/16
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE non-exist-map CONDITION
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
address-family ipv6 unicast
network 2001:db8::2/128
network 2001:db8::200/128
neighbor 10.10.10.1 activate
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE_6 non-exist-map CONDITION_6
neighbor 10.10.20.3 activate
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
access-list CONDITION seq 5 permit 3.3.3.0/24
access-list ADVERTISE seq 5 permit 2.2.2.0/24
access-list ADVERTISE seq 6 permit 200.200.0.0/16
access-list ADVERTISE seq 7 permit 20.20.0.0/16
!
ipv6 access-list ADVERTISE_6 seq 5 permit 2001:db8::2/128
ipv6 access-list CONDITION_6 seq 5 permit 2001:db8::3/128
!
route-map ADVERTISE permit 10
match ip address ADVERTISE
!
route-map CONDITION permit 10
match ip address CONDITION
!
route-map ADVERTISE_6 permit 10
match ipv6 address ADVERTISE_6
!
route-map CONDITION_6 permit 10
match ipv6 address CONDITION_6
!
line vty
!
end
Router2#
Withdraw when non-exist-map prefixes present in BGP table:
----------------------------------------------------------
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 3.3.3.0/24 10.10.20.3 0 0 3 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 4 routes and 4 total paths
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::3/128 fe80::a00:27ff:fe76:6738 0 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 4 routes and 4 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
BGP neighbor is 10.10.10.1, remote AS 1, local AS 2, external link
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Withdraw
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Withdraw
1 accepted prefixes
!--- Output suppressed.
Router2#
Here 2.2.2.0/24 & 200.200.0.0/16 (prefixes in advertise-map) are withdrawn
by conditional advertisement scanner as the prefix(3.3.3.0/24) specified
by non-exist-map is present in BGP table.
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 3.3.3.0/24 0.0.0.0 0 3 i
Total number of prefixes 2
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::3/128 :: 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Advertise when non-exist-map prefixes not present in BGP table:
---------------------------------------------------------------
After Removing 3.3.3.0/24 (prefix present in non-exist-map),
2.2.2.0/24 & 200.200.0.0/16 (prefixes present in advertise-map) are advertised
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 3 routes and 3 total paths
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 3 routes and 3 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Advertise
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Advertise
1 accepted prefixes
!--- Output suppressed.
Router2#
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Total number of prefixes 3
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-09-29 11:46:04 +02:00
|
|
|
if ((fl1->advmap.aname && !fl2->advmap.aname)
|
|
|
|
|| (!fl1->advmap.aname && fl2->advmap.aname)
|
|
|
|
|| (fl1->advmap.aname && fl2->advmap.aname
|
|
|
|
&& strcmp(fl1->advmap.aname, fl2->advmap.aname)))
|
|
|
|
return false;
|
|
|
|
|
2021-06-16 01:49:19 +02:00
|
|
|
if (fl1->advmap.update_type != fl2->advmap.update_type)
|
|
|
|
return false;
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((pe1->default_rmap[afi][safi].name
|
|
|
|
&& !pe2->default_rmap[afi][safi].name)
|
|
|
|
|| (!pe1->default_rmap[afi][safi].name
|
|
|
|
&& pe2->default_rmap[afi][safi].name)
|
|
|
|
|| (pe1->default_rmap[afi][safi].name
|
|
|
|
&& pe2->default_rmap[afi][safi].name
|
|
|
|
&& strcmp(pe1->default_rmap[afi][safi].name,
|
|
|
|
pe2->default_rmap[afi][safi].name)))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((afi == AFI_IP6) && (pe1->shared_network != pe2->shared_network))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if ((CHECK_FLAG(pe1->flags, PEER_FLAG_LONESOUL)
|
2015-11-04 17:31:33 +01:00
|
|
|
|| CHECK_FLAG(pe1->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV)
|
2015-11-10 16:29:12 +01:00
|
|
|
|| CHECK_FLAG(pe1->af_cap[afi][safi],
|
|
|
|
PEER_CAP_ORF_PREFIX_SM_OLD_RCV))
|
2015-05-20 03:03:47 +02:00
|
|
|
&& !sockunion_same(&pe1->su, &pe2->su))
|
2018-10-17 21:27:12 +02:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-17 21:27:12 +02:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void peer_lonesoul_or_not(struct peer *peer, int set)
|
|
|
|
{
|
|
|
|
/* no change in status? */
|
|
|
|
if (set == (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL) > 0))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (set)
|
|
|
|
SET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
|
|
|
|
else
|
|
|
|
UNSET_FLAG(peer->flags, PEER_FLAG_LONESOUL);
|
|
|
|
|
|
|
|
update_group_adjust_peer_afs(peer);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* subgroup_total_packets_enqueued
|
|
|
|
*
|
|
|
|
* Returns the total number of packets enqueued to a subgroup.
|
|
|
|
*/
|
|
|
|
static unsigned int
|
|
|
|
subgroup_total_packets_enqueued(struct update_subgroup *subgrp)
|
|
|
|
{
|
|
|
|
struct bpacket *pkt;
|
|
|
|
|
|
|
|
pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
|
|
|
|
|
|
|
|
return pkt->ver - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int update_group_show_walkcb(struct update_group *updgrp, void *arg)
|
|
|
|
{
|
2015-05-20 03:04:09 +02:00
|
|
|
struct updwalk_context *ctx = arg;
|
|
|
|
struct vty *vty;
|
2015-05-20 03:03:47 +02:00
|
|
|
struct update_subgroup *subgrp;
|
|
|
|
struct peer_af *paf;
|
|
|
|
struct bgp_filter *filter;
|
2021-07-02 22:48:16 +02:00
|
|
|
struct peer *peer = UPDGRP_PEER(updgrp);
|
2015-05-20 03:04:09 +02:00
|
|
|
int match = 0;
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
json_object *json_updgrp = NULL;
|
|
|
|
json_object *json_subgrps = NULL;
|
|
|
|
json_object *json_subgrp = NULL;
|
|
|
|
json_object *json_time = NULL;
|
|
|
|
json_object *json_subgrp_time = NULL;
|
|
|
|
json_object *json_subgrp_event = NULL;
|
|
|
|
json_object *json_peers = NULL;
|
|
|
|
json_object *json_pkt_info = NULL;
|
|
|
|
time_t epoch_tbuf, tbuf;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
if (!ctx)
|
|
|
|
return CMD_SUCCESS;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
if (ctx->subgrp_id) {
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2015-05-20 03:04:09 +02:00
|
|
|
if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
|
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
else {
|
2015-05-20 03:04:09 +02:00
|
|
|
match = 1;
|
2017-07-17 14:03:14 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2015-05-20 03:04:09 +02:00
|
|
|
match = 1;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
if (!match) {
|
|
|
|
/* Since this routine is invoked from a walk, we cannot signal
|
2017-07-17 14:03:14 +02:00
|
|
|
* any */
|
2015-05-20 03:04:09 +02:00
|
|
|
/* error here, can only return. */
|
2015-05-20 03:12:17 +02:00
|
|
|
return CMD_SUCCESS;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
vty = ctx->vty;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (ctx->uj) {
|
|
|
|
json_updgrp = json_object_new_object();
|
|
|
|
/* Display json o/p */
|
|
|
|
tbuf = monotime(NULL);
|
|
|
|
tbuf -= updgrp->uptime;
|
|
|
|
epoch_tbuf = time(NULL) - tbuf;
|
|
|
|
json_time = json_object_new_object();
|
|
|
|
json_object_int_add(json_time, "epoch", epoch_tbuf);
|
|
|
|
json_object_string_add(json_time, "epochString",
|
|
|
|
ctime(&epoch_tbuf));
|
|
|
|
json_object_object_add(json_updgrp, "groupCreateTime",
|
|
|
|
json_time);
|
|
|
|
json_object_string_add(json_updgrp, "afi",
|
|
|
|
afi2str(updgrp->afi));
|
|
|
|
json_object_string_add(json_updgrp, "safi",
|
|
|
|
safi2str(updgrp->safi));
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "Update-group %" PRIu64 ":\n", updgrp->id);
|
|
|
|
vty_out(vty, " Created: %s", timestamp_string(updgrp->uptime));
|
|
|
|
}
|
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
filter = &updgrp->conf->filter[updgrp->afi][updgrp->safi];
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (filter->map[RMAP_OUT].name) {
|
|
|
|
if (ctx->uj)
|
|
|
|
json_object_string_add(json_updgrp, "outRouteMap",
|
|
|
|
filter->map[RMAP_OUT].name);
|
|
|
|
else
|
|
|
|
vty_out(vty, " Outgoing route map: %s\n",
|
|
|
|
filter->map[RMAP_OUT].name);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (ctx->uj)
|
|
|
|
json_object_int_add(json_updgrp, "minRouteAdvInt",
|
|
|
|
updgrp->conf->v_routeadv);
|
|
|
|
else
|
|
|
|
vty_out(vty, " MRAI value (seconds): %d\n",
|
|
|
|
updgrp->conf->v_routeadv);
|
|
|
|
|
|
|
|
if (updgrp->conf->change_local_as) {
|
|
|
|
if (ctx->uj) {
|
|
|
|
json_object_int_add(json_updgrp, "localAs",
|
|
|
|
updgrp->conf->change_local_as);
|
|
|
|
json_object_boolean_add(
|
|
|
|
json_updgrp, "noPrepend",
|
|
|
|
CHECK_FLAG(updgrp->conf->flags,
|
|
|
|
PEER_FLAG_LOCAL_AS_NO_PREPEND));
|
|
|
|
json_object_boolean_add(
|
|
|
|
json_updgrp, "replaceLocalAs",
|
|
|
|
CHECK_FLAG(updgrp->conf->flags,
|
|
|
|
PEER_FLAG_LOCAL_AS_REPLACE_AS));
|
|
|
|
} else {
|
|
|
|
vty_out(vty, " Local AS %u%s%s\n",
|
|
|
|
updgrp->conf->change_local_as,
|
|
|
|
CHECK_FLAG(updgrp->conf->flags,
|
|
|
|
PEER_FLAG_LOCAL_AS_NO_PREPEND)
|
|
|
|
? " no-prepend"
|
|
|
|
: "",
|
|
|
|
CHECK_FLAG(updgrp->conf->flags,
|
|
|
|
PEER_FLAG_LOCAL_AS_REPLACE_AS)
|
|
|
|
? " replace-as"
|
|
|
|
: "");
|
|
|
|
}
|
|
|
|
}
|
2022-12-09 00:22:17 +01:00
|
|
|
if (ctx->uj)
|
|
|
|
json_subgrps = json_object_new_array();
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2015-05-20 03:04:09 +02:00
|
|
|
if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
|
|
|
|
continue;
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (ctx->uj) {
|
|
|
|
json_subgrp = json_object_new_object();
|
|
|
|
json_object_int_add(json_subgrp, "subGroupId",
|
|
|
|
subgrp->id);
|
|
|
|
tbuf = monotime(NULL);
|
|
|
|
tbuf -= subgrp->uptime;
|
|
|
|
epoch_tbuf = time(NULL) - tbuf;
|
|
|
|
json_subgrp_time = json_object_new_object();
|
|
|
|
json_object_int_add(json_subgrp_time, "epoch",
|
|
|
|
epoch_tbuf);
|
|
|
|
json_object_string_add(json_subgrp_time, "epochString",
|
|
|
|
ctime(&epoch_tbuf));
|
|
|
|
json_object_object_add(json_subgrp, "groupCreateTime",
|
|
|
|
json_subgrp_time);
|
|
|
|
} else {
|
|
|
|
vty_out(vty, "\n");
|
|
|
|
vty_out(vty, " Update-subgroup %" PRIu64 ":\n",
|
|
|
|
subgrp->id);
|
|
|
|
vty_out(vty, " Created: %s",
|
|
|
|
timestamp_string(subgrp->uptime));
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (subgrp->split_from.update_group_id
|
2015-05-20 03:04:09 +02:00
|
|
|
|| subgrp->split_from.subgroup_id) {
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (ctx->uj) {
|
|
|
|
json_object_int_add(
|
|
|
|
json_subgrp, "splitGroupId",
|
|
|
|
subgrp->split_from.update_group_id);
|
|
|
|
json_object_int_add(
|
|
|
|
json_subgrp, "splitSubGroupId",
|
|
|
|
subgrp->split_from.subgroup_id);
|
|
|
|
} else {
|
|
|
|
vty_out(vty,
|
|
|
|
" Split from group id: %" PRIu64
|
|
|
|
"\n",
|
|
|
|
subgrp->split_from.update_group_id);
|
|
|
|
vty_out(vty,
|
|
|
|
" Split from subgroup id: %" PRIu64
|
|
|
|
"\n",
|
|
|
|
subgrp->split_from.subgroup_id);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (ctx->uj) {
|
|
|
|
json_subgrp_event = json_object_new_object();
|
|
|
|
json_object_int_add(json_subgrp_event, "joinEvents",
|
|
|
|
subgrp->join_events);
|
|
|
|
json_object_int_add(json_subgrp_event, "pruneEvents",
|
|
|
|
subgrp->prune_events);
|
|
|
|
json_object_int_add(json_subgrp_event, "mergeEvents",
|
|
|
|
subgrp->merge_events);
|
|
|
|
json_object_int_add(json_subgrp_event, "splitEvents",
|
|
|
|
subgrp->split_events);
|
|
|
|
json_object_int_add(json_subgrp_event, "switchEvents",
|
|
|
|
subgrp->updgrp_switch_events);
|
|
|
|
json_object_int_add(json_subgrp_event,
|
|
|
|
"peerRefreshEvents",
|
|
|
|
subgrp->peer_refreshes_combined);
|
|
|
|
json_object_int_add(json_subgrp_event,
|
|
|
|
"mergeCheckEvents",
|
|
|
|
subgrp->merge_checks_triggered);
|
|
|
|
json_object_object_add(json_subgrp, "statistics",
|
|
|
|
json_subgrp_event);
|
|
|
|
json_object_int_add(json_subgrp, "coalesceTime",
|
|
|
|
(UPDGRP_INST(subgrp->update_group))
|
|
|
|
->coalesce_time);
|
|
|
|
json_object_int_add(json_subgrp, "version",
|
|
|
|
subgrp->version);
|
|
|
|
json_pkt_info = json_object_new_object();
|
|
|
|
json_object_int_add(
|
|
|
|
json_pkt_info, "qeueueLen",
|
|
|
|
bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
|
|
|
|
json_object_int_add(
|
|
|
|
json_pkt_info, "queuedTotal",
|
|
|
|
subgroup_total_packets_enqueued(subgrp));
|
|
|
|
json_object_int_add(
|
|
|
|
json_pkt_info, "queueHwmLen",
|
|
|
|
bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
|
|
|
|
json_object_int_add(
|
|
|
|
json_pkt_info, "totalEnqueued",
|
|
|
|
subgroup_total_packets_enqueued(subgrp));
|
|
|
|
json_object_object_add(json_subgrp, "packetQueueInfo",
|
|
|
|
json_pkt_info);
|
|
|
|
json_object_int_add(json_subgrp, "adjListCount",
|
|
|
|
subgrp->adj_count);
|
|
|
|
json_object_boolean_add(
|
|
|
|
json_subgrp, "needsRefresh",
|
|
|
|
CHECK_FLAG(subgrp->flags,
|
|
|
|
SUBGRP_FLAG_NEEDS_REFRESH));
|
|
|
|
} else {
|
|
|
|
vty_out(vty, " Join events: %u\n",
|
|
|
|
subgrp->join_events);
|
|
|
|
vty_out(vty, " Prune events: %u\n",
|
|
|
|
subgrp->prune_events);
|
|
|
|
vty_out(vty, " Merge events: %u\n",
|
|
|
|
subgrp->merge_events);
|
|
|
|
vty_out(vty, " Split events: %u\n",
|
|
|
|
subgrp->split_events);
|
|
|
|
vty_out(vty, " Update group switch events: %u\n",
|
|
|
|
subgrp->updgrp_switch_events);
|
|
|
|
vty_out(vty, " Peer refreshes combined: %u\n",
|
|
|
|
subgrp->peer_refreshes_combined);
|
|
|
|
vty_out(vty, " Merge checks triggered: %u\n",
|
|
|
|
subgrp->merge_checks_triggered);
|
|
|
|
vty_out(vty, " Coalesce Time: %u%s\n",
|
|
|
|
(UPDGRP_INST(subgrp->update_group))
|
|
|
|
->coalesce_time,
|
|
|
|
subgrp->t_coalesce ? "(Running)" : "");
|
|
|
|
vty_out(vty, " Version: %" PRIu64 "\n",
|
|
|
|
subgrp->version);
|
|
|
|
vty_out(vty, " Packet queue length: %d\n",
|
|
|
|
bpacket_queue_length(SUBGRP_PKTQ(subgrp)));
|
|
|
|
vty_out(vty, " Total packets enqueued: %u\n",
|
|
|
|
subgroup_total_packets_enqueued(subgrp));
|
|
|
|
vty_out(vty, " Packet queue high watermark: %d\n",
|
|
|
|
bpacket_queue_hwm_length(SUBGRP_PKTQ(subgrp)));
|
|
|
|
vty_out(vty, " Adj-out list count: %u\n",
|
|
|
|
subgrp->adj_count);
|
|
|
|
vty_out(vty, " Advertise list: %s\n",
|
|
|
|
advertise_list_is_empty(subgrp) ? "empty"
|
|
|
|
: "not empty");
|
|
|
|
vty_out(vty, " Flags: %s\n",
|
|
|
|
CHECK_FLAG(subgrp->flags,
|
|
|
|
SUBGRP_FLAG_NEEDS_REFRESH)
|
|
|
|
? "R"
|
|
|
|
: "");
|
|
|
|
if (peer)
|
|
|
|
vty_out(vty, " Max packet size: %d\n",
|
|
|
|
peer->max_packet_size);
|
|
|
|
}
|
2015-05-20 03:03:47 +02:00
|
|
|
if (subgrp->peer_count > 0) {
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
if (ctx->uj) {
|
|
|
|
json_peers = json_object_new_array();
|
|
|
|
SUBGRP_FOREACH_PEER (subgrp, paf) {
|
|
|
|
json_object *peer =
|
|
|
|
json_object_new_string(
|
|
|
|
paf->peer->host);
|
|
|
|
json_object_array_add(json_peers, peer);
|
|
|
|
}
|
|
|
|
json_object_object_add(json_subgrp, "peers",
|
|
|
|
json_peers);
|
|
|
|
} else {
|
|
|
|
vty_out(vty, " Peers:\n");
|
|
|
|
SUBGRP_FOREACH_PEER (subgrp, paf)
|
|
|
|
vty_out(vty, " - %s\n",
|
|
|
|
paf->peer->host);
|
|
|
|
}
|
2015-05-20 03:04:09 +02:00
|
|
|
}
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
|
|
|
|
if (ctx->uj)
|
|
|
|
json_object_array_add(json_subgrps, json_subgrp);
|
2015-05-20 03:04:09 +02:00
|
|
|
}
|
bgpd: JSON support for show ip bgp vrf all update-group
Ticket:#3229030
Testing Done: UT
Changes:
- JSON support for the update group command.
Testing:
torc-11# show ip bgp vrf all ipv6 update-groups json
torc-12# show bgp vrf all update-groups json
{
"default":{
"2":{
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"afi":"IPv6",
"safi":"unicast",
"outRouteMap":"MY_ORIGIN_ASPATH_ONLY",
"minRouteAdvInt":0,
"subGroup":[
{
"subGroupId":2,
"groupCreateTime":{
"epoch":1669225617,
"epochString":"Wed Nov 23 17:46:57 2022\n"
},
"statistics":{
"joinEvents":2,
"pruneEvents":0,
"mergeEvents":0,
"splitEvents":0,
"switchEvents":0,
"peerRefreshEvents":0,
"mergeCheckEvents":2
},
"coalesceTime":1100,
"version":12,
"packetQueueInfo":{
"qeueueLen":0,
"queuedTotal":1,
"queueHwmLen":1,
"totalEnqueued":1
},
"adjListCount":1,
"needsRefresh":false,
"peers":[
"uplink_1",
"uplink_2"
]
}
]
}
}
}
{
"sym_3":{
}
}
{
"sym_5":{
}
}
{
"sym_2":{
}
}
{
"sym_4":{
}
}
{
"sym_1":{
}
}
Co-authored-by: Chirag Shah <chirag@nvidia.com>
Signed-off-by: Ashwini Reddy <ashred@nvidia.com>
2022-11-07 20:53:48 +01:00
|
|
|
|
|
|
|
if (ctx->uj) {
|
|
|
|
json_object_object_add(json_updgrp, "subGroup", json_subgrps);
|
|
|
|
json_object_object_addf(ctx->json_updategrps, json_updgrp,
|
|
|
|
"%" PRIu64, updgrp->id);
|
|
|
|
}
|
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
return UPDWALK_CONTINUE;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Helper function to show the packet queue for each subgroup of update group.
|
|
|
|
* Will be constrained to a particular subgroup id if id !=0
|
|
|
|
*/
|
|
|
|
static int updgrp_show_packet_queue_walkcb(struct update_group *updgrp,
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
struct updwalk_context *ctx = arg;
|
|
|
|
struct update_subgroup *subgrp;
|
|
|
|
struct vty *vty;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
vty = ctx->vty;
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2015-05-20 03:03:47 +02:00
|
|
|
if (ctx->subgrp_id && (ctx->subgrp_id != subgrp->id))
|
|
|
|
continue;
|
2017-07-13 18:50:29 +02:00
|
|
|
vty_out(vty, "update group %" PRIu64 ", subgroup %" PRIu64 "\n",
|
2017-06-21 05:10:57 +02:00
|
|
|
updgrp->id, subgrp->id);
|
2015-05-20 03:03:47 +02:00
|
|
|
bpacket_queue_show_vty(SUBGRP_PKTQ(subgrp), vty);
|
|
|
|
}
|
|
|
|
return UPDWALK_CONTINUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Show the packet queue for each subgroup of update group. Will be
|
|
|
|
* constrained to a particular subgroup id if id !=0
|
|
|
|
*/
|
|
|
|
void update_group_show_packet_queue(struct bgp *bgp, afi_t afi, safi_t safi,
|
2016-08-24 17:11:00 +02:00
|
|
|
struct vty *vty, uint64_t id)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
struct updwalk_context ctx;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
memset(&ctx, 0, sizeof(ctx));
|
|
|
|
ctx.vty = vty;
|
|
|
|
ctx.subgrp_id = id;
|
|
|
|
ctx.flags = 0;
|
|
|
|
update_group_af_walk(bgp, afi, safi, updgrp_show_packet_queue_walkcb,
|
|
|
|
&ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct update_group *update_group_find(struct peer_af *paf)
|
|
|
|
{
|
|
|
|
struct update_group *updgrp;
|
|
|
|
struct update_group tmp;
|
|
|
|
struct peer tmp_conf;
|
|
|
|
|
|
|
|
if (!peer_established(PAF_PEER(paf)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
memset(&tmp, 0, sizeof(tmp));
|
|
|
|
memset(&tmp_conf, 0, sizeof(tmp_conf));
|
|
|
|
tmp.conf = &tmp_conf;
|
|
|
|
peer2_updgrp_copy(&tmp, paf);
|
|
|
|
|
|
|
|
updgrp = hash_lookup(paf->peer->bgp->update_groups[paf->afid], &tmp);
|
|
|
|
conf_release(&tmp_conf, paf->afi, paf->safi);
|
|
|
|
return updgrp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct update_group *update_group_create(struct peer_af *paf)
|
|
|
|
{
|
|
|
|
struct update_group *updgrp;
|
|
|
|
struct update_group tmp;
|
|
|
|
struct peer tmp_conf;
|
|
|
|
|
|
|
|
memset(&tmp, 0, sizeof(tmp));
|
|
|
|
memset(&tmp_conf, 0, sizeof(tmp_conf));
|
|
|
|
tmp.conf = &tmp_conf;
|
|
|
|
peer2_updgrp_copy(&tmp, paf);
|
|
|
|
|
|
|
|
updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
|
|
|
|
updgrp_hash_alloc);
|
|
|
|
update_group_checkin(updgrp);
|
|
|
|
|
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
|
2015-05-20 03:12:17 +02:00
|
|
|
zlog_debug("create update group %" PRIu64, updgrp->id);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
UPDGRP_GLOBAL_STAT(updgrp, updgrps_created) += 1;
|
|
|
|
|
2015-09-02 14:19:44 +02:00
|
|
|
conf_release(&tmp_conf, paf->afi, paf->safi);
|
2015-05-20 03:03:47 +02:00
|
|
|
return updgrp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void update_group_delete(struct update_group *updgrp)
|
|
|
|
{
|
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
|
2015-05-20 03:12:17 +02:00
|
|
|
zlog_debug("delete update group %" PRIu64, updgrp->id);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
UPDGRP_GLOBAL_STAT(updgrp, updgrps_deleted) += 1;
|
|
|
|
|
|
|
|
hash_release(updgrp->bgp->update_groups[updgrp->afid], updgrp);
|
|
|
|
conf_release(updgrp->conf, updgrp->afi, updgrp->safi);
|
2015-05-20 03:29:17 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_PEER_HOST, updgrp->conf->host);
|
2015-08-26 16:44:57 +02:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_BGP_PEER_IFNAME, updgrp->conf->ifname);
|
2015-08-26 16:44:57 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
XFREE(MTYPE_BGP_PEER, updgrp->conf);
|
|
|
|
XFREE(MTYPE_BGP_UPDGRP, updgrp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void update_group_add_subgroup(struct update_group *updgrp,
|
|
|
|
struct update_subgroup *subgrp)
|
|
|
|
{
|
|
|
|
if (!updgrp || !subgrp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
LIST_INSERT_HEAD(&(updgrp->subgrps), subgrp, updgrp_train);
|
|
|
|
subgrp->update_group = updgrp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void update_group_remove_subgroup(struct update_group *updgrp,
|
|
|
|
struct update_subgroup *subgrp)
|
|
|
|
{
|
|
|
|
if (!updgrp || !subgrp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
LIST_REMOVE(subgrp, updgrp_train);
|
|
|
|
subgrp->update_group = NULL;
|
|
|
|
if (LIST_EMPTY(&(updgrp->subgrps)))
|
|
|
|
update_group_delete(updgrp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct update_subgroup *
|
|
|
|
update_subgroup_create(struct update_group *updgrp)
|
|
|
|
{
|
|
|
|
struct update_subgroup *subgrp;
|
|
|
|
|
|
|
|
subgrp = XCALLOC(MTYPE_BGP_UPD_SUBGRP, sizeof(struct update_subgroup));
|
|
|
|
update_subgroup_checkin(subgrp, updgrp);
|
|
|
|
subgrp->v_coalesce = (UPDGRP_INST(updgrp))->coalesce_time;
|
2021-02-25 18:46:49 +01:00
|
|
|
sync_init(subgrp, updgrp);
|
2015-05-20 03:03:47 +02:00
|
|
|
bpacket_queue_init(SUBGRP_PKTQ(subgrp));
|
|
|
|
bpacket_queue_add(SUBGRP_PKTQ(subgrp), NULL, NULL);
|
|
|
|
TAILQ_INIT(&(subgrp->adjq));
|
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
|
2015-05-20 03:12:17 +02:00
|
|
|
zlog_debug("create subgroup u%" PRIu64 ":s%" PRIu64, updgrp->id,
|
2015-05-20 03:03:47 +02:00
|
|
|
subgrp->id);
|
|
|
|
|
|
|
|
update_group_add_subgroup(updgrp, subgrp);
|
|
|
|
|
|
|
|
UPDGRP_INCR_STAT(updgrp, subgrps_created);
|
|
|
|
|
|
|
|
return subgrp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void update_subgroup_delete(struct update_subgroup *subgrp)
|
|
|
|
{
|
|
|
|
if (!subgrp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (subgrp->update_group)
|
|
|
|
UPDGRP_INCR_STAT(subgrp->update_group, subgrps_deleted);
|
|
|
|
|
2020-10-12 14:35:18 +02:00
|
|
|
THREAD_OFF(subgrp->t_merge_check);
|
2020-07-17 23:09:51 +02:00
|
|
|
THREAD_OFF(subgrp->t_coalesce);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
bpacket_queue_cleanup(SUBGRP_PKTQ(subgrp));
|
|
|
|
subgroup_clear_table(subgrp);
|
|
|
|
|
|
|
|
sync_delete(subgrp);
|
|
|
|
|
2018-06-15 20:09:55 +02:00
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS) && subgrp->update_group)
|
2015-05-20 03:12:17 +02:00
|
|
|
zlog_debug("delete subgroup u%" PRIu64 ":s%" PRIu64,
|
2015-05-20 03:03:47 +02:00
|
|
|
subgrp->update_group->id, subgrp->id);
|
|
|
|
|
|
|
|
update_group_remove_subgroup(subgrp->update_group, subgrp);
|
|
|
|
|
|
|
|
XFREE(MTYPE_BGP_UPD_SUBGRP, subgrp);
|
|
|
|
}
|
|
|
|
|
|
|
|
void update_subgroup_inherit_info(struct update_subgroup *to,
|
|
|
|
struct update_subgroup *from)
|
|
|
|
{
|
|
|
|
if (!to || !from)
|
|
|
|
return;
|
|
|
|
|
|
|
|
to->sflags = from->sflags;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_check_delete
|
|
|
|
*
|
|
|
|
* Delete a subgroup if it is ready to be deleted.
|
|
|
|
*
|
2019-07-01 19:26:05 +02:00
|
|
|
* Returns true if the subgroup was deleted.
|
2015-05-20 03:03:47 +02:00
|
|
|
*/
|
2020-03-20 10:57:54 +01:00
|
|
|
static bool update_subgroup_check_delete(struct update_subgroup *subgrp)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
if (!subgrp)
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
if (!LIST_EMPTY(&(subgrp->peers)))
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
update_subgroup_delete(subgrp);
|
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_add_peer
|
|
|
|
*
|
|
|
|
* @param send_enqueued_packets If true all currently enqueued packets will
|
|
|
|
* also be sent to the peer.
|
|
|
|
*/
|
|
|
|
static void update_subgroup_add_peer(struct update_subgroup *subgrp,
|
|
|
|
struct peer_af *paf,
|
|
|
|
int send_enqueued_pkts)
|
|
|
|
{
|
|
|
|
struct bpacket *pkt;
|
|
|
|
|
|
|
|
if (!subgrp || !paf)
|
|
|
|
return;
|
|
|
|
|
|
|
|
LIST_INSERT_HEAD(&(subgrp->peers), paf, subgrp_train);
|
|
|
|
paf->subgroup = subgrp;
|
|
|
|
subgrp->peer_count++;
|
|
|
|
|
2015-05-20 03:12:17 +02:00
|
|
|
if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
|
2015-05-20 03:03:47 +02:00
|
|
|
UPDGRP_PEER_DBG_EN(subgrp->update_group);
|
|
|
|
}
|
|
|
|
|
|
|
|
SUBGRP_INCR_STAT(subgrp, join_events);
|
|
|
|
|
|
|
|
if (send_enqueued_pkts) {
|
|
|
|
pkt = bpacket_queue_first(SUBGRP_PKTQ(subgrp));
|
|
|
|
} else {
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Hang the peer off of the last, placeholder, packet in the
|
|
|
|
* queue. This means it won't see any of the packets that are
|
|
|
|
* currently the queue.
|
|
|
|
*/
|
|
|
|
pkt = bpacket_queue_last(SUBGRP_PKTQ(subgrp));
|
|
|
|
assert(pkt->buffer == NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
bpacket_add_peer(pkt, paf);
|
|
|
|
|
2019-06-12 12:24:37 +02:00
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
|
|
|
|
zlog_debug("peer %s added to subgroup s%" PRIu64,
|
|
|
|
paf->peer->host, subgrp->id);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_remove_peer_internal
|
|
|
|
*
|
|
|
|
* Internal function that removes a peer from a subgroup, but does not
|
|
|
|
* delete the subgroup. A call to this function must almost always be
|
|
|
|
* followed by a call to update_subgroup_check_delete().
|
|
|
|
*
|
|
|
|
* @see update_subgroup_remove_peer
|
|
|
|
*/
|
|
|
|
static void update_subgroup_remove_peer_internal(struct update_subgroup *subgrp,
|
|
|
|
struct peer_af *paf)
|
|
|
|
{
|
2018-06-18 16:38:23 +02:00
|
|
|
assert(subgrp && paf && subgrp->update_group);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2015-05-20 03:12:17 +02:00
|
|
|
if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
|
2015-05-20 03:03:47 +02:00
|
|
|
UPDGRP_PEER_DBG_DIS(subgrp->update_group);
|
|
|
|
}
|
|
|
|
|
|
|
|
bpacket_queue_remove_peer(paf);
|
|
|
|
LIST_REMOVE(paf, subgrp_train);
|
|
|
|
paf->subgroup = NULL;
|
|
|
|
subgrp->peer_count--;
|
|
|
|
|
2019-06-12 12:24:37 +02:00
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
|
|
|
|
zlog_debug("peer %s deleted from subgroup s%"
|
2019-11-15 10:09:17 +01:00
|
|
|
PRIu64 " peer cnt %d",
|
2019-06-12 12:24:37 +02:00
|
|
|
paf->peer->host, subgrp->id, subgrp->peer_count);
|
2015-05-20 03:03:47 +02:00
|
|
|
SUBGRP_INCR_STAT(subgrp, prune_events);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * update_subgroup_remove_peer
 */
void update_subgroup_remove_peer(struct update_subgroup *subgrp,
				 struct peer_af *paf)
{
	if (!subgrp || !paf)
		return;

	update_subgroup_remove_peer_internal(subgrp, paf);

	/* If the subgroup went empty it has already been freed. */
	if (update_subgroup_check_delete(subgrp))
		return;

	/*
	 * The deletion of the peer may have caused some packets to be
	 * deleted from the subgroup packet queue. Check if the subgroup can
	 * be merged now.
	 */
	update_subgroup_check_merge(subgrp, "removed peer from subgroup");
}
|
|
|
|
|
|
|
|
static struct update_subgroup *update_subgroup_find(struct update_group *updgrp,
|
|
|
|
struct peer_af *paf)
|
|
|
|
{
|
|
|
|
struct update_subgroup *subgrp = NULL;
|
|
|
|
uint64_t version;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (paf->subgroup) {
|
|
|
|
assert(0);
|
|
|
|
return NULL;
|
|
|
|
} else
|
|
|
|
version = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (!peer_established(PAF_PEER(paf)))
|
|
|
|
return NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2015-09-21 06:09:00 +02:00
|
|
|
if (subgrp->version != version
|
|
|
|
|| CHECK_FLAG(subgrp->sflags,
|
|
|
|
SUBGRP_STATUS_DEFAULT_ORIGINATE))
|
2015-05-20 03:03:47 +02:00
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/*
|
|
|
|
* The version number is not meaningful on a subgroup that needs
|
|
|
|
* a refresh.
|
|
|
|
*/
|
|
|
|
if (update_subgroup_needs_refresh(subgrp))
|
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
break;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
return subgrp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * update_subgroup_ready_for_merge
 *
 * Returns true if this subgroup is in a state that allows it to be
 * merged into another subgroup.
 */
static bool update_subgroup_ready_for_merge(struct update_subgroup *subgrp)
{
	/*
	 * Not ready if there are any encoded packets waiting to be written
	 * out to peers.
	 */
	if (!bpacket_queue_is_empty(SUBGRP_PKTQ(subgrp)))
		return false;

	/*
	 * Not ready if there enqueued updates waiting to be encoded.
	 */
	if (!advertise_list_is_empty(subgrp))
		return false;

	/*
	 * Don't attempt to merge a subgroup that needs a refresh. For one,
	 * we can't determine if the adj_out of such a group matches that of
	 * another group.
	 */
	if (update_subgroup_needs_refresh(subgrp))
		return false;

	return true;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgrp_can_merge_into
|
|
|
|
*
|
2019-07-01 19:26:05 +02:00
|
|
|
* Returns true if the first subgroup can merge into the second
|
2015-05-20 03:03:47 +02:00
|
|
|
* subgroup.
|
|
|
|
*/
|
|
|
|
static int update_subgroup_can_merge_into(struct update_subgroup *subgrp,
|
|
|
|
struct update_subgroup *target)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (subgrp == target)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Both must have processed the BRIB to the same point in order to
|
|
|
|
* be merged.
|
|
|
|
*/
|
|
|
|
if (subgrp->version != target->version)
|
|
|
|
return 0;
|
|
|
|
|
2015-09-21 06:09:00 +02:00
|
|
|
if (CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)
|
|
|
|
!= CHECK_FLAG(target->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE))
|
|
|
|
return 0;
|
|
|
|
|
BGP: crash in update_subgroup_merge()
Signed-off-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Reviewed-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Ticket: CM-8191
On my hard node I have a route to 10.0.0.0/22 via eth0, I then learn
10.0.0.8/32 from peer 10.0.0.8 with a nexthop of 10.0.0.8:
superm-redxp-05# show ip bgp 10.0.0.8/32
BGP routing table entry for 10.0.0.8/32
Paths: (1 available, no best path)
Not advertised to any peer
80
10.0.0.8 from r8(swp6) (10.0.0.8)
Origin IGP, metric 0, localpref 100, valid, external
AddPath ID: RX 0, TX 9
Last update: Thu Nov 12 14:00:00 2015
superm-redxp-05#
I do a lookup for the nexthop and see that 10.0.0.8 is reachable via my
eth0 10.0.0.22 so I select a bestpath and install the route. At this
point my route to 10.0.0.8 is a /32 that resolves via itself, NHT sees
that this is illegal and flags the nexthop as inaccessible.
superm-redxp-05# show ip bgp 10.0.0.8/32
BGP routing table entry for 10.0.0.8/32
Paths: (1 available, best #1, table Default-IP-Routing-Table)
Advertised to non peer-group peers:
r6(swp4) r7(swp5) r8(swp6) r2(10.1.2.2) r3(10.1.3.2) r4(10.1.4.2)
80
10.0.0.8 (inaccessible) from r8(swp6) (10.0.0.8)
Origin IGP, metric 0, localpref 100, invalid, external, bestpath-from-AS 80, best
AddPath ID: RX 0, TX 9
Last update: Thu Nov 12 14:00:00 2015
superm-redxp-05#
at which point we withdraw the route, things churn, we relearn it and go
through the whole process over and over again. We end up advertising and
withdrawing this route about 9 times a second!!
This exposed a crash in the update-group code where we try to merge two sub-groups
but the assert on adj_count fails because the timing worked out where one had
advertised 10.0.0.8/32 but the other had not.
NOTE: the race condition described above will be resolved via a separate patch.
2015-11-17 03:09:57 +01:00
|
|
|
if (subgrp->adj_count != target->adj_count)
|
2015-05-20 03:03:47 +02:00
|
|
|
return 0;
|
|
|
|
|
BGP: crash in update_subgroup_merge()
Signed-off-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Reviewed-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Ticket: CM-8191
On my hard node I have a route to 10.0.0.0/22 via eth0, I then learn
10.0.0.8/32 from peer 10.0.0.8 with a nexthop of 10.0.0.8:
superm-redxp-05# show ip bgp 10.0.0.8/32
BGP routing table entry for 10.0.0.8/32
Paths: (1 available, no best path)
Not advertised to any peer
80
10.0.0.8 from r8(swp6) (10.0.0.8)
Origin IGP, metric 0, localpref 100, valid, external
AddPath ID: RX 0, TX 9
Last update: Thu Nov 12 14:00:00 2015
superm-redxp-05#
I do a lookup for the nexthop and see that 10.0.0.8 is reachable via my
eth0 10.0.0.22 so I select a bestpath and install the route. At this
point my route to 10.0.0.8 is a /32 that resolves via itself, NHT sees
that this is illegal and flags the nexthop as inaccessible.
superm-redxp-05# show ip bgp 10.0.0.8/32
BGP routing table entry for 10.0.0.8/32
Paths: (1 available, best #1, table Default-IP-Routing-Table)
Advertised to non peer-group peers:
r6(swp4) r7(swp5) r8(swp6) r2(10.1.2.2) r3(10.1.3.2) r4(10.1.4.2)
80
10.0.0.8 (inaccessible) from r8(swp6) (10.0.0.8)
Origin IGP, metric 0, localpref 100, invalid, external, bestpath-from-AS 80, best
AddPath ID: RX 0, TX 9
Last update: Thu Nov 12 14:00:00 2015
superm-redxp-05#
at which point we withdraw the route, things churn, we relearn it and go
through the whole process over and over again. We end up advertising and
withdrawing this route about 9 times a second!!
This exposed a crash in the update-group code where we try to merge two sub-groups
but the assert on adj_count fails because the timing worked out where one had
advertised 10.0.0.8/32 but the other had not.
NOTE: the race condition described above will be resolved via a separate patch.
2015-11-17 03:09:57 +01:00
|
|
|
return update_subgroup_ready_for_merge(target);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_merge
|
|
|
|
*
|
|
|
|
* Merge the first subgroup into the second one.
|
|
|
|
*/
|
|
|
|
static void update_subgroup_merge(struct update_subgroup *subgrp,
|
|
|
|
struct update_subgroup *target,
|
|
|
|
const char *reason)
|
|
|
|
{
|
|
|
|
struct peer_af *paf;
|
|
|
|
int result;
|
|
|
|
int peer_count;
|
|
|
|
|
|
|
|
assert(subgrp->adj_count == target->adj_count);
|
|
|
|
|
|
|
|
peer_count = subgrp->peer_count;
|
|
|
|
|
|
|
|
while (1) {
|
|
|
|
paf = LIST_FIRST(&subgrp->peers);
|
|
|
|
if (!paf)
|
|
|
|
break;
|
|
|
|
|
|
|
|
update_subgroup_remove_peer_internal(subgrp, paf);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add the peer to the target subgroup, while making sure that
|
|
|
|
* any currently enqueued packets won't be sent to it. Enqueued
|
|
|
|
* packets could, for example, result in an unnecessary withdraw
|
|
|
|
* followed by an advertise.
|
|
|
|
*/
|
|
|
|
update_subgroup_add_peer(target, paf, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
SUBGRP_INCR_STAT(target, merge_events);
|
|
|
|
|
|
|
|
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
|
2020-03-27 12:51:47 +01:00
|
|
|
zlog_debug("u%" PRIu64 ":s%" PRIu64" (%d peers) merged into u%" PRIu64 ":s%" PRIu64", trigger: %s",
|
2015-05-20 03:03:47 +02:00
|
|
|
subgrp->update_group->id, subgrp->id, peer_count,
|
|
|
|
target->update_group->id, target->id,
|
|
|
|
reason ? reason : "unknown");
|
|
|
|
|
|
|
|
result = update_subgroup_check_delete(subgrp);
|
|
|
|
assert(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_check_merge
|
|
|
|
*
|
|
|
|
* Merge this subgroup into another subgroup if possible.
|
|
|
|
*
|
2019-07-01 19:26:05 +02:00
|
|
|
* Returns true if the subgroup has been merged. The subgroup pointer
|
2015-05-20 03:03:47 +02:00
|
|
|
* should not be accessed in this case.
|
|
|
|
*/
|
2020-03-20 10:57:54 +01:00
|
|
|
bool update_subgroup_check_merge(struct update_subgroup *subgrp,
|
|
|
|
const char *reason)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
struct update_subgroup *target;
|
|
|
|
|
|
|
|
if (!update_subgroup_ready_for_merge(subgrp))
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Look for a subgroup to merge into.
|
|
|
|
*/
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (subgrp->update_group, target) {
|
2015-05-20 03:03:47 +02:00
|
|
|
if (update_subgroup_can_merge_into(subgrp, target))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!target)
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
update_subgroup_merge(subgrp, target, reason);
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2017-07-22 14:52:33 +02:00
|
|
|
* update_subgroup_merge_check_thread_cb
|
|
|
|
*/
|
2022-03-01 22:18:12 +01:00
|
|
|
static void update_subgroup_merge_check_thread_cb(struct event *thread)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
struct update_subgroup *subgrp;
|
|
|
|
|
|
|
|
subgrp = THREAD_ARG(thread);
|
|
|
|
|
|
|
|
subgrp->t_merge_check = NULL;
|
|
|
|
|
|
|
|
update_subgroup_check_merge(subgrp, "triggered merge check");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_trigger_merge_check
|
|
|
|
*
|
|
|
|
* Triggers a call to update_subgroup_check_merge() on a clean context.
|
|
|
|
*
|
|
|
|
* @param force If true, the merge check will be triggered even if the
|
|
|
|
* subgroup doesn't currently look ready for a merge.
|
|
|
|
*
|
2019-07-01 19:26:05 +02:00
|
|
|
* Returns true if a merge check will be performed shortly.
|
2015-05-20 03:03:47 +02:00
|
|
|
*/
|
2020-03-20 10:57:54 +01:00
|
|
|
bool update_subgroup_trigger_merge_check(struct update_subgroup *subgrp,
|
|
|
|
int force)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
if (subgrp->t_merge_check)
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
if (!force && !update_subgroup_ready_for_merge(subgrp))
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2017-05-05 23:22:25 +02:00
|
|
|
subgrp->t_merge_check = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer_msec(bm->master, update_subgroup_merge_check_thread_cb,
|
|
|
|
subgrp, 0, &subgrp->t_merge_check);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
|
|
|
SUBGRP_INCR_STAT(subgrp, merge_checks_triggered);
|
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_copy_adj_out
|
|
|
|
*
|
|
|
|
* Helper function that clones the adj out (state about advertised
|
|
|
|
* routes) from one subgroup to another. It assumes that the adj out
|
|
|
|
* of the target subgroup is empty.
|
|
|
|
*/
|
|
|
|
static void update_subgroup_copy_adj_out(struct update_subgroup *source,
|
|
|
|
struct update_subgroup *dest)
|
|
|
|
{
|
|
|
|
struct bgp_adj_out *aout, *aout_copy;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
SUBGRP_FOREACH_ADJ (source, aout) {
|
2015-05-20 03:03:47 +02:00
|
|
|
/*
|
|
|
|
* Copy the adj out.
|
|
|
|
*/
|
2020-03-27 00:11:58 +01:00
|
|
|
aout_copy = bgp_adj_out_alloc(dest, aout->dest,
|
|
|
|
aout->addpath_tx_id);
|
2015-05-20 03:03:47 +02:00
|
|
|
aout_copy->attr =
|
2017-09-07 14:24:00 +02:00
|
|
|
aout->attr ? bgp_attr_intern(aout->attr) : NULL;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
2018-10-09 20:56:46 +02:00
|
|
|
|
|
|
|
dest->scount = source->scount;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_copy_packets
|
|
|
|
*
|
|
|
|
* Copy packets after and including the given packet to the subgroup
|
|
|
|
* 'dest'.
|
|
|
|
*
|
|
|
|
* Returns the number of packets copied.
|
|
|
|
*/
|
|
|
|
static int update_subgroup_copy_packets(struct update_subgroup *dest,
|
|
|
|
struct bpacket *pkt)
|
|
|
|
{
|
|
|
|
int count;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
count = 0;
|
|
|
|
while (pkt && pkt->buffer) {
|
|
|
|
bpacket_queue_add(SUBGRP_PKTQ(dest), stream_dup(pkt->buffer),
|
|
|
|
&pkt->arr);
|
|
|
|
count++;
|
|
|
|
pkt = bpacket_next(pkt);
|
|
|
|
}
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
static bool updgrp_prefix_list_update(struct update_group *updgrp,
|
|
|
|
const char *name)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
struct bgp_filter *filter;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
peer = UPDGRP_PEER(updgrp);
|
|
|
|
filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (PREFIX_LIST_OUT_NAME(filter)
|
|
|
|
&& (strcmp(name, PREFIX_LIST_OUT_NAME(filter)) == 0)) {
|
|
|
|
PREFIX_LIST_OUT(filter) = prefix_list_lookup(
|
|
|
|
UPDGRP_AFI(updgrp), PREFIX_LIST_OUT_NAME(filter));
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
static bool updgrp_filter_list_update(struct update_group *updgrp,
|
|
|
|
const char *name)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
struct bgp_filter *filter;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
peer = UPDGRP_PEER(updgrp);
|
|
|
|
filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (FILTER_LIST_OUT_NAME(filter)
|
|
|
|
&& (strcmp(name, FILTER_LIST_OUT_NAME(filter)) == 0)) {
|
|
|
|
FILTER_LIST_OUT(filter) =
|
|
|
|
as_list_lookup(FILTER_LIST_OUT_NAME(filter));
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
static bool updgrp_distribute_list_update(struct update_group *updgrp,
|
|
|
|
const char *name)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
struct bgp_filter *filter;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
peer = UPDGRP_PEER(updgrp);
|
|
|
|
filter = &peer->filter[UPDGRP_AFI(updgrp)][UPDGRP_SAFI(updgrp)];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (DISTRIBUTE_OUT_NAME(filter)
|
|
|
|
&& (strcmp(name, DISTRIBUTE_OUT_NAME(filter)) == 0)) {
|
|
|
|
DISTRIBUTE_OUT(filter) = access_list_lookup(
|
|
|
|
UPDGRP_AFI(updgrp), DISTRIBUTE_OUT_NAME(filter));
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
2015-05-20 03:12:17 +02:00
|
|
|
static int updgrp_route_map_update(struct update_group *updgrp,
|
2015-05-20 03:03:47 +02:00
|
|
|
const char *name, int *def_rmap_changed)
|
|
|
|
{
|
|
|
|
struct peer *peer;
|
|
|
|
struct bgp_filter *filter;
|
|
|
|
int changed = 0;
|
|
|
|
afi_t afi;
|
|
|
|
safi_t safi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
peer = UPDGRP_PEER(updgrp);
|
|
|
|
afi = UPDGRP_AFI(updgrp);
|
|
|
|
safi = UPDGRP_SAFI(updgrp);
|
|
|
|
filter = &peer->filter[afi][safi];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (ROUTE_MAP_OUT_NAME(filter)
|
|
|
|
&& (strcmp(name, ROUTE_MAP_OUT_NAME(filter)) == 0)) {
|
|
|
|
ROUTE_MAP_OUT(filter) = route_map_lookup_by_name(name);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
changed = 1;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (UNSUPPRESS_MAP_NAME(filter)
|
|
|
|
&& (strcmp(name, UNSUPPRESS_MAP_NAME(filter)) == 0)) {
|
|
|
|
UNSUPPRESS_MAP(filter) = route_map_lookup_by_name(name);
|
|
|
|
changed = 1;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* process default-originate route-map */
|
|
|
|
if (peer->default_rmap[afi][safi].name
|
|
|
|
&& (strcmp(name, peer->default_rmap[afi][safi].name) == 0)) {
|
|
|
|
peer->default_rmap[afi][safi].map =
|
|
|
|
route_map_lookup_by_name(name);
|
|
|
|
if (def_rmap_changed)
|
|
|
|
*def_rmap_changed = 1;
|
|
|
|
}
|
|
|
|
return changed;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* hash iteration callback function to process a policy change for an
|
|
|
|
* update group. Check if the changed policy matches the updgrp's
|
|
|
|
* outbound route-map or unsuppress-map or default-originate map or
|
|
|
|
* filter-list or prefix-list or distribute-list.
|
|
|
|
* Trigger update generation accordingly.
|
|
|
|
*/
|
|
|
|
static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg)
|
|
|
|
{
|
|
|
|
struct updwalk_context *ctx = arg;
|
|
|
|
struct update_subgroup *subgrp;
|
|
|
|
int changed = 0;
|
|
|
|
int def_changed = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (!updgrp || !ctx || !ctx->policy_name)
|
|
|
|
return UPDWALK_CONTINUE;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
switch (ctx->policy_type) {
|
|
|
|
case BGP_POLICY_ROUTE_MAP:
|
|
|
|
changed = updgrp_route_map_update(updgrp, ctx->policy_name,
|
|
|
|
&def_changed);
|
|
|
|
break;
|
|
|
|
case BGP_POLICY_FILTER_LIST:
|
|
|
|
changed = updgrp_filter_list_update(updgrp, ctx->policy_name);
|
|
|
|
break;
|
|
|
|
case BGP_POLICY_PREFIX_LIST:
|
|
|
|
changed = updgrp_prefix_list_update(updgrp, ctx->policy_name);
|
|
|
|
break;
|
|
|
|
case BGP_POLICY_DISTRIBUTE_LIST:
|
|
|
|
changed =
|
|
|
|
updgrp_distribute_list_update(updgrp, ctx->policy_name);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* If not doing route update, return after updating "config" */
|
|
|
|
if (!ctx->policy_route_update)
|
|
|
|
return UPDWALK_CONTINUE;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/* If nothing has changed, return after updating "config" */
|
|
|
|
if (!changed && !def_changed)
|
|
|
|
return UPDWALK_CONTINUE;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/*
|
|
|
|
* If something has changed, at the beginning of a route-map
|
|
|
|
* modification
|
|
|
|
* event, mark each subgroup's needs-refresh bit. For one, it signals to
|
|
|
|
* whoever that the subgroup needs a refresh. Second, it prevents
|
|
|
|
* premature
|
|
|
|
* merge of this subgroup with another before a complete (outbound)
|
|
|
|
* refresh.
|
|
|
|
*/
|
|
|
|
if (ctx->policy_event_start_flag) {
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2015-05-20 03:03:47 +02:00
|
|
|
update_subgroup_set_needs_refresh(subgrp, 1);
|
|
|
|
}
|
|
|
|
return UPDWALK_CONTINUE;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2020-11-12 11:30:19 +01:00
|
|
|
/* Avoid supressing duplicate routes later
|
|
|
|
* when processing in subgroup_announce_table().
|
|
|
|
*/
|
|
|
|
SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES);
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (changed) {
|
|
|
|
if (bgp_debug_update(NULL, NULL, updgrp, 0))
|
2015-05-20 03:12:17 +02:00
|
|
|
zlog_debug(
|
2020-03-27 12:51:47 +01:00
|
|
|
"u%" PRIu64 ":s%" PRIu64" announcing routes upon policy %s (type %d) change",
|
2015-05-20 03:03:47 +02:00
|
|
|
updgrp->id, subgrp->id,
|
|
|
|
ctx->policy_name, ctx->policy_type);
|
|
|
|
subgroup_announce_route(subgrp);
|
|
|
|
}
|
|
|
|
if (def_changed) {
|
|
|
|
if (bgp_debug_update(NULL, NULL, updgrp, 0))
|
2015-05-20 03:12:17 +02:00
|
|
|
zlog_debug(
|
2020-03-27 12:51:47 +01:00
|
|
|
"u%" PRIu64 ":s%" PRIu64" announcing default upon default routemap %s change",
|
2015-05-20 03:03:47 +02:00
|
|
|
updgrp->id, subgrp->id,
|
|
|
|
ctx->policy_name);
|
2022-03-07 09:56:10 +01:00
|
|
|
if (route_map_lookup_by_name(ctx->policy_name)) {
|
|
|
|
/*
|
|
|
|
* When there is change in routemap, this flow
|
|
|
|
* is triggered. the routemap is still present
|
|
|
|
* in lib, hence its a update flow. The flag
|
|
|
|
* needs to be unset.
|
|
|
|
*/
|
|
|
|
UNSET_FLAG(subgrp->sflags,
|
|
|
|
SUBGRP_STATUS_DEFAULT_ORIGINATE);
|
|
|
|
subgroup_default_originate(subgrp, 0);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* This is a explicit withdraw, since the
|
|
|
|
* routemap is not present in routemap lib. need
|
|
|
|
* to pass 1 for withdraw arg.
|
|
|
|
*/
|
|
|
|
subgroup_default_originate(subgrp, 1);
|
|
|
|
}
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
update_subgroup_set_needs_refresh(subgrp, 0);
|
|
|
|
}
|
|
|
|
return UPDWALK_CONTINUE;
|
|
|
|
}
|
|
|
|
|
2019-02-19 16:46:52 +01:00
|
|
|
static int update_group_walkcb(struct hash_bucket *bucket, void *arg)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
2019-02-19 16:46:52 +01:00
|
|
|
struct update_group *updgrp = bucket->data;
|
2015-05-20 03:03:47 +02:00
|
|
|
struct updwalk_context *wctx = arg;
|
|
|
|
int ret = (*wctx->cb)(updgrp, wctx->context);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Walk callback invoked from update_group_periodic_merge(): try to merge
 * every subgroup of the given update group with a compatible sibling.
 *
 * 'arg' is the human-readable reason string passed through to the merge
 * logic for debug/logging purposes.
 */
static int update_group_periodic_merge_walkcb(struct update_group *updgrp,
					      void *arg)
{
	struct update_subgroup *subgrp;
	struct update_subgroup *tmp_subgrp;
	const char *reason = arg;

	/* SAFE variant: a successful merge may delete 'subgrp' from the
	 * list while we are iterating. */
	UPDGRP_FOREACH_SUBGRP_SAFE (updgrp, subgrp, tmp_subgrp)
		update_subgroup_check_merge(subgrp, reason);
	return UPDWALK_CONTINUE;
}
|
|
|
|
|
|
|
|
/********************
|
|
|
|
* PUBLIC FUNCTIONS
|
|
|
|
********************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* trigger function when a policy (route-map/filter-list/prefix-list/
|
|
|
|
* distribute-list etc.) content changes. Go through all the
|
|
|
|
* update groups and process the change.
|
|
|
|
*
|
|
|
|
* bgp: the bgp instance
|
|
|
|
* ptype: the type of policy that got modified, see bgpd.h
|
|
|
|
* pname: name of the policy
|
|
|
|
* route_update: flag to control if an automatic update generation should
|
|
|
|
* occur
|
|
|
|
* start_event: flag that indicates if it's the beginning of the change.
|
|
|
|
* Esp. when the user is changing the content interactively
|
|
|
|
* over multiple statements. Useful to set dirty flag on
|
|
|
|
* update groups.
|
|
|
|
*/
|
2022-04-12 10:32:23 +02:00
|
|
|
/*
 * Trigger function for a policy (route-map/filter-list/prefix-list/
 * distribute-list etc.) content change: walk every update group of the
 * instance and let updgrp_policy_update_walkcb process the change.
 *
 * bgp:          the bgp instance
 * ptype:        type of policy that was modified (see bgpd.h)
 * pname:        name of the policy
 * route_update: whether an automatic update generation should occur
 * start_event:  nonzero at the beginning of an interactive change, used
 *               to mark update groups dirty
 */
void update_group_policy_update(struct bgp *bgp, enum bgp_policy_type ptype,
				const char *pname, bool route_update,
				int start_event)
{
	/* Designated initializer zeroes all remaining members. */
	struct updwalk_context ctx = {
		.policy_type = ptype,
		.policy_name = pname,
		.policy_route_update = route_update,
		.policy_event_start_flag = start_event,
		.flags = 0,
	};

	update_group_walk(bgp, updgrp_policy_update_walkcb, &ctx);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_split_peer
|
|
|
|
*
|
|
|
|
* Ensure that the given peer is in a subgroup of its own in the
|
|
|
|
* specified update group.
|
|
|
|
*/
|
|
|
|
/*
 * update_subgroup_split_peer
 *
 * Ensure that the given peer ends up in a subgroup of its own inside the
 * specified update group (or inside its current update group when
 * 'updgrp' is NULL).
 *
 * Two cases:
 *  - peer is already alone in its subgroup: the whole subgroup is simply
 *    re-parented to the target update group;
 *  - otherwise a fresh subgroup is created under the target group and the
 *    peer (plus its adj-out state and pending packets) is moved into it.
 */
void update_subgroup_split_peer(struct peer_af *paf,
				struct update_group *updgrp)
{
	struct update_subgroup *old_subgrp, *subgrp;
	uint64_t old_id;

	old_subgrp = paf->subgroup;

	/* NULL target means "split within the current update group". */
	if (!updgrp)
		updgrp = old_subgrp->update_group;

	/*
	 * If the peer is alone in its subgroup, reuse the existing
	 * subgroup.
	 */
	if (old_subgrp->peer_count == 1) {
		/* Already alone in the right group: nothing to do. */
		if (updgrp == old_subgrp->update_group)
			return;

		subgrp = old_subgrp;
		/* Remember the source group id for the debug log below;
		 * the subgroup's parent pointer changes during the move. */
		old_id = old_subgrp->update_group->id;

		if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
			UPDGRP_PEER_DBG_DIS(old_subgrp->update_group);
		}

		/* Re-parent the subgroup: detach from the old update group,
		 * attach to the new one. */
		update_group_remove_subgroup(old_subgrp->update_group,
					     old_subgrp);
		update_group_add_subgroup(updgrp, subgrp);

		if (bgp_debug_peer_updout_enabled(paf->peer->host)) {
			UPDGRP_PEER_DBG_EN(updgrp);
		}
		if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
			zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s moved to u%" PRIu64 ":s%" PRIu64,
				   old_id, subgrp->id, paf->peer->host,
				   updgrp->id, subgrp->id);

		/*
		 * The state of the subgroup (adj_out, advs, packet queue etc)
		 * is consistent internally, but may not be identical to other
		 * subgroups in the new update group even if the version number
		 * matches up. Make sure a full refresh is done before the
		 * subgroup is merged with another.
		 */
		update_subgroup_set_needs_refresh(subgrp, 1);

		SUBGRP_INCR_STAT(subgrp, updgrp_switch_events);
		return;
	}

	/*
	 * Create a new subgroup under the specified update group, and copy
	 * over relevant state to it.
	 */
	subgrp = update_subgroup_create(updgrp);
	update_subgroup_inherit_info(subgrp, old_subgrp);

	/* Record lineage for display/debugging. */
	subgrp->split_from.update_group_id = old_subgrp->update_group->id;
	subgrp->split_from.subgroup_id = old_subgrp->id;

	/*
	 * Copy out relevant state from the old subgroup.
	 */
	update_subgroup_copy_adj_out(paf->subgroup, subgrp);
	update_subgroup_copy_packets(subgrp, paf->next_pkt_to_send);

	if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
		zlog_debug("u%" PRIu64 ":s%" PRIu64" peer %s split and moved into u%" PRIu64":s%" PRIu64,
			   paf->subgroup->update_group->id, paf->subgroup->id,
			   paf->peer->host, updgrp->id, subgrp->id);

	SUBGRP_INCR_STAT(paf->subgroup, split_events);

	/*
	 * Since queued advs were left behind, this new subgroup needs a
	 * refresh.
	 */
	update_subgroup_set_needs_refresh(subgrp, 1);

	/*
	 * Remove peer from old subgroup, and add it to the new one.
	 */
	update_subgroup_remove_peer(paf->subgroup, paf);

	update_subgroup_add_peer(subgrp, paf, 1);
}
|
|
|
|
|
2015-05-20 03:29:17 +02:00
|
|
|
/*
 * Allocate the per-AF update-group hash tables for a BGP instance.
 * Counterpart of update_bgp_group_free().
 */
void update_bgp_group_init(struct bgp *bgp)
{
	int afid;

	/* One hash per (afi, safi) index. */
	AF_FOREACH (afid)
		bgp->update_groups[afid] =
			hash_create(updgrp_hash_key_make, updgrp_hash_cmp,
				    "BGP Update Group Hash");
}
|
|
|
|
|
2015-05-20 03:29:17 +02:00
|
|
|
void update_bgp_group_free(struct bgp *bgp)
|
|
|
|
{
|
|
|
|
int afid;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
AF_FOREACH (afid) {
|
2015-12-11 22:12:56 +01:00
|
|
|
if (bgp->update_groups[afid]) {
|
|
|
|
hash_free(bgp->update_groups[afid]);
|
|
|
|
bgp->update_groups[afid] = NULL;
|
|
|
|
}
|
2015-05-20 03:29:17 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
/*
 * Display update groups for the given (afi, safi) of a BGP instance.
 *
 * vty:       output target
 * subgrp_id: restrict output to this subgroup id (0 = all; interpreted
 *            by update_group_show_walkcb via ctx.subgrp_id)
 * uj:        JSON output requested
 */
void update_group_show(struct bgp *bgp, afi_t afi, safi_t safi, struct vty *vty,
		       uint64_t subgrp_id, bool uj)
{
	struct updwalk_context ctx;
	json_object *json_vrf_obj = NULL;

	memset(&ctx, 0, sizeof(ctx));
	ctx.vty = vty;
	ctx.subgrp_id = subgrp_id;
	ctx.uj = uj;

	if (uj) {
		/* Per-update-group container, filled in by the walk
		 * callback; wrapped per-VRF below. */
		ctx.json_updategrps = json_object_new_object();
		json_vrf_obj = json_object_new_object();
	}

	update_group_af_walk(bgp, afi, safi, update_group_show_walkcb, &ctx);

	if (uj) {
		const char *vname;

		/* Key the output by VRF name; the default instance may have
		 * a NULL bgp->name, so use the canonical default name. */
		if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)
			vname = VRF_DEFAULT_NAME;
		else
			vname = bgp->name;
		/* Ownership of ctx.json_updategrps moves into json_vrf_obj;
		 * vty_json() prints and frees json_vrf_obj. */
		json_object_object_add(json_vrf_obj, vname,
				       ctx.json_updategrps);
		vty_json(vty, json_vrf_obj);
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_group_show_stats
|
|
|
|
*
|
|
|
|
* Show global statistics about update groups.
|
|
|
|
*/
|
|
|
|
/*
 * update_group_show_stats
 *
 * Show global statistics about update groups.
 */
void update_group_show_stats(struct bgp *bgp, struct vty *vty)
{
	vty_out(vty, "Update groups created: %u\n",
		bgp->update_group_stats.updgrps_created);
	vty_out(vty, "Update groups deleted: %u\n",
		bgp->update_group_stats.updgrps_deleted);
	vty_out(vty, "Update subgroups created: %u\n",
		bgp->update_group_stats.subgrps_created);
	vty_out(vty, "Update subgroups deleted: %u\n",
		bgp->update_group_stats.subgrps_deleted);
	vty_out(vty, "Join events: %u\n", bgp->update_group_stats.join_events);
	vty_out(vty, "Prune events: %u\n",
		bgp->update_group_stats.prune_events);
	vty_out(vty, "Merge events: %u\n",
		bgp->update_group_stats.merge_events);
	vty_out(vty, "Split events: %u\n",
		bgp->update_group_stats.split_events);
	vty_out(vty, "Update group switch events: %u\n",
		bgp->update_group_stats.updgrp_switch_events);
	vty_out(vty, "Peer route refreshes combined: %u\n",
		bgp->update_group_stats.peer_refreshes_combined);
	vty_out(vty, "Merge checks triggered: %u\n",
		bgp->update_group_stats.merge_checks_triggered);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_group_adjust_peer
|
|
|
|
*/
|
|
|
|
/*
 * update_group_adjust_peer
 *
 * Place the peer (for one afi/safi, represented by 'paf') into the update
 * group / subgroup that matches its current outbound configuration,
 * creating the group and/or subgroup on demand.  No-op unless the peer is
 * established, configured, and has negotiated this address family.
 */
void update_group_adjust_peer(struct peer_af *paf)
{
	struct update_group *updgrp;
	struct update_subgroup *subgrp, *old_subgrp;
	struct peer *peer;

	if (!paf)
		return;

	peer = PAF_PEER(paf);
	/* Only established sessions participate in update groups. */
	if (!peer_established(peer)) {
		return;
	}

	/* Skip dynamically learned (non-configured) peers. */
	if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE)) {
		return;
	}

	/* Address family must have been negotiated with the peer. */
	if (!peer->afc_nego[paf->afi][paf->safi]) {
		return;
	}

	/* Find (or create) the update group matching this peer's
	 * outbound configuration. */
	updgrp = update_group_find(paf);
	if (!updgrp) {
		updgrp = update_group_create(paf);
		if (!updgrp) {
			flog_err(EC_BGP_UPDGRP_CREATE,
				 "couldn't create update group for peer %s",
				 paf->peer->host);
			return;
		}
	}

	old_subgrp = paf->subgroup;

	if (old_subgrp) {

		/*
		 * If the update group of the peer is unchanged, the peer can
		 * stay
		 * in its existing subgroup and we're done.
		 */
		if (old_subgrp->update_group == updgrp)
			return;

		/*
		 * The peer is switching between update groups. Put it in its
		 * own subgroup under the new update group.
		 */
		update_subgroup_split_peer(paf, updgrp);
		return;
	}

	/* Peer had no subgroup yet: join an existing compatible subgroup
	 * or create a fresh one. */
	subgrp = update_subgroup_find(updgrp, paf);
	if (!subgrp) {
		subgrp = update_subgroup_create(updgrp);
		if (!subgrp)
			return;
	}

	update_subgroup_add_peer(subgrp, paf, 1);
	if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
		zlog_debug("u%" PRIu64 ":s%" PRIu64 " add peer %s", updgrp->id,
			   subgrp->id, paf->peer->host);

	return;
}
|
|
|
|
|
|
|
|
int update_group_adjust_soloness(struct peer *peer, int set)
|
|
|
|
{
|
|
|
|
struct peer_group *group;
|
|
|
|
struct listnode *node, *nnode;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
|
|
|
|
peer_lonesoul_or_not(peer, set);
|
2021-06-07 15:39:10 +02:00
|
|
|
if (peer_established(peer))
|
2015-05-20 03:03:47 +02:00
|
|
|
bgp_announce_route_all(peer);
|
|
|
|
} else {
|
|
|
|
group = peer->group;
|
|
|
|
for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
|
|
|
|
peer_lonesoul_or_not(peer, set);
|
2021-06-07 15:39:10 +02:00
|
|
|
if (peer_established(peer))
|
2015-05-20 03:03:47 +02:00
|
|
|
bgp_announce_route_all(peer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* update_subgroup_rib
|
|
|
|
*/
|
|
|
|
struct bgp_table *update_subgroup_rib(struct update_subgroup *subgrp)
|
|
|
|
{
|
|
|
|
struct bgp *bgp;
|
|
|
|
|
|
|
|
bgp = SUBGRP_INST(subgrp);
|
|
|
|
if (!bgp)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return bgp->rib[SUBGRP_AFI(subgrp)][SUBGRP_SAFI(subgrp)];
|
|
|
|
}
|
|
|
|
|
|
|
|
void update_group_af_walk(struct bgp *bgp, afi_t afi, safi_t safi,
|
|
|
|
updgrp_walkcb cb, void *ctx)
|
|
|
|
{
|
|
|
|
struct updwalk_context wctx;
|
|
|
|
int afid;
|
|
|
|
|
|
|
|
if (!bgp)
|
|
|
|
return;
|
|
|
|
afid = afindex(afi, safi);
|
|
|
|
if (afid >= BGP_AF_MAX)
|
|
|
|
return;
|
|
|
|
|
|
|
|
memset(&wctx, 0, sizeof(wctx));
|
|
|
|
wctx.cb = cb;
|
|
|
|
wctx.context = ctx;
|
2015-05-20 03:04:20 +02:00
|
|
|
|
|
|
|
if (bgp->update_groups[afid])
|
|
|
|
hash_walk(bgp->update_groups[afid], update_group_walkcb, &wctx);
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Invoke 'cb' for every update group of the instance, across all
 * address families.
 */
void update_group_walk(struct bgp *bgp, updgrp_walkcb cb, void *ctx)
{
	afi_t afi;
	safi_t safi;

	FOREACH_AFI_SAFI (afi, safi)
		update_group_af_walk(bgp, afi, safi, cb, ctx);
}
|
|
|
|
|
|
|
|
/*
 * Periodic housekeeping: walk every update group and merge subgroups
 * that have converged to identical state.
 */
void update_group_periodic_merge(struct bgp *bgp)
{
	/* Reason string is only used for debug logging downstream. */
	char reason[] = "periodic merge check";

	update_group_walk(bgp, update_group_periodic_merge_walkcb,
			  (void *)reason);
}
|
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
static int
|
|
|
|
update_group_default_originate_route_map_walkcb(struct update_group *updgrp,
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
struct update_subgroup *subgrp;
|
|
|
|
struct peer *peer;
|
|
|
|
afi_t afi;
|
|
|
|
safi_t safi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) {
|
2015-05-20 03:04:20 +02:00
|
|
|
peer = SUBGRP_PEER(subgrp);
|
|
|
|
afi = SUBGRP_AFI(subgrp);
|
|
|
|
safi = SUBGRP_SAFI(subgrp);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
if (peer->default_rmap[afi][safi].name) {
|
2022-03-07 09:56:10 +01:00
|
|
|
/*
|
|
|
|
* When there is change in routemap this flow will
|
|
|
|
* be triggered. We need to unset the Flag to ensure
|
|
|
|
* the update flow gets triggered.
|
|
|
|
*/
|
|
|
|
UNSET_FLAG(subgrp->sflags,
|
|
|
|
SUBGRP_STATUS_DEFAULT_ORIGINATE);
|
2015-05-20 03:04:20 +02:00
|
|
|
subgroup_default_originate(subgrp, 0);
|
|
|
|
}
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
return UPDWALK_CONTINUE;
|
|
|
|
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
void update_group_refresh_default_originate_route_map(struct event *thread)
|
2015-05-20 03:04:20 +02:00
|
|
|
{
|
|
|
|
struct bgp *bgp;
|
|
|
|
char reason[] = "refresh default-originate route-map";
|
|
|
|
|
|
|
|
bgp = THREAD_ARG(thread);
|
|
|
|
update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
|
|
|
|
reason);
|
2022-06-03 16:59:31 +02:00
|
|
|
THREAD_OFF(bgp->t_rmap_def_originate_eval);
|
2015-05-20 03:04:20 +02:00
|
|
|
bgp_unlock(bgp);
|
|
|
|
}
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/*
|
|
|
|
* peer_af_announce_route
|
|
|
|
*
|
|
|
|
* Refreshes routes out to a peer_af immediately.
|
|
|
|
*
|
2019-07-01 19:26:05 +02:00
|
|
|
* If the combine parameter is true, then this function will try to
|
2015-05-20 03:03:47 +02:00
|
|
|
* gather other peers in the subgroup for which a route announcement
|
|
|
|
* is pending and efficently announce routes to all of them.
|
|
|
|
*
|
|
|
|
* For now, the 'combine' option has an effect only if all peers in
|
|
|
|
* the subgroup have a route announcement pending.
|
|
|
|
*/
|
|
|
|
/*
 * Refresh routes out to one peer_af immediately; when 'combine' is
 * nonzero and every other peer in the subgroup also has an announcement
 * pending, announce to the whole subgroup at once instead of splitting
 * the peer out.
 */
void peer_af_announce_route(struct peer_af *paf, int combine)
{
	struct update_subgroup *subgrp;
	struct peer_af *cur_paf;
	int all_pending;

	subgrp = paf->subgroup;
	all_pending = 0;

	if (combine) {
		/*
		 * If there are other peers in the old subgroup that also need
		 * routes to be announced, pull them into the peer's new
		 * subgroup.
		 * Combine route announcement with other peers if possible.
		 *
		 * For now, we combine only if all peers in the subgroup have an
		 * announcement pending.
		 */
		all_pending = 1;

		SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
			if (cur_paf == paf)
				continue;

			/* A running announce timer means this peer's
			 * announcement is still pending. */
			if (cur_paf->t_announce_route)
				continue;

			all_pending = 0;
			break;
		}
	}
	/*
	 * Announce to the peer alone if we were not asked to combine peers,
	 * or if some peers don't have a route announcement pending.
	 */
	if (!combine || !all_pending) {
		/* Isolate the peer into its own subgroup before the
		 * targeted announcement. */
		update_subgroup_split_peer(paf, NULL);
		subgrp = paf->subgroup;

		assert(subgrp && subgrp->update_group);
		if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
			zlog_debug("u%" PRIu64 ":s%" PRIu64" %s announcing routes",
				   subgrp->update_group->id, subgrp->id,
				   paf->peer->host);

		subgroup_announce_route(paf->subgroup);
		return;
	}

	/*
	 * We will announce routes the entire subgroup.
	 *
	 * First stop refresh timers on all the other peers.
	 */
	SUBGRP_FOREACH_PEER (subgrp, cur_paf) {
		if (cur_paf == paf)
			continue;

		bgp_stop_announce_route_timer(cur_paf);
	}

	if (bgp_debug_update(paf->peer, NULL, subgrp->update_group, 0))
		zlog_debug("u%" PRIu64 ":s%" PRIu64" announcing routes to %s, combined into %d peers",
			   subgrp->update_group->id, subgrp->id,
			   paf->peer->host, subgrp->peer_count);

	subgroup_announce_route(subgrp);

	/* Credit the refreshes saved by combining (everyone but 'paf'). */
	SUBGRP_INCR_STAT_BY(subgrp, peer_refreshes_combined,
			    subgrp->peer_count - 1);
}
|
|
|
|
|
2017-06-12 22:20:50 +02:00
|
|
|
/*
 * Kick outbound packet generation for every established peer of the
 * subgroup.
 */
void subgroup_trigger_write(struct update_subgroup *subgrp)
{
	struct peer_af *paf;

	/*
	 * For each peer in the subgroup, schedule a job to pull packets from
	 * the subgroup output queue into their own output queue. This action
	 * will trigger a write job on the I/O thread.
	 */
	SUBGRP_FOREACH_PEER (subgrp, paf)
		if (peer_established(paf->peer))
			/* 0 msec: run as soon as the event loop allows. */
			event_add_timer_msec(
				bm->master, bgp_generate_updgrp_packets,
				paf->peer, 0,
				&paf->peer->t_generate_updgrp_packets);
}
|
|
|
|
|
2015-05-20 03:03:47 +02:00
|
|
|
/*
 * Walk callback: disable per-peer update-out debugging on the update
 * group.  'arg' is unused.
 */
int update_group_clear_update_dbg(struct update_group *updgrp, void *arg)
{
	UPDGRP_PEER_DBG_OFF(updgrp);
	return UPDWALK_CONTINUE;
}
|
BGP: support for addpath TX
Signed-off-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Reviewed-by: Vivek Venkataraman <vivek@cumulusnetworks.com
Ticket: CM-8014
This implements addpath TX with the first feature to use it
being "neighbor x.x.x.x addpath-tx-all-paths".
One change to show output is 'show ip bgp x.x.x.x'. If no addpath-tx
features are configured for any peers then everything looks the same
as it is today in that "Advertised to" is at the top and refers to
which peers the bestpath was advertise to.
root@superm-redxp-05[quagga-stash5]# vtysh -c 'show ip bgp 1.1.1.1'
BGP routing table entry for 1.1.1.1/32
Paths: (6 available, best #6, table Default-IP-Routing-Table)
Advertised to non peer-group peers:
r1(10.0.0.1) r2(10.0.0.2) r3(10.0.0.3) r4(10.0.0.4) r5(10.0.0.5) r6(10.0.0.6) r8(10.0.0.8)
Local, (Received from a RR-client)
12.12.12.12 (metric 20) from r2(10.0.0.2) (10.0.0.2)
Origin IGP, metric 0, localpref 100, valid, internal
AddPath ID: RX 0, TX 8
Last update: Fri Oct 30 18:26:44 2015
[snip]
but once you enable an addpath feature we must display "Advertised to" on a path-by-path basis:
superm-redxp-05# show ip bgp 1.1.1.1/32
BGP routing table entry for 1.1.1.1/32
Paths: (6 available, best #6, table Default-IP-Routing-Table)
Local, (Received from a RR-client)
12.12.12.12 (metric 20) from r2(10.0.0.2) (10.0.0.2)
Origin IGP, metric 0, localpref 100, valid, internal
AddPath ID: RX 0, TX 8
Advertised to: r8(10.0.0.8)
Last update: Fri Oct 30 18:26:44 2015
Local, (Received from a RR-client)
34.34.34.34 (metric 20) from r3(10.0.0.3) (10.0.0.3)
Origin IGP, metric 0, localpref 100, valid, internal
AddPath ID: RX 0, TX 7
Advertised to: r8(10.0.0.8)
Last update: Fri Oct 30 18:26:39 2015
Local, (Received from a RR-client)
56.56.56.56 (metric 20) from r6(10.0.0.6) (10.0.0.6)
Origin IGP, metric 0, localpref 100, valid, internal
AddPath ID: RX 0, TX 6
Advertised to: r8(10.0.0.8)
Last update: Fri Oct 30 18:26:39 2015
Local, (Received from a RR-client)
56.56.56.56 (metric 20) from r5(10.0.0.5) (10.0.0.5)
Origin IGP, metric 0, localpref 100, valid, internal
AddPath ID: RX 0, TX 5
Advertised to: r8(10.0.0.8)
Last update: Fri Oct 30 18:26:39 2015
Local, (Received from a RR-client)
34.34.34.34 (metric 20) from r4(10.0.0.4) (10.0.0.4)
Origin IGP, metric 0, localpref 100, valid, internal
AddPath ID: RX 0, TX 4
Advertised to: r8(10.0.0.8)
Last update: Fri Oct 30 18:26:39 2015
Local, (Received from a RR-client)
12.12.12.12 (metric 20) from r1(10.0.0.1) (10.0.0.1)
Origin IGP, metric 0, localpref 100, valid, internal, best
AddPath ID: RX 0, TX 3
Advertised to: r1(10.0.0.1) r2(10.0.0.2) r3(10.0.0.3) r4(10.0.0.4) r5(10.0.0.5) r6(10.0.0.6) r8(10.0.0.8)
Last update: Fri Oct 30 18:26:34 2015
superm-redxp-05#
2015-11-05 18:29:43 +01:00
|
|
|
|
2015-11-06 17:34:41 +01:00
|
|
|
/* Return true if we should addpath encode NLRI to this peer */
|
2022-01-27 08:51:59 +01:00
|
|
|
/* Return true if we should addpath encode NLRI to this peer: we must
 * have advertised the TX capability and received the peer's RX
 * capability for this address family. */
bool bgp_addpath_encode_tx(struct peer *peer, afi_t afi, safi_t safi)
{
	if (!CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ADDPATH_AF_TX_ADV))
		return false;

	return CHECK_FLAG(peer->af_cap[afi][safi],
			  PEER_CAP_ADDPATH_AF_RX_RCV) != 0;
}
|
2022-01-27 09:12:59 +01:00
|
|
|
|
2022-12-06 17:28:17 +01:00
|
|
|
bool bgp_addpath_capable(struct bgp_path_info *bpi, struct peer *peer,
|
|
|
|
afi_t afi, safi_t safi)
|
|
|
|
{
|
|
|
|
return (bgp_addpath_tx_path(peer->addpath_type[afi][safi], bpi) ||
|
|
|
|
(safi == SAFI_LABELED_UNICAST &&
|
|
|
|
bgp_addpath_tx_path(peer->addpath_type[afi][SAFI_UNICAST],
|
|
|
|
bpi)));
|
|
|
|
}
|
|
|
|
|
2022-01-27 09:12:59 +01:00
|
|
|
/*
 * Return true if the path is eligible for advertisement: either it is
 * the selected best path, or the session is addpath-capable and the
 * path qualifies for addpath transmission.
 */
bool bgp_check_selected(struct bgp_path_info *bpi, struct peer *peer,
			bool addpath_capable, afi_t afi, safi_t safi)
{
	if (CHECK_FLAG(bpi->flags, BGP_PATH_SELECTED))
		return true;

	return addpath_capable && bgp_addpath_capable(bpi, peer, afi, safi);
}
|