/* BGP routing information base
 * Copyright (C) 1996, 97, 98, 2000 Kunihiro Ishiguro
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _QUAGGA_BGP_ROUTE_H
#define _QUAGGA_BGP_ROUTE_H

#include <stdbool.h>

#include "hook.h"
#include "queue.h"
#include "nexthop.h"
#include "bgp_table.h"
#include "bgp_addpath_types.h"
struct bgp_nexthop_cache;
struct bgp_route_evpn;

enum bgp_show_type {
	bgp_show_type_normal,
	bgp_show_type_regexp,
	bgp_show_type_prefix_list,
	bgp_show_type_filter_list,
	bgp_show_type_route_map,
	bgp_show_type_neighbor,
	bgp_show_type_cidr_only,
	bgp_show_type_prefix_longer,
	bgp_show_type_community_all,
	bgp_show_type_community,
	bgp_show_type_community_exact,
	bgp_show_type_community_list,
	bgp_show_type_community_list_exact,
	bgp_show_type_lcommunity_all,
	bgp_show_type_lcommunity,
	bgp_show_type_lcommunity_exact,
	bgp_show_type_lcommunity_list,
	bgp_show_type_lcommunity_list_exact,
	bgp_show_type_flap_statistics,
	bgp_show_type_flap_neighbor,
	bgp_show_type_dampend_paths,
	bgp_show_type_damp_neighbor,
	bgp_show_type_detail,
};

enum bgp_show_adj_route_type {
	bgp_show_adj_route_advertised,
	bgp_show_adj_route_received,
	bgp_show_adj_route_filtered,
	bgp_show_adj_route_bestpath,
};

#define BGP_SHOW_SCODE_HEADER                                                  \
	"Status codes:  s suppressed, d damped, "                              \
	"h history, * valid, > best, = multipath,\n"                           \
	"               i internal, r RIB-failure, S Stale, R Removed\n"
#define BGP_SHOW_OCODE_HEADER "Origin codes:  i - IGP, e - EGP, ? - incomplete\n\n"
#define BGP_SHOW_NCODE_HEADER "Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self\n"
#define BGP_SHOW_HEADER "   Network          Next Hop            Metric LocPrf Weight Path\n"
#define BGP_SHOW_HEADER_WIDE "   Network                                      Next Hop                                  Metric LocPrf Weight Path\n"

/* Maximum number of labels we can process or send with a prefix. We
 * really do only 1 for MPLS (BGP-LU) but we can do 2 for EVPN-VxLAN.
 */
#define BGP_MAX_LABELS 2

/* Maximum number of sids we can process or send with a prefix. */
#define BGP_MAX_SIDS 6

/* Error codes for handling NLRI */
#define BGP_NLRI_PARSE_OK 0
#define BGP_NLRI_PARSE_ERROR_PREFIX_OVERFLOW -1
#define BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW -2
#define BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH -3
#define BGP_NLRI_PARSE_ERROR_PACKET_LENGTH -4
#define BGP_NLRI_PARSE_ERROR_LABEL_LENGTH -5
#define BGP_NLRI_PARSE_ERROR_EVPN_MISSING_TYPE -6
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE2_SIZE -7
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE3_SIZE -8
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE4_SIZE -9
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE5_SIZE -10
#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_IPV6_NOT_SUPPORTED -11
#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_NLRI_SIZELIMIT -12
#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_BAD_FORMAT -13
#define BGP_NLRI_PARSE_ERROR_ADDRESS_FAMILY -14
#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE1_SIZE -15
#define BGP_NLRI_PARSE_ERROR -32
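
/*
 * Illustrative sketch, not part of this header: NLRI parse routines
 * return BGP_NLRI_PARSE_OK on success and one of the negative codes
 * above on failure, so callers only need to test for a nonzero result.
 * The function name below is hypothetical:
 *
 *	static int bgp_nlri_parse_example(uint8_t *pnt, uint8_t *lim)
 *	{
 *		if (pnt >= lim)
 *			return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
 *		if (*pnt > IPV6_MAX_BITLEN)
 *			return BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH;
 *		return BGP_NLRI_PARSE_OK;
 *	}
 */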

/* MAC-IP/type-2 path_info in the VNI routing table is linked to the
 * destination ES
 */
struct bgp_path_es_info {
	/* back pointer to the route */
	struct bgp_path_info *pi;
	vni_t vni;
	/* destination ES */
	struct bgp_evpn_es *es;
	/* memory used for linking the path to the destination ES */
	struct listnode es_listnode;
};

/* Ancillary information to struct bgp_path_info,
 * used for uncommonly used data (aggregation, MPLS, etc.)
 * and lazily allocated to save memory.
 */
struct bgp_path_info_extra {
	/* Pointer to dampening structure. */
	struct bgp_damp_info *damp_info;

	/** List of aggregations that suppress this path. */
	struct list *aggr_suppressors;

	/* Nexthop reachability check. */
	uint32_t igpmetric;

	/* MPLS label(s) - VNI(s) for EVPN-VxLAN */
	mpls_label_t label[BGP_MAX_LABELS];
	uint32_t num_labels;

	/* af specific flags */
	uint16_t af_flags;
#define BGP_EVPN_MACIP_TYPE_SVI_IP (1 << 0)

	/* SRv6 SID(s) for SRv6-VPN */
	struct in6_addr sid[BGP_MAX_SIDS];
	uint32_t num_sids;

#ifdef ENABLE_BGP_VNC
	union {

		struct {
			void *rfapi_handle; /* export: NVE advertising this
					       route */
			struct list *local_nexthops; /* optional, for static
							routes */
		} export;

		struct {
			struct thread *timer;
			void *hme; /* encap monitor, if this is a VPN route */
			struct prefix_rd
				rd; /* import: route's route-distinguisher */
			uint8_t un_family; /* family of cached un address, 0 if
					      unset */
			union {
				struct in_addr addr4;
				struct in6_addr addr6;
			} un; /* cached un address */
			time_t create_time;
			struct prefix aux_prefix; /* AFI_L2VPN: the IP addr,
						     if family set */
		} import;

	} vnc;
#endif

	/* For imported routes into a VNI (or VRF), this points to the parent.
	 */
	void *parent;

	/*
	 * Some tunnelish parameters follow. Maybe consolidate into an
	 * internal tunnel structure?
	 */

	/*
	 * Original bgp instance for imported routes. Needed for:
	 * 1. Find all routes from a specific vrf for deletion
	 * 2. vrf context of original nexthop
	 *
	 * Store pointer to bgp instance rather than bgp->vrf_id because
	 * bgp->vrf_id is not always valid (or may change?).
	 *
	 * Set to NULL if route is not imported from another bgp instance.
	 */
	struct bgp *bgp_orig;

	/*
	 * Nexthop in context of original bgp instance. Needed
	 * for label resolution of core mpls routes exported to a vrf.
	 * Set nexthop_orig.family to 0 if not valid.
	 */
	struct prefix nexthop_orig;

	/* presence of FS pbr firewall based entry */
	struct list *bgp_fs_pbr;
	/* presence of FS pbr iprule based entry */
	struct list *bgp_fs_iprule;

	/* Destination Ethernet Segment links for EVPN MH */
	struct bgp_path_es_info *es_info;
};
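
/*
 * Illustrative sketch, not part of this header: because this structure
 * is lazily allocated, struct bgp_path_info (below) only carries a
 * pointer to it, and writers allocate it on first use. Assuming FRR's
 * XCALLOC allocator macro and a suitable MTYPE, an accessor looks
 * roughly like:
 *
 *	struct bgp_path_info_extra *path_extra_get(struct bgp_path_info *pi)
 *	{
 *		if (!pi->extra)
 *			pi->extra = XCALLOC(MTYPE_BGP_ROUTE_EXTRA,
 *					    sizeof(struct bgp_path_info_extra));
 *		return pi->extra;
 *	}
 */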

struct bgp_path_info {
	/* For linked list. */
	struct bgp_path_info *next;
	struct bgp_path_info *prev;

	/* For nexthop linked list */
	LIST_ENTRY(bgp_path_info) nh_thread;

	/* Back pointer to the prefix node */
	struct bgp_dest *net;

	/* Back pointer to the nexthop structure */
	struct bgp_nexthop_cache *nexthop;

	/* Peer structure. */
	struct peer *peer;

	/* Attribute structure. */
	struct attr *attr;

	/* Extra information */
	struct bgp_path_info_extra *extra;

	/* Multipath information */
	struct bgp_path_info_mpath *mpath;

	/* Uptime. */
	time_t uptime;

	/* reference count */
	int lock;

	/* BGP information status. */
	uint16_t flags;
#define BGP_PATH_IGP_CHANGED (1 << 0)
#define BGP_PATH_DAMPED (1 << 1)
#define BGP_PATH_HISTORY (1 << 2)
#define BGP_PATH_SELECTED (1 << 3)
#define BGP_PATH_VALID (1 << 4)
#define BGP_PATH_ATTR_CHANGED (1 << 5)
#define BGP_PATH_DMED_CHECK (1 << 6)
#define BGP_PATH_DMED_SELECTED (1 << 7)
#define BGP_PATH_STALE (1 << 8)
#define BGP_PATH_REMOVED (1 << 9)
#define BGP_PATH_COUNTED (1 << 10)
#define BGP_PATH_MULTIPATH (1 << 11)
#define BGP_PATH_MULTIPATH_CHG (1 << 12)
#define BGP_PATH_RIB_ATTR_CHG (1 << 13)
#define BGP_PATH_ANNC_NH_SELF (1 << 14)
#define BGP_PATH_LINK_BW_CHG (1 << 15)

	/* BGP route type. This can be static, RIP, OSPF, BGP etc. */
	uint8_t type;

	/* When above type is BGP, this sub type specifies the BGP sub type
	   information. */
	uint8_t sub_type;
#define BGP_ROUTE_NORMAL 0
#define BGP_ROUTE_STATIC 1
#define BGP_ROUTE_AGGREGATE 2
#define BGP_ROUTE_REDISTRIBUTE 3
#ifdef ENABLE_BGP_VNC
# define BGP_ROUTE_RFP 4
#endif
#define BGP_ROUTE_IMPORTED 5 /* from another bgp instance/safi */

	unsigned short instance;

	/* Addpath identifiers */
	uint32_t addpath_rx_id;
	struct bgp_addpath_info_data tx_addpath;
};
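
/*
 * Illustrative sketch, not part of this header: paths hang off a
 * bgp_dest as a doubly linked list via next/prev, and consumers test
 * the flags above with CHECK_FLAG. Assuming `first` is the head path
 * of a destination:
 *
 *	struct bgp_path_info *pi;
 *
 *	for (pi = first; pi; pi = pi->next) {
 *		if (CHECK_FLAG(pi->flags, BGP_PATH_REMOVED))
 *			continue;
 *		if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
 *			break;
 *	}
 *
 * On exit, pi is either NULL or the currently selected bestpath.
 */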

/* Structure used in BGP path selection */
struct bgp_path_info_pair {
	struct bgp_path_info *old;
	struct bgp_path_info *new;
};

/* BGP static route configuration. */
struct bgp_static {
	/* Backdoor configuration. */
	int backdoor;

	/* Label index configuration; applies to LU prefixes. */
	uint32_t label_index;
#define BGP_INVALID_LABEL_INDEX 0xFFFFFFFF

	/* Import check status. */
	uint8_t valid;

	/* IGP metric. */
	uint32_t igpmetric;

	/* IGP nexthop. */
	struct in_addr igpnexthop;

	/* Atomic set reference count (ie cause of pathlimit) */
	uint32_t atomic;

	/* BGP redistribute route-map. */
	struct {
		char *name;
		struct route_map *map;
	} rmap;

	/* Route Distinguisher */
	struct prefix_rd prd;

	/* MPLS label. */
	mpls_label_t label;

	/* EVPN */
	esi_t *eth_s_id;
	struct ethaddr *router_mac;
	uint16_t encap_tunneltype;
	struct prefix gatewayIp;
};
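
/*
 * Illustrative usage, not part of this header: label_index only
 * applies when explicitly configured, so consumers compare against the
 * sentinel before using it (the helper name is hypothetical):
 *
 *	if (bgp_static->label_index != BGP_INVALID_LABEL_INDEX)
 *		attach_prefix_sid_label_index(bgp_static->label_index);
 */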
|
|
|
|
|
2019-02-06 14:44:20 +01:00
|
|
|
/* Aggreagete address:
|
|
|
|
*
|
|
|
|
* advertise-map Set condition to advertise attribute
|
|
|
|
* as-set Generate AS set path information
|
|
|
|
* attribute-map Set attributes of aggregate
|
|
|
|
* route-map Set parameters of aggregate
|
|
|
|
* summary-only Filter more specific routes from updates
|
|
|
|
* suppress-map Conditionally filter more specific routes from updates
|
|
|
|
* <cr>
|
|
|
|
*/
|
|
|
|
struct bgp_aggregate {
|
|
|
|
/* Summary-only flag. */
|
|
|
|
uint8_t summary_only;
|
|
|
|
|
|
|
|
/* AS set generation. */
|
|
|
|
uint8_t as_set;
|
|
|
|
|
|
|
|
/* Route-map for aggregated route. */
|
2019-08-21 17:16:05 +02:00
|
|
|
struct {
|
|
|
|
char *name;
|
|
|
|
struct route_map *map;
|
|
|
|
} rmap;
|
2019-02-06 14:44:20 +01:00
|
|
|
|
|
|
|
/* Suppress-count. */
|
|
|
|
unsigned long count;
|
|
|
|
|
|
|
|
/* Count of routes of origin type incomplete under this aggregate. */
|
|
|
|
unsigned long incomplete_origin_count;
|
|
|
|
|
|
|
|
/* Count of routes of origin type egp under this aggregate. */
|
|
|
|
unsigned long egp_origin_count;
|
|
|
|
|
2020-02-12 20:19:02 +01:00
|
|
|
/* Optional modify flag to override ORIGIN */
|
|
|
|
uint8_t origin;
|
|
|
|
|
2019-02-06 14:44:20 +01:00
|
|
|
/* Hash containing the communities of all the
|
|
|
|
* routes under this aggregate.
|
|
|
|
*/
|
|
|
|
struct hash *community_hash;
|
|
|
|
|
|
|
|
/* Hash containing the extended communities of all the
|
|
|
|
* routes under this aggregate.
|
|
|
|
*/
|
|
|
|
struct hash *ecommunity_hash;
|
|
|
|
|
|
|
|
/* Hash containing the large communities of all the
|
|
|
|
* routes under this aggregate.
|
|
|
|
*/
|
|
|
|
struct hash *lcommunity_hash;
|
|
|
|
|
|
|
|
/* Hash containing the AS-Path of all the
|
|
|
|
* routes under this aggregate.
|
|
|
|
*/
|
|
|
|
struct hash *aspath_hash;
|
|
|
|
|
|
|
|
/* Aggregate route's community. */
|
|
|
|
struct community *community;
|
|
|
|
|
|
|
|
/* Aggregate route's extended community. */
|
|
|
|
struct ecommunity *ecommunity;
|
|
|
|
|
|
|
|
/* Aggregate route's large community. */
|
|
|
|
struct lcommunity *lcommunity;
|
|
|
|
|
|
|
|
/* Aggregate route's as-path. */
|
|
|
|
struct aspath *aspath;
|
|
|
|
|
|
|
|
/* SAFI configuration. */
|
|
|
|
safi_t safi;
|
2020-10-02 20:47:17 +02:00
|
|
|
|
|
|
|
/** Match only equal MED. */
|
|
|
|
bool match_med;
|
|
|
|
/* MED matching state. */
|
|
|
|
/** Did we get the first MED value? */
|
|
|
|
bool med_initialized;
|
|
|
|
/** Are there MED mismatches? */
|
|
|
|
bool med_mismatched;
|
|
|
|
/** MED value found in current group. */
|
|
|
|
uint32_t med_matched_value;
|
|
|
|
|
|
|
|
/**
|
|
|
|
 * Test whether the MEDs of all routes under the aggregate match;
|
|
|
|
 * otherwise the macro evaluates to `false`.  It also evaluates to
|
|
|
|
 * `true` when MED matching is disabled.
|
|
|
|
*/
|
|
|
|
#define AGGREGATE_MED_VALID(aggregate) \
|
|
|
|
(((aggregate)->match_med && !(aggregate)->med_mismatched) \
|
|
|
|
|| !(aggregate)->match_med)
|
2020-10-19 00:17:02 +02:00
|
|
|
|
|
|
|
/** Suppress map route map name (`NULL` when disabled). */
|
|
|
|
char *suppress_map_name;
|
|
|
|
/** Suppress map route map pointer. */
|
|
|
|
struct route_map *suppress_map;
|
2019-02-06 14:44:20 +01:00
|
|
|
};
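/*
 * Illustrative sketch (hypothetical helper, not part of bgpd): how the
 * MED matching state above would be fed while walking the routes under
 * an aggregate; callers then gate announcement on AGGREGATE_MED_VALID().
 */
static inline void bgp_aggregate_med_feed_sketch(struct bgp_aggregate *agg,
						 uint32_t med)
{
	if (!agg->match_med)
		return;

	if (!agg->med_initialized) {
		/* First route under the aggregate: remember its MED. */
		agg->med_initialized = true;
		agg->med_matched_value = med;
	} else if (agg->med_matched_value != med) {
		/* Any differing MED invalidates the whole group. */
		agg->med_mismatched = true;
	}
}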
|
|
|
|
|
2017-01-14 14:34:22 +01:00
|
|
|
#define BGP_NEXTHOP_AFI_FROM_NHLEN(nhlen) \
|
|
|
|
((nhlen) < IPV4_MAX_BYTELEN \
|
|
|
|
? 0 \
|
|
|
|
: ((nhlen) < IPV6_MAX_BYTELEN ? AFI_IP : AFI_IP6))
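/*
 * Worked mapping for the macro above (assuming the usual lengths from
 * lib/prefix.h: IPV4_MAX_BYTELEN == 4, IPV6_MAX_BYTELEN == 16):
 *
 *   nhlen <  4       -> 0 (no usable nexthop AFI)
 *   4 <= nhlen < 16  -> AFI_IP
 *   nhlen >= 16      -> AFI_IP6
 */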
|
|
|
|
|
2015-06-11 18:19:12 +02:00
|
|
|
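/* True when the attribute set carries an IPv6 MP_REACH nexthop and no
 * classic IPv4 NEXT_HOP attribute is present. */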
#define BGP_ATTR_NEXTHOP_AFI_IP6(attr) \
|
|
|
|
(!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) \
|
2019-02-26 22:22:27 +01:00
|
|
|
&& ((attr)->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL \
|
|
|
|
|| (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL \
|
|
|
|
|| (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL \
|
|
|
|
|| (attr)->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL))
|
2018-09-14 02:34:42 +02:00
|
|
|
#define BGP_PATH_COUNTABLE(BI) \
|
|
|
|
(!CHECK_FLAG((BI)->flags, BGP_PATH_HISTORY) \
|
|
|
|
&& !CHECK_FLAG((BI)->flags, BGP_PATH_REMOVED))
|
2015-05-20 02:40:38 +02:00
|
|
|
|
[bgpd] Handle pcount as flags are changed, fixing pcount issues
2006-09-06 Paul Jakma <paul.jakma@sun.com>
* (general) Squash any and all prefix-count issues by
abstracting route flag changes, and maintaining count as and
when flags are modified (rather than relying on explicit
modifications of count being sprinkled in just the right
places throughout the code).
* bgp_route.c: (bgp_pcount_{dec,inc}rement) removed.
(bgp_pcount_adjust) new, update prefix count as
needed for a given route.
(bgp_info_{uns,s}et_flag) set/unset a BGP_INFO route status
flag, calling previous function when appropriate.
(general) Update all set/unsets of flags to use previous.
Remove pcount_{dec,inc}rement calls.
No need to unset BGP_INFO_VALID in places where
bgp_info_delete is called, it does that anyway.
* bgp_{damp,nexthop}.c: Update to use bgp_info_{un,}set_flag.
* bgp_route.h: Export bgp_info_{un,}set_flag.
Add a 'meta' BGP_INFO flag, BGP_INFO_UNUSEABLE.
Move BGP_INFO_HOLDDOWN macro to here from bgpd.h
2006-09-07 02:24:49 +02:00
|
|
|
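/*
 * Illustrative sketch of the abstraction described above (hypothetical
 * helper; the real logic is bgp_path_info_set_flag() in bgp_route.c):
 * all status-flag changes funnel through one choke point, which adjusts
 * the prefix count whenever a path crosses the BGP_PATH_COUNTABLE
 * boundary.
 */
static inline void bgp_path_set_flag_sketch(struct bgp_path_info *pi,
					    uint32_t flag,
					    unsigned long *pcount)
{
	bool was_countable = BGP_PATH_COUNTABLE(pi);

	SET_FLAG(pi->flags, flag);

	/* Adjust the count only on a countability transition. */
	if (was_countable && !BGP_PATH_COUNTABLE(pi))
		(*pcount)--;
	else if (!was_countable && BGP_PATH_COUNTABLE(pi))
		(*pcount)++;
}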
/* Flags which indicate a route is unusable in some form */
|
2018-09-14 02:34:42 +02:00
|
|
|
#define BGP_PATH_UNUSEABLE \
|
|
|
|
(BGP_PATH_HISTORY | BGP_PATH_DAMPED | BGP_PATH_REMOVED)
|
2006-09-07 02:24:49 +02:00
|
|
|
/* Macro to check whether BGP path information is alive.  Sadly, this
|
|
|
|
 * is not equivalent to just negating BGP_PATH_UNUSEABLE above,
|
|
|
|
 * because the additional VALID flag has the opposite sense.
|
|
|
|
*/
|
2018-09-14 02:34:42 +02:00
|
|
|
#define BGP_PATH_HOLDDOWN(BI) \
|
|
|
|
(!CHECK_FLAG((BI)->flags, BGP_PATH_VALID) \
|
|
|
|
|| CHECK_FLAG((BI)->flags, BGP_PATH_UNUSEABLE))
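/*
 * Usage sketch (hypothetical helper): counting the paths on a dest that
 * are neither held down nor otherwise unusable, assuming the usual
 * path-list accessor from bgp_table.h.
 */
static inline unsigned int bgp_dest_live_paths_sketch(struct bgp_dest *dest)
{
	struct bgp_path_info *pi;
	unsigned int n = 0;

	for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next)
		if (!BGP_PATH_HOLDDOWN(pi))
			n++;

	return n;
}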
|
2006-09-07 02:24:49 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
#define DISTRIBUTE_IN_NAME(F) ((F)->dlist[FILTER_IN].name)
|
|
|
|
#define DISTRIBUTE_IN(F) ((F)->dlist[FILTER_IN].alist)
|
|
|
|
#define DISTRIBUTE_OUT_NAME(F) ((F)->dlist[FILTER_OUT].name)
|
|
|
|
#define DISTRIBUTE_OUT(F) ((F)->dlist[FILTER_OUT].alist)
|
|
|
|
|
|
|
|
#define PREFIX_LIST_IN_NAME(F) ((F)->plist[FILTER_IN].name)
|
|
|
|
#define PREFIX_LIST_IN(F) ((F)->plist[FILTER_IN].plist)
|
|
|
|
#define PREFIX_LIST_OUT_NAME(F) ((F)->plist[FILTER_OUT].name)
|
|
|
|
#define PREFIX_LIST_OUT(F) ((F)->plist[FILTER_OUT].plist)
|
|
|
|
|
|
|
|
#define FILTER_LIST_IN_NAME(F) ((F)->aslist[FILTER_IN].name)
|
|
|
|
#define FILTER_LIST_IN(F) ((F)->aslist[FILTER_IN].aslist)
|
|
|
|
#define FILTER_LIST_OUT_NAME(F) ((F)->aslist[FILTER_OUT].name)
|
|
|
|
#define FILTER_LIST_OUT(F) ((F)->aslist[FILTER_OUT].aslist)
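/*
 * Illustrative sketch (hypothetical helper, not bgpd's actual input
 * path): how the per-direction accessors above are typically consulted
 * for an inbound prefix.
 */
static inline bool bgp_input_denied_sketch(struct bgp_filter *filter,
					   const struct prefix *p)
{
	/* distribute-list in */
	if (DISTRIBUTE_IN(filter)
	    && access_list_apply(DISTRIBUTE_IN(filter), p) == FILTER_DENY)
		return true;

	/* prefix-list in */
	if (PREFIX_LIST_IN(filter)
	    && prefix_list_apply(PREFIX_LIST_IN(filter), p) == PREFIX_DENY)
		return true;

	return false;
}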
|
|
|
|
|
2004-09-13 Jose Luis Rubio <jrubio@dit.upm.es>
(at Technical University of Madrid as part of Euro6ix Project)
Enhanced Route Server functionality and Route-Maps:
* bgpd/bgpd.h: Modified 'struct peer' and 'struct bgp_filter' to
support rs-clients. A 'struct bgp_table *rib' has been added to the
first (to maintain a separate RIB for each rs-client) and two new
route-maps have been added to the last (for import/export policies).
Added the following #defines: RMAP_{IN|OUT|IMPORT|EXPORT|MAX},
PEER_RMAP_TYPE_{IMPORT|EXPORT} and BGP_CLEAR_SOFT_RSCLIENT.
* bgpd/bgpd.c: Modified the functions that create/delete/etc peers in
order to consider the new fields included in 'struct peer' for
supporting rs-clients, i.e. the import/export route-maps and the
'struct bgp_table'.
* bgpd/bgp_route.{ch}: Modified several functions related to
receiving/sending announcements in order to support the new Route Server
capabilities.
Function 'bgp_process' has been reorganized, creating an auxiliary
function for best path selection ('bgp_best_selection').
Modified 'bgp_show' and 'bgp_show_route' for displaying information
about any RIB (and not only the main bgp RIB).
Added commands for displaying information about RS-clients RIBs:
'show bgp rsclient (A.B.C.D|X:X::X:X)', 'show bgp rsclient
(A.B.C.D|X:X::X:X) X:X::X:X/M', etc
* bgpd/bgp_table.{ch}: The structure 'struct bgp_table' now has two
new fields: type (which can take the values BGP_TABLE_{MAIN|RSCLIENT})
and 'void *owner' which points to 'struct bgp' or 'struct peer' which
owns the table.
When creating a new bgp_table by default 'type=BGP_TABLE_MAIN' is set.
* bgpd/bgp_vty.c: The commands 'neighbor ... route-server-client' and
'no neighbor ... route-server-client' now not only set/unset the flag
PEER_FLAG_RSERVER_CLIENT, but they create/destroy the 'struct
bgp_table' of the peer. Special actions are taken for peer_groups.
Command 'neighbor ... route-map WORD (in|out)' now also supports two
new kinds of route-map: 'import' and 'export'.
Added commands 'clear bgp * rsclient', etc. These commands allow a new
kind of soft_reconfig which affects only the RIB of the specified
RS-client.
Added commands 'show bgp rsclient summary', etc which display a
summary of the rs-clients configured for the corresponding address
family.
* bgpd/bgp_routemap.c: A new match statement is available,
'match peer (A.B.C.D|X:X::X:X)'. This statement can only be used in
import/export route-maps, and it matches when the peer who announces
(when used in an import route-map) or is going to receive (when used
in an export route-map) the route is the same as the one specified
in the statement.
For peer-groups the statement matches if the specified peer is member
of the peer-group.
A special version of the command, 'match peer local', matches with
routes originated by the Route Server (defined with 'network ...',
redistributed routes and default-originate).
* lib/routemap.{ch}: Added a new clause 'call NAME' for use in
route-maps. It jumps into the specified route-map and when it returns
the first route-map ends if the called RM returns DENY_MATCH, or
continues otherwise.
2004-09-13 07:12:46 +02:00
|
|
|
#define ROUTE_MAP_IN_NAME(F) ((F)->map[RMAP_IN].name)
|
|
|
|
#define ROUTE_MAP_IN(F) ((F)->map[RMAP_IN].map)
|
|
|
|
#define ROUTE_MAP_OUT_NAME(F) ((F)->map[RMAP_OUT].name)
|
|
|
|
#define ROUTE_MAP_OUT(F) ((F)->map[RMAP_OUT].map)
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
#define UNSUPPRESS_MAP_NAME(F) ((F)->usmap.name)
|
|
|
|
#define UNSUPPRESS_MAP(F) ((F)->usmap.map)
|
|
|
|
|
bgpd: conditional advertisement
Implemented as per the feature description given in the source link.
Description:
The BGP conditional advertisement feature uses the non-exist-map or exist-map
and the advertise-map keywords of the neighbor advertise-map command in order
to track routes by the route prefix.
non-exist-map :
If a route prefix is not present in output of the non-exist-map command, then
the route specified by the advertise-map command is announced.
exist-map :
If a route prefix is present in output of the exist-map command, then the route
specified by the advertise-map command is announced.
The conditional BGP announcements are sent in addition to the normal
announcements that a BGP router sends to its peers.
The conditional advertisement process is triggered by the BGP scanner process,
which runs every 60 seconds. This means that the maximum time for the conditional
advertisement to take effect is 60 seconds. The conditional advertisement can take
effect sooner, depending on when the tracked route is removed from the BGP table
and when the next instance of the BGP scanner occurs.
Sample Configuration on DUT
---------------------------
Router2# show running-config
Building configuration...
Current configuration:
!
frr version 7.6-dev-MyOwnFRRVersion
frr defaults traditional
hostname router
log file /var/log/frr/bgpd.log
log syslog informational
hostname Router2
service integrated-vtysh-config
!
debug bgp updates in
debug bgp updates out
!
debug route-map
!
ip route 200.200.0.0/16 blackhole
ipv6 route 2001:db8::200/128 blackhole
!
interface enp0s9
ip address 10.10.10.2/24
!
interface enp0s10
ip address 10.10.20.2/24
!
interface lo
ip address 2.2.2.2/24
ipv6 address 2001:db8::2/128
!
router bgp 2
bgp log-neighbor-changes
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
!
address-family ipv4 unicast
network 2.2.2.0/24
network 200.200.0.0/16
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE non-exist-map CONDITION
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
address-family ipv6 unicast
network 2001:db8::2/128
network 2001:db8::200/128
neighbor 10.10.10.1 activate
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE_6 non-exist-map CONDITION_6
neighbor 10.10.20.3 activate
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
access-list CONDITION seq 5 permit 3.3.3.0/24
access-list ADVERTISE seq 5 permit 2.2.2.0/24
access-list ADVERTISE seq 6 permit 200.200.0.0/16
access-list ADVERTISE seq 7 permit 20.20.0.0/16
!
ipv6 access-list ADVERTISE_6 seq 5 permit 2001:db8::2/128
ipv6 access-list CONDITION_6 seq 5 permit 2001:db8::3/128
!
route-map ADVERTISE permit 10
match ip address ADVERTISE
!
route-map CONDITION permit 10
match ip address CONDITION
!
route-map ADVERTISE_6 permit 10
match ipv6 address ADVERTISE_6
!
route-map CONDITION_6 permit 10
match ipv6 address CONDITION_6
!
line vty
!
end
Router2#
Withdraw when non-exist-map prefixes present in BGP table:
----------------------------------------------------------
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 3.3.3.0/24 10.10.20.3 0 0 3 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 4 routes and 4 total paths
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::3/128 fe80::a00:27ff:fe76:6738 0 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 4 routes and 4 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
BGP neighbor is 10.10.10.1, remote AS 1, local AS 2, external link
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Withdraw
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Withdraw
1 accepted prefixes
!--- Output suppressed.
Router2#
Here 2.2.2.0/24 & 200.200.0.0/16 (the prefixes in the advertise-map) are withdrawn
by the conditional advertisement scanner, as the prefix (3.3.3.0/24) specified
by the non-exist-map is present in the BGP table.
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 3.3.3.0/24 0.0.0.0 0 3 i
Total number of prefixes 2
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::3/128 :: 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Advertise when non-exist-map prefixes not present in BGP table:
---------------------------------------------------------------
After removing 3.3.3.0/24 (the prefix present in the non-exist-map),
2.2.2.0/24 & 200.200.0.0/16 (the prefixes present in the advertise-map) are advertised.
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 3 routes and 3 total paths
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 3 routes and 3 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Advertise
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Advertise
1 accepted prefixes
!--- Output suppressed.
Router2#
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Total number of prefixes 3
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-09-29 11:46:04 +02:00
|
|
|
#define ADVERTISE_MAP_NAME(F) ((F)->advmap.aname)
|
|
|
|
#define ADVERTISE_MAP(F) ((F)->advmap.amap)
|
|
|
|
|
|
|
|
#define ADVERTISE_CONDITION(F) ((F)->advmap.condition)
|
|
|
|
|
|
|
|
#define CONDITION_MAP_NAME(F) ((F)->advmap.cname)
|
|
|
|
#define CONDITION_MAP(F) ((F)->advmap.cmap)
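/*
 * Decision sketch for the conditional advertisement scanner described
 * above (self-contained illustration; `exist_map` and
 * `tracked_prefix_present` are stand-ins for the real route-map and
 * table lookups).
 */
static inline bool bgp_cond_adv_should_advertise_sketch(bool exist_map,
							bool tracked_prefix_present)
{
	/* exist-map: advertise while the tracked prefix is present;
	 * non-exist-map: advertise while it is absent. */
	return exist_map ? tracked_prefix_present : !tracked_prefix_present;
}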
|
|
|
|
|
2015-12-07 20:56:02 +01:00
|
|
|
/* path PREFIX (addpath rxid NUMBER) */
|
|
|
|
#define PATH_ADDPATH_STR_BUFFER (PREFIX2STR_BUFFER + 32)
|
|
|
|
|
2018-10-02 21:50:10 +02:00
|
|
|
enum bgp_path_type {
|
|
|
|
BGP_PATH_SHOW_ALL,
|
|
|
|
BGP_PATH_SHOW_BESTPATH,
|
|
|
|
BGP_PATH_SHOW_MULTIPATH
|
|
|
|
};
|
2015-05-20 02:58:11 +02:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
static inline void bgp_bump_version(struct bgp_dest *dest)
|
2015-05-20 03:03:47 +02:00
|
|
|
{
|
2020-03-27 00:11:58 +01:00
|
|
|
dest->version = bgp_table_next_version(bgp_dest_table(dest));
|
2015-05-20 03:03:47 +02:00
|
|
|
}
|
|
|
|
|
2017-03-09 15:54:20 +01:00
|
|
|
static inline int bgp_fibupd_safi(safi_t safi)
|
|
|
|
{
|
|
|
|
if (safi == SAFI_UNICAST || safi == SAFI_MULTICAST
|
2018-01-26 18:36:24 +01:00
|
|
|
|| safi == SAFI_LABELED_UNICAST
|
|
|
|
|| safi == SAFI_FLOWSPEC)
|
2017-03-09 15:54:20 +01:00
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-02-28 12:11:01 +01:00
|
|
|
/* Flag if the route path's family matches params. */
|
|
|
|
static inline bool is_pi_family_matching(struct bgp_path_info *pi,
|
|
|
|
afi_t afi, safi_t safi)
|
|
|
|
{
|
|
|
|
struct bgp_table *table;
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *dest;
|
2019-02-28 12:11:01 +01:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
dest = pi->net;
|
|
|
|
if (!dest)
|
2019-02-28 12:11:01 +01:00
|
|
|
return false;
|
2020-03-27 00:11:58 +01:00
|
|
|
table = bgp_dest_table(dest);
|
2019-02-28 12:11:01 +01:00
|
|
|
if (table &&
|
|
|
|
table->afi == afi &&
|
|
|
|
table->safi == safi)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-03-18 23:40:04 +01:00
|
|
|
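/* Build a scratch bgp_path_info (and extra) in caller-provided storage
 * so a route-map can be evaluated without mutating the real path. */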
static inline void prep_for_rmap_apply(struct bgp_path_info *dst_pi,
|
|
|
|
struct bgp_path_info_extra *dst_pie,
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *dest,
|
2020-03-18 23:40:04 +01:00
|
|
|
struct bgp_path_info *src_pi,
|
|
|
|
struct peer *peer, struct attr *attr)
|
|
|
|
{
|
|
|
|
memset(dst_pi, 0, sizeof(struct bgp_path_info));
|
|
|
|
dst_pi->peer = peer;
|
|
|
|
dst_pi->attr = attr;
|
2020-03-27 00:11:58 +01:00
|
|
|
dst_pi->net = dest;
|
2020-03-24 21:57:44 +01:00
|
|
|
dst_pi->flags = src_pi->flags;
|
|
|
|
dst_pi->type = src_pi->type;
|
|
|
|
dst_pi->sub_type = src_pi->sub_type;
|
|
|
|
dst_pi->mpath = src_pi->mpath;
|
2020-03-18 23:40:04 +01:00
|
|
|
if (src_pi->extra) {
|
|
|
|
memcpy(dst_pie, src_pi->extra,
|
|
|
|
sizeof(struct bgp_path_info_extra));
|
|
|
|
dst_pi->extra = dst_pie;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-06 04:25:56 +01:00
|
|
|
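/* Advertising is allowed unless suppress-fib-pending is enabled, the
 * route's FIB install is still pending, and bgpd is actually using the
 * FIB (i.e. the no-fib option is not set). */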
static inline bool bgp_check_advertise(struct bgp *bgp, struct bgp_dest *dest)
|
|
|
|
{
|
|
|
|
return (!(BGP_SUPPRESS_FIB_ENABLED(bgp) &&
|
|
|
|
CHECK_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING) &&
|
|
|
|
(!bgp_option_check(BGP_OPT_NO_FIB))));
|
|
|
|
}
|
|
|
|
|
2019-05-09 11:12:14 +02:00
|
|
|
/* called before bgp_process() */
|
|
|
|
DECLARE_HOOK(bgp_process,
|
2020-03-27 00:11:58 +01:00
|
|
|
(struct bgp * bgp, afi_t afi, safi_t safi, struct bgp_dest *bn,
|
|
|
|
struct peer *peer, bool withdraw),
|
|
|
|
(bgp, afi, safi, bn, peer, withdraw))
|
2019-05-09 11:12:14 +02:00
|
|
|
|
bgpd: show < ip > bgp < ipv4 | ipv6 > all
This commit
=> provides the "all" option, to display the table entries for all (or specific) AFI/SAFIs.
=> Also introduced "show_flags" to avoid passing multiple arguments (use_json, wide, all)
to functions
1. show <ip> bgp <ipv4/ipv6> <all> <wide|json>
2. show <ip> bgp <ipv4/ipv6> <all> summary <json>
3. show <ip> bgp <ipv4/ipv6> <all> cidr-only <wide|json>
4. show <ip> bgp <ipv4/ipv6> <all> community <wide|json>
5. show <ip> bgp <ipv4/ipv6> <all> dampening <dampened-paths|flap-statistics|parameters> <wide|json>
6. show <ip> bgp <ipv4/ipv6> <all> neighbors A.B.C.D advertised-routes|filtered-routes|received-routes <wide|json>
show bgp all summary == show ip bgp all summary => output is same => display entries for all AFIs and for each SAFI.
show bgp ipv4 all summary == show ip bgp ipv4 all summary => output is same => display entries for each SAFI in AFI_IP
show bgp ipv6 all summary == show ip bgp ipv6 all summary => output is same => display entries for each SAFI in AFI_IP6
similarly for all other commands.
sample output
1. show <ip> bgp <ipv4/ipv6> <all> <wide|json>
router# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 6, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.1/32 0.0.0.0 0 32768 ?
*>i2.2.2.2/32 192.168.56.152 0 100 0 ?
* i10.0.2.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
* i192.168.56.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
*>i192.168.123.245/32 192.168.56.152 0 100 0 ?
*>i192.168.223.245/32 192.168.56.152 0 100 0 ?
Displayed 6 routes and 8 total paths
For address family: IPv6 Unicast
BGP table version is 3, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 32768 ?
*>i2001:db8::2/128 fe80::a00:27ff:fefc:2aa 0 100 0 ?
*> 2001:db8:85a3::8a2e:370:7334/128 :: 0 32768 ?
Displayed 3 routes and 3 total paths
router#
router# show ip bgp ipv4 all wide
For address family: IPv4 Unicast
BGP table version is 6, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.1/32 0.0.0.0 0 32768 ?
*>i2.2.2.2/32 192.168.56.152 0 100 0 ?
* i10.0.2.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
* i192.168.56.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
*>i192.168.123.245/32 192.168.56.152 0 100 0 ?
*>i192.168.223.245/32 192.168.56.152 0 100 0 ?
Displayed 6 routes and 8 total paths
router#
router#
router# show ip bgp ipv6 all wide
For address family: IPv6 Unicast
BGP table version is 3, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 32768 ?
*>i2001:db8::2/128 fe80::a00:27ff:fefc:2aa 0 100 0 ?
*> 2001:db8:85a3::8a2e:370:7334/128 :: 0 32768 ?
Displayed 3 routes and 3 total paths
router#
router# show bgp all wide
For address family: IPv4 Unicast
BGP table version is 6, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.1/32 0.0.0.0 0 32768 ?
*>i2.2.2.2/32 192.168.56.152 0 100 0 ?
* i10.0.2.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
* i192.168.56.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
*>i192.168.123.245/32 192.168.56.152 0 100 0 ?
*>i192.168.223.245/32 192.168.56.152 0 100 0 ?
Displayed 6 routes and 8 total paths
For address family: IPv6 Unicast
BGP table version is 3, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 32768 ?
*>i2001:db8::2/128 fe80::a00:27ff:fefc:2aa 0 100 0 ?
*> 2001:db8:85a3::8a2e:370:7334/128 :: 0 32768 ?
Displayed 3 routes and 3 total paths
router#
router#
router# show bgp ipv4 all wide
For address family: IPv4 Unicast
BGP table version is 6, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.1/32 0.0.0.0 0 32768 ?
*>i2.2.2.2/32 192.168.56.152 0 100 0 ?
* i10.0.2.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
* i192.168.56.0/24 192.168.56.152 0 100 0 ?
*> 0.0.0.0 0 32768 ?
*>i192.168.123.245/32 192.168.56.152 0 100 0 ?
*>i192.168.223.245/32 192.168.56.152 0 100 0 ?
Displayed 6 routes and 8 total paths
router#
router# show bgp ipv6 all wide
For address family: IPv6 Unicast
BGP table version is 3, local router ID is 1.1.1.1, vrf id 0
Default local pref 100, local AS 1
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 32768 ?
*>i2001:db8::2/128 fe80::a00:27ff:fefc:2aa 0 100 0 ?
*> 2001:db8:85a3::8a2e:370:7334/128 :: 0 32768 ?
Displayed 3 routes and 3 total paths
router#
Router1# show bgp all dampening parameters
For address family: IPv4 Unicast
Half-life time: 15 min
Reuse penalty: 750
Suppress penalty: 2000
Max suppress time: 60 min
Max suppress penalty: 12000
For address family: IPv4 Multicast
Half-life time: 20 min
Reuse penalty: 1000
Suppress penalty: 10000
Max suppress time: 40 min
Max suppress penalty: 4000
For address family: IPv4 VPN
dampening not enabled for IPv4 VPN
For address family: IPv4 Encap
dampening not enabled for IPv4 Encap
For address family: IPv4 Labeled Unicast
dampening not enabled for IPv4 Labeled Unicast
For address family: IPv4 Flowspec
dampening not enabled for IPv4 Flowspec
For address family: IPv6 Unicast
dampening not enabled for IPv6 Unicast
For address family: IPv6 Multicast
Half-life time: 10 min
Reuse penalty: 1500
Suppress penalty: 15000
Max suppress time: 20 min
Max suppress penalty: 6000
For address family: IPv6 VPN
dampening not enabled for IPv6 VPN
For address family: IPv6 Encap
dampening not enabled for IPv6 Encap
For address family: IPv6 Labeled Unicast
dampening not enabled for IPv6 Labeled Unicast
For address family: IPv6 Flowspec
dampening not enabled for IPv6 Flowspec
For address family: L2VPN EVPN
dampening not enabled for L2VPN EVPN
router#
bgpd: all option with json-c apis used
Replaced vty_out with json-c wrapper functions for the all-option
support in show <ip> bgp commands
Sample output:
Router2# show bgp all json
{
"ipv4Unicast":{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 8,
"routerId": "128.16.16.1",
"defaultLocPrf": 100,
"localAS": 2,
"routes": { "128.16.16.0/24": [
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"128.16.16.0",
"prefixLen":24,
"network":"128.16.16.0\/24",
"metric":0,
"weight":32768,
"peerId":"(unspec)",
"path":"",
"origin":"IGP",
"nexthops":[
{
"ip":"0.0.0.0",
"hostname":"router",
"afi":"ipv4",
"used":true
}
]
}
],"130.130.0.0/16": [
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"130.130.0.0",
"prefixLen":16,
"network":"130.130.0.0\/16",
"metric":0,
"weight":32768,
"peerId":"(unspec)",
"path":"",
"origin":"IGP",
"nexthops":[
{
"ip":"0.0.0.0",
"hostname":"router",
"afi":"ipv4",
"used":true
}
]
}
],"192.168.50.0/24": [
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"192.168.50.0",
"prefixLen":24,
"network":"192.168.50.0\/24",
"metric":0,
"weight":0,
"peerId":"10.10.20.3",
"path":"3",
"origin":"IGP",
"nexthops":[
{
"ip":"10.10.20.3",
"hostname":"router",
"afi":"ipv4",
"used":true
}
]
}
],"200.200.200.0/24": [
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"200.200.200.0",
"prefixLen":24,
"network":"200.200.200.0\/24",
"metric":0,
"weight":0,
"peerId":"10.10.10.1",
"path":"1",
"origin":"IGP",
"nexthops":[
{
"ip":"10.10.10.1",
"hostname":"router",
"afi":"ipv4",
"used":true
}
]
}
] } }
,
"ipv4Multicast":{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 0,
"routerId": "128.16.16.1",
"defaultLocPrf": 100,
"localAS": 2,
"routes": { } }
,
"ipv4Flowspec":{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 0,
"routerId": "128.16.16.1",
"defaultLocPrf": 100,
"localAS": 2,
"routes": { } }
,
"ipv6Unicast":{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 11,
"routerId": "128.16.16.1",
"defaultLocPrf": 100,
"localAS": 2,
"routes": { "2001:db8::2/128": [
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"2001:db8::2",
"prefixLen":128,
"network":"2001:db8::2\/128",
"metric":0,
"weight":32768,
"peerId":"(unspec)",
"path":"",
"origin":"incomplete",
"nexthops":[
{
"ip":"::",
"hostname":"router",
"afi":"ipv6",
"scope":"global",
"used":true
}
]
}
],"2001:db8::3/128": [
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"2001:db8::3",
"prefixLen":128,
"network":"2001:db8::3\/128",
"metric":0,
"weight":0,
"peerId":"10.10.20.3",
"path":"3",
"origin":"incomplete",
"nexthops":[
{
"ip":"2001:db8:0:20::3",
"hostname":"router",
"afi":"ipv6",
"scope":"global"
},
{
"ip":"fe80::a00:27ff:fe76:6738",
"hostname":"router",
"afi":"ipv6",
"scope":"link-local",
"used":true
}
]
}
],"2001:db8:0:20::/64": [
{
"valid":true,
"pathFrom":"external",
"prefix":"2001:db8:0:20::",
"prefixLen":64,
"network":"2001:db8:0:20::\/64",
"metric":0,
"weight":0,
"peerId":"10.10.20.3",
"path":"3",
"origin":"incomplete",
"nexthops":[
{
"ip":"2001:db8:0:20::3",
"hostname":"router",
"afi":"ipv6",
"scope":"global"
},
{
"ip":"fe80::a00:27ff:fe76:6738",
"hostname":"router",
"afi":"ipv6",
"scope":"link-local",
"used":true
}
]
},
{
"valid":true,
"bestpath":true,
"pathFrom":"external",
"prefix":"2001:db8:0:20::",
"prefixLen":64,
"network":"2001:db8:0:20::\/64",
"metric":0,
"weight":32768,
"peerId":"(unspec)",
"path":"",
"origin":"incomplete",
"nexthops":[
{
"ip":"::",
"hostname":"router",
"afi":"ipv6",
"scope":"global",
"used":true
}
]
}
] } }
,
"ipv6Multicast":{
"vrfId": 0,
"vrfName": "default",
"tableVersion": 0,
"routerId": "128.16.16.1",
"defaultLocPrf": 100,
"localAS": 2,
"routes": { } }
}
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-07-30 17:19:09 +02:00
|
|
|
/* BGP show options */
|
|
|
|
#define BGP_SHOW_OPT_JSON (1 << 0)
|
|
|
|
#define BGP_SHOW_OPT_WIDE (1 << 1)
|
|
|
|
#define BGP_SHOW_OPT_AFI_ALL (1 << 2)
|
|
|
|
#define BGP_SHOW_OPT_AFI_IP (1 << 3)
|
|
|
|
#define BGP_SHOW_OPT_AFI_IP6 (1 << 4)
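/*
 * Usage sketch for the option bits above (illustrative): callers
 * collect them in a single flags word instead of passing separate
 * booleans, e.g.
 *
 *   uint16_t show_flags = 0;
 *
 *   if (use_json)
 *           SET_FLAG(show_flags, BGP_SHOW_OPT_JSON);
 *   if (wide)
 *           SET_FLAG(show_flags, BGP_SHOW_OPT_WIDE);
 *   ...
 *   if (CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON))
 *           ...emit JSON instead of plain vty output...
 */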
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Prototypes. */
|
2020-03-27 00:11:58 +01:00
|
|
|
extern void bgp_rib_remove(struct bgp_dest *dest, struct bgp_path_info *pi,
|
2017-08-17 08:19:58 +02:00
|
|
|
struct peer *peer, afi_t afi, safi_t safi);
|
2020-10-17 15:43:14 +02:00
|
|
|
extern void bgp_process_queue_init(struct bgp *bgp);
|
2005-06-28 14:44:16 +02:00
|
|
|
extern void bgp_route_init(void);
|
[bgpd] Stability fixes including bugs 397, 492
I've spent the last several weeks working on stability fixes to bgpd.
These patches fix all of the numerous crashes, assertion failures, memory
leaks and memory stomping I could find. Valgrind was used extensively.
Added new function bgp_exit() to help catch problems. If "debug bgp" is
configured and bgpd exits with status of 0, statistics on remaining
lib/memory.c allocations are printed to stderr. It is my hope that other
developers will use this to stay on top of memory issues.
Example questionable exit:
bgpd: memstats: Current memory utilization in module LIB:
bgpd: memstats: Link List : 6
bgpd: memstats: Link Node : 5
bgpd: memstats: Hash : 8
bgpd: memstats: Hash Bucket : 2
bgpd: memstats: Hash Index : 8
bgpd: memstats: Work queue : 3
bgpd: memstats: Work queue item : 2
bgpd: memstats: Work queue name string : 3
bgpd: memstats: Current memory utilization in module BGP:
bgpd: memstats: BGP instance : 1
bgpd: memstats: BGP peer : 1
bgpd: memstats: BGP peer hostname : 1
bgpd: memstats: BGP attribute : 1
bgpd: memstats: BGP extra attributes : 1
bgpd: memstats: BGP aspath : 1
bgpd: memstats: BGP aspath str : 1
bgpd: memstats: BGP table : 24
bgpd: memstats: BGP node : 1
bgpd: memstats: BGP route : 1
bgpd: memstats: BGP synchronise : 8
bgpd: memstats: BGP Process queue : 1
bgpd: memstats: BGP node clear queue : 1
bgpd: memstats: NOTE: If configuration exists, utilization may be expected.
Example clean exit:
bgpd: memstats: No remaining tracked memory utilization.
This patch fixes bug #397: "Invalid free in bgp_announce_check()".
This patch fixes bug #492: "SIGBUS in bgpd/bgp_route.c:
bgp_clear_route_node()".
My apologies for not separating out these changes into individual patches.
The complexity of doing so boggled what is left of my brain. I hope this
is all still useful to the community.
This code has been production tested, in non-route-server-client mode, on
a linux 32-bit box and a 64-bit box.
Release/reset functions, used by bgp_exit(), added to:
bgpd/bgp_attr.c,h
bgpd/bgp_community.c,h
bgpd/bgp_dump.c,h
bgpd/bgp_ecommunity.c,h
bgpd/bgp_filter.c,h
bgpd/bgp_nexthop.c,h
bgpd/bgp_route.c,h
lib/routemap.c,h
File by file analysis:
* bgpd/bgp_aspath.c: Prevent re-use of ashash after it is released.
* bgpd/bgp_attr.c: #if removed uncalled cluster_dup().
* bgpd/bgp_clist.c,h: Allow community_list_terminate() to be called from
bgp_exit().
* bgpd/bgp_filter.c: Fix aslist->name use without allocation check, and
also fix memory leak.
* bgpd/bgp_main.c: Created bgp_exit() exit routine. This function frees
allocations made as part of bgpd initialization and, to some extent,
configuration. If "debug bgp" is configured, memory stats are printed
as described above.
* bgpd/bgp_nexthop.c: zclient_new() already allocates stream for
ibuf/obuf, so bgp_scan_init() shouldn't do it too. Also, made it so
zlookup is global so bgp_exit() can use it.
* bgpd/bgp_packet.c: bgp_capability_msg_parse() call to bgp_clear_route()
adjusted to use new BGP_CLEAR_ROUTE_NORMAL flag.
* bgpd/bgp_route.h: Correct reference counter "lock" to be signed.
bgp_clear_route() now accepts a bgp_clear_route_type of either
BGP_CLEAR_ROUTE_NORMAL or BGP_CLEAR_ROUTE_MY_RSCLIENT.
* bgpd/bgp_route.c:
- bgp_process_rsclient(): attr was being zero'ed and then
bgp_attr_extra_free() was being called with it, even though it was
never filled with valid data.
- bgp_process_rsclient(): Make sure rsclient->group is not NULL before
use.
- bgp_processq_del(): Add call to bgp_table_unlock().
- bgp_process(): Add call to bgp_table_lock().
- bgp_update_rsclient(): memset clearing of new_attr not needed since
declaration with "= { 0 }" does it. memset was already commented
out.
- bgp_update_rsclient(): Fix screwed up misleading indentation.
- bgp_withdraw_rsclient(): Fix screwed up misleading indentation.
- bgp_clear_route_node(): Support BGP_CLEAR_ROUTE_MY_RSCLIENT.
- bgp_clear_node_queue_del(): Add call to bgp_table_unlock() and also
free struct bgp_clear_node_queue used for work item.
- bgp_clear_node_complete(): Do peer_unlock() after BGP_EVENT_ADD() in
case peer is released by peer_unlock() call.
- bgp_clear_route_table(): Support BGP_CLEAR_ROUTE_MY_RSCLIENT. Use
struct bgp_clear_node_queue to supply data to worker. Add call to
bgp_table_lock().
- bgp_clear_route(): Add support for BGP_CLEAR_ROUTE_NORMAL or
BGP_CLEAR_ROUTE_MY_RSCLIENT.
- bgp_clear_route_all(): Use BGP_CLEAR_ROUTE_NORMAL.
Bug 397 fixes:
- bgp_default_originate()
- bgp_announce_table()
* bgpd/bgp_table.h:
- struct bgp_table: Added reference count. Changed type of owner to be
"struct peer *" rather than "void *".
- struct bgp_node: Correct reference counter "lock" to be signed.
* bgpd/bgp_table.c:
- Added bgp_table reference counting.
- bgp_table_free(): Fixed cleanup code. Call peer_unlock() on owner if
set.
- bgp_unlock_node(): Added assertion.
- bgp_node_get(): Added call to bgp_lock_node() to code path that it was
missing from.
* bgpd/bgp_vty.c:
- peer_rsclient_set_vty(): Call peer_lock() as part of peer assignment
to owner. Handle failure gracefully.
- peer_rsclient_unset_vty(): Add call to bgp_clear_route() with
BGP_CLEAR_ROUTE_MY_RSCLIENT purpose.
* bgpd/bgp_zebra.c: Made it so zclient is global so bgp_exit() can use it.
* bgpd/bgpd.c:
- peer_lock(): Allow to be called when status is "Deleted".
- peer_deactivate(): Supply BGP_CLEAR_ROUTE_NORMAL purpose to
bgp_clear_route() call.
- peer_delete(): Common variable listnode pn. Fix bug in which rsclient
was only dealt with if not part of a peer group. Call
bgp_clear_route() for rsclient, if appropriate, and do so with
BGP_CLEAR_ROUTE_MY_RSCLIENT purpose.
- peer_group_get(): Use XSTRDUP() instead of strdup() for conf->host.
- peer_group_bind(): Call bgp_clear_route() for rsclient, and do so with
BGP_CLEAR_ROUTE_MY_RSCLIENT purpose.
- bgp_create(): Use XSTRDUP() instead of strdup() for peer_self->host.
- bgp_delete(): Delete peers before groups, rather than after. And then
rather than deleting rsclients, verify that there are none at this
point.
- bgp_unlock(): Add assertion.
- bgp_free(): Call bgp_table_finish() rather than doing XFREE() itself.
* lib/command.c,h: Compiler warning fixes. Add cmd_terminate(). Fixed
massive leak in install_element() in which cmd_make_descvec() was being
called more than once for the same cmd->strvec/string/doc.
* lib/log.c: Make closezlog() check fp before calling fclose().
* lib/memory.c: Catch when alloc count goes negative by using signed
counts. Correct #endif comment. Add log_memstats_stderr().
* lib/memory.h: Add log_memstats_stderr().
* lib/thread.c: thread->funcname was being accessed in thread_call() after
it had been freed. Rearranged things so that thread_call() frees
funcname. Also made it so thread_master_free() cleans up cpu_record.
* lib/vty.c,h: Use global command_cr. Add vty_terminate().
* lib/zclient.c,h: Re-enable zclient_free().
2009-07-18 07:44:03 +02:00
|
|
|
extern void bgp_route_finish(void);
|
2016-10-25 04:04:24 +02:00
|
|
|
extern void bgp_cleanup_routes(struct bgp *);
|
2005-06-28 14:44:16 +02:00
|
|
|
extern void bgp_announce_route(struct peer *, afi_t, safi_t);
|
2015-05-20 03:03:47 +02:00
|
|
|
extern void bgp_stop_announce_route_timer(struct peer_af *paf);
|
2005-06-28 14:44:16 +02:00
|
|
|
extern void bgp_announce_route_all(struct peer *);
|
|
|
|
extern void bgp_default_originate(struct peer *, afi_t, safi_t, int);
|
|
|
|
extern void bgp_soft_reconfig_in(struct peer *, afi_t, safi_t);
|
2015-11-10 16:29:12 +01:00
|
|
|
extern void bgp_clear_route(struct peer *, afi_t, safi_t);
|
2005-06-28 14:44:16 +02:00
|
|
|
extern void bgp_clear_route_all(struct peer *);
|
|
|
|
extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t);
|
|
|
|
extern void bgp_clear_stale_route(struct peer *, afi_t, safi_t);
|
2020-10-01 22:08:06 +02:00
|
|
|
extern void bgp_set_stale_route(struct peer *peer, afi_t afi, safi_t safi);
|
2020-03-20 10:57:54 +01:00
|
|
|
extern bool bgp_outbound_policy_exists(struct peer *, struct bgp_filter *);
|
|
|
|
extern bool bgp_inbound_policy_exists(struct peer *, struct bgp_filter *);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
extern struct bgp_dest *bgp_afi_node_get(struct bgp_table *table, afi_t afi,
|
2020-03-22 02:56:36 +01:00
|
|
|
safi_t safi, const struct prefix *p,
|
2015-05-20 03:29:19 +02:00
|
|
|
struct prefix_rd *prd);
|
2018-10-03 00:15:34 +02:00
|
|
|
extern struct bgp_path_info *bgp_path_info_lock(struct bgp_path_info *path);
|
|
|
|
extern struct bgp_path_info *bgp_path_info_unlock(struct bgp_path_info *path);
|
2020-03-27 00:11:58 +01:00
|
|
|
extern void bgp_path_info_add(struct bgp_dest *dest, struct bgp_path_info *pi);
|
2018-12-05 15:09:35 +01:00
|
|
|
extern void bgp_path_info_extra_free(struct bgp_path_info_extra **extra);
|
2020-03-27 00:11:58 +01:00
|
|
|
extern void bgp_path_info_reap(struct bgp_dest *dest, struct bgp_path_info *pi);
|
|
|
|
extern void bgp_path_info_delete(struct bgp_dest *dest,
|
|
|
|
struct bgp_path_info *pi);
|
2018-10-02 22:41:30 +02:00
|
|
|
extern struct bgp_path_info_extra *
|
2018-10-03 00:15:34 +02:00
|
|
|
bgp_path_info_extra_get(struct bgp_path_info *path);
|
2020-03-27 00:11:58 +01:00
|
|
|
extern void bgp_path_info_set_flag(struct bgp_dest *dest,
|
2018-10-03 00:15:34 +02:00
|
|
|
struct bgp_path_info *path, uint32_t flag);
|
2020-03-27 00:11:58 +01:00
|
|
|
extern void bgp_path_info_unset_flag(struct bgp_dest *dest,
|
2018-10-03 00:15:34 +02:00
|
|
|
struct bgp_path_info *path, uint32_t flag);
|
2018-10-03 02:43:07 +02:00
|
|
|
extern void bgp_path_info_path_with_addpath_rx_str(struct bgp_path_info *pi,
|
2018-10-03 00:15:34 +02:00
|
|
|
char *buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: Regularise bgp_update_receive, add missing notifies and checks
* bgp_packet.c: (bgp_update_receive) Lots of repeated code, doing same
thing for each AFI/SAFI. Except when it doesn't, e.g. the IPv4/VPN
case was missing the EoR bgp_clear_stale_route call - the only action
really needed for EoR.
Make this function a lot more regular, using common, AFI/SAFI
independent blocks so far as possible.
Replace the 4 separate bgp_nlris with an array, indexed by an enum.
The distinct blocks that handle calling bgp_nlri_parse for each
different AFI/SAFI can now be replaced with a loop.
Transmogrify the nlri SAFI from the SAFI_MPLS_LABELED_VPN code-point
used on the wire, to the SAFI_MPLS_VPN safi_t enum we use internally
as early as possible.
The existing code was not necessarily sending a NOTIFY for NLRI
parsing errors, if they arose via bgp_nlri_sanity_check. Send the
correct NOTIFY - INVAL_NETWORK for the classic NLRIs and OPT_ATTR_ERR
for the MP ones.
EoR can now be handled in one block. The existing code seemed broken
for EoR recognition in a number of ways:
1. A v4/unicast EoR should be an empty UPDATE. However, it seemed
to be treating an UPDATE with attributes, inc. MP REACH/UNREACH,
but no classic NLRIs, as a v4/uni EoR.
2. For other AFI/SAFIs, it was treating UPDATEs with no classic
withdraw and with a zero-length MP withdraw as EoRs. However, that
would mean an UPDATE packet _with_ update NLRIs and a 0-len MP
withdraw could be classed as an EoR.
This seems to be loose coding leading to ambiguous protocol
situations and likely incorrect behaviour, rather than simply being
liberal. Be more strict about checking that an UPDATE really is an
EoR and definitely is not trying to update any NLRIs.
This same loose EoR parsing was noted by Chris Hall previously on
list.
(bgp_nlri_parse) Front end NLRI parse function, to fan-out to the correct
parser for the AFI/SAFI.
* bgp_route.c: (bgp_nlri_sanity_check) We try convert NLRI safi to
internal code-point ASAP, adjust switch for that. Leave the wire
code point in for defensive coding.
(bgp_nlri_parse) rename to bgp_nlri_parse_ip.
* tests/bgp_mp_attr_test.c: Can just use bgp_nlri_parse frontend.
2016-02-04 14:27:04 +01:00
|
|
|
extern int bgp_nlri_parse_ip(struct peer *, struct attr *, struct bgp_nlri *);
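/*
 * Sketch of the front-end fan-out described above (illustrative only;
 * bgp_nlri_parse() is the real dispatcher): each NLRI block is handed
 * to the parser for its AFI/SAFI.
 */
static inline int bgp_nlri_dispatch_sketch(struct peer *peer,
					   struct attr *attr,
					   struct bgp_nlri *nlri)
{
	switch (nlri->safi) {
	case SAFI_UNICAST:
	case SAFI_MULTICAST:
		return bgp_nlri_parse_ip(peer, attr, nlri);
	default:
		/* VPN, EVPN, labeled-unicast and flowspec NLRIs go to
		 * their own parsers (not shown here). */
		return -1;
	}
}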
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
extern bool bgp_maximum_prefix_overflow(struct peer *, afi_t, safi_t, int);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-15 19:22:56 +01:00
|
|
|
extern void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
|
|
|
|
const union g_addr *nexthop, ifindex_t ifindex,
|
|
|
|
enum nexthop_types_t nhtype, uint32_t metric,
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t type, unsigned short instance,
|
2017-11-15 19:22:56 +01:00
|
|
|
route_tag_t tag);
|
2018-03-27 21:13:34 +02:00
|
|
|
extern void bgp_redistribute_delete(struct bgp *, struct prefix *, uint8_t,
|
|
|
|
unsigned short);
|
|
|
|
extern void bgp_redistribute_withdraw(struct bgp *, afi_t, int, unsigned short);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-02-02 13:36:20 +01:00
|
|
|
extern void bgp_static_add(struct bgp *);
|
2005-06-28 14:44:16 +02:00
|
|
|
extern void bgp_static_delete(struct bgp *);
|
2015-05-20 03:04:20 +02:00
|
|
|
extern void bgp_static_redo_import_check(struct bgp *);
|
2016-02-12 21:18:28 +01:00
|
|
|
extern void bgp_purge_static_redist_routes(struct bgp *bgp);
|
2020-03-24 12:58:08 +01:00
|
|
|
extern void bgp_static_update(struct bgp *bgp, const struct prefix *p,
|
|
|
|
struct bgp_static *s, afi_t afi, safi_t safi);
|
|
|
|
extern void bgp_static_withdraw(struct bgp *bgp, const struct prefix *p,
|
|
|
|
afi_t afi, safi_t safi);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-17 20:54:10 +02:00
|
|
|
extern int bgp_static_set_safi(afi_t afi, safi_t safi, struct vty *vty,
|
2016-10-27 08:02:36 +02:00
|
|
|
const char *, const char *, const char *,
|
|
|
|
const char *, int, const char *, const char *,
|
|
|
|
const char *, const char *);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-17 20:54:10 +02:00
|
|
|
extern int bgp_static_unset_safi(afi_t afi, safi_t safi, struct vty *,
|
2016-10-27 08:02:36 +02:00
|
|
|
const char *, const char *, const char *, int,
|
|
|
|
const char *, const char *, const char *);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-06-28 14:44:16 +02:00
|
|
|
/* this is primarily for MPLS-VPN */
|
2020-03-22 02:56:36 +01:00
|
|
|
extern int bgp_update(struct peer *peer, const struct prefix *p,
|
|
|
|
uint32_t addpath_id, struct attr *attr,
|
|
|
|
afi_t afi, safi_t safi, int type, int sub_type,
|
|
|
|
struct prefix_rd *prd, mpls_label_t *label,
|
|
|
|
uint32_t num_labels, int soft_reconfig,
|
|
|
|
struct bgp_route_evpn *evpn);
|
2020-03-22 04:37:24 +01:00
|
|
|
extern int bgp_withdraw(struct peer *peer, const struct prefix *p,
|
|
|
|
uint32_t addpath_id, struct attr *attr, afi_t afi,
|
|
|
|
safi_t safi, int type, int sub_type,
|
|
|
|
struct prefix_rd *prd, mpls_label_t *label,
|
|
|
|
uint32_t num_labels, struct bgp_route_evpn *evpn);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-06-28 14:44:16 +02:00
|
|
|
/* for bgp_nexthop and bgp_damp */
|
2020-03-27 00:11:58 +01:00
|
|
|
extern void bgp_process(struct bgp *, struct bgp_dest *, afi_t, safi_t);
|
bgpd: bgpd-mrai.patch
BGP: Event-driven route announcement taking into account min route advertisement interval
ISSUE
BGP starts the routeadv timer (peer->t_routeadv) to expire in 1 sec
when a peer is established. From then on, the timer expires
periodically based on the configured MRAI value (default: 30sec for
EBGP, 5sec for IBGP). At the expiry, the write thread is triggered
that takes the routes from peer's sync FIFO (adj-rib-out) and sends
UPDATEs. This has a few drawbacks:
(1) Delay in new route announcement: Even when the last UPDATE message
was sent a while back, the next route change will necessarily have
to wait for routeadv expiry
(2) CPU usage: The timer is always armed. If the operator chooses to
configure a lower value of MRAI (zero second is a preferred choice
in many deployments) for better convergence, it leads to high CPU
usage for BGP process, even at the times of no network churn.
PATCH
Make the route advertisement event-driven - When routes are added to
peer's sync FIFO, check if the routeadv timer needs to be adjusted (or
started). Conversely, do not arm the routeadv timer unconditionally.
The patch also addresses route announcements during read-only mode
(update-delay). During read-only mode operation, the routeadv timer
is not started. When BGP comes out of read-only mode and all the
routes are processed, the timer is started for all peers with zero
expiry, so that the UPDATEs can be sent all at once. This leads to
(near-)optimal UPDATE packing.
Finally, the patch makes the "max # packets to write to peer socket at
a time" configurable. Currently it is hard-coded to 10. The command is
at the top router-bgp mode and is called "write-quanta <number>". It
is a useful convergence parameter to tweak.
Signed-off-by: Pradosh Mohapatra <pmohapat@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
2015-05-20 02:40:37 +02:00
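/*
 * Illustrative arithmetic for the event-driven MRAI scheme described
 * above (self-contained sketch, not bgpd's actual implementation):
 * instead of a free-running timer, compute the remaining holdtime when
 * routes are queued and arm the timer for just that long.
 */
static inline unsigned long
bgp_routeadv_delay_sketch(unsigned long now, unsigned long last_sent,
			  unsigned long mrai)
{
	unsigned long elapsed = now - last_sent;

	/* Interval already elapsed: advertise immediately;
	 * otherwise wait out only the remainder. */
	return elapsed >= mrai ? 0 : mrai - elapsed;
}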
|
|
|
|
|
|
|
/*
|
|
|
|
* Add an end-of-initial-update marker to the process queue. This is just a
|
|
|
|
* queue element with NULL bgp node.
|
|
|
|
*/
|
2015-11-10 16:29:12 +01:00
|
|
|
extern void bgp_add_eoiu_mark(struct bgp *);
|
2017-08-30 17:23:01 +02:00
|
|
|
extern void bgp_config_write_table_map(struct vty *, struct bgp *, afi_t,
|
|
|
|
safi_t);
|
2017-08-27 22:18:32 +02:00
|
|
|
extern void bgp_config_write_network(struct vty *, struct bgp *, afi_t, safi_t);
|
2017-08-30 17:23:01 +02:00
|
|
|
extern void bgp_config_write_distance(struct vty *, struct bgp *, afi_t,
|
|
|
|
safi_t);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-24 12:58:08 +01:00
|
|
|
extern void bgp_aggregate_delete(struct bgp *bgp, const struct prefix *p,
|
|
|
|
afi_t afi, safi_t safi,
|
|
|
|
struct bgp_aggregate *aggregate);
|
|
|
|
extern void bgp_aggregate_route(struct bgp *bgp, const struct prefix *p,
|
|
|
|
afi_t afi, safi_t safi,
|
|
|
|
struct bgp_aggregate *aggregate);
|
2020-03-22 02:56:36 +01:00
|
|
|
extern void bgp_aggregate_increment(struct bgp *bgp, const struct prefix *p,
|
2018-10-02 22:41:30 +02:00
|
|
|
struct bgp_path_info *path, afi_t afi,
|
|
|
|
safi_t safi);
|
2020-03-22 02:56:36 +01:00
|
|
|
extern void bgp_aggregate_decrement(struct bgp *bgp, const struct prefix *p,
|
2018-10-02 22:41:30 +02:00
|
|
|
struct bgp_path_info *path, afi_t afi,
|
|
|
|
safi_t safi);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-23 00:54:38 +01:00
|
|
|
extern uint8_t bgp_distance_apply(const struct prefix *p,
|
|
|
|
struct bgp_path_info *path, afi_t afi,
|
|
|
|
safi_t safi, struct bgp *bgp);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2005-06-28 14:44:16 +02:00
|
|
|
extern afi_t bgp_node_afi(struct vty *);
|
|
|
|
extern safi_t bgp_node_safi(struct vty *);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-02 22:41:30 +02:00
|
|
|
extern struct bgp_path_info *info_make(int type, int sub_type,
|
|
|
|
unsigned short instance,
|
|
|
|
struct peer *peer, struct attr *attr,
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *dest);
|
2018-10-02 22:41:30 +02:00
|
|
|
|
2020-03-22 19:50:46 +01:00
|
|
|
extern void route_vty_out(struct vty *vty, const struct prefix *p,
|
2018-10-02 22:41:30 +02:00
|
|
|
struct bgp_path_info *path, int display, safi_t safi,
|
2020-07-23 11:20:52 +02:00
|
|
|
json_object *json_paths, bool wide);
|
2020-03-22 19:50:46 +01:00
|
|
|
extern void route_vty_out_tag(struct vty *vty, const struct prefix *p,
|
2018-10-02 22:41:30 +02:00
|
|
|
struct bgp_path_info *path, int display,
|
|
|
|
safi_t safi, json_object *json);
|
2020-03-24 12:58:08 +01:00
|
|
|
extern void route_vty_out_tmp(struct vty *vty, const struct prefix *p,
|
2018-08-30 17:54:46 +02:00
|
|
|
struct attr *attr, safi_t safi, bool use_json,
|
2020-07-23 11:20:52 +02:00
|
|
|
json_object *json_ar, bool wide);
|
2020-03-22 19:50:46 +01:00
|
|
|
extern void route_vty_out_overlay(struct vty *vty, const struct prefix *p,
|
2018-10-03 00:34:03 +02:00
|
|
|
struct bgp_path_info *path, int display,
|
2017-01-09 18:26:24 +01:00
|
|
|
json_object *json);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-10-23 18:09:12 +02:00
|
|
|
extern void bgp_notify_conditional_adv_scanner(struct update_subgroup *subgrp);
|
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
extern void subgroup_process_announce_selected(struct update_subgroup *subgrp,
|
|
|
|
struct bgp_path_info *selected,
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *dest,
|
2020-03-20 10:57:54 +01:00
|
|
|
uint32_t addpath_tx_id);
|
2015-05-20 03:03:47 +02:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
extern bool subgroup_announce_check(struct bgp_dest *dest,
|
2020-03-20 10:57:54 +01:00
|
|
|
struct bgp_path_info *pi,
|
|
|
|
struct update_subgroup *subgrp,
|
bgpd: conditional advertisement
Implemented as per the feature description given in the source link.
Description:
The BGP conditional advertisement feature uses the non-exist-map or exist-map
and the advertise-map keywords of the neighbor advertise-map command in order
to track routes by the route prefix.
non-exist-map :
If a route prefix is not present in output of the non-exist-map command, then
the route specified by the advertise-map command is announced.
exist-map :
If a route prefix is present in output of the exist-map command, then the route
specified by the advertise-map command is announced.
The conditional BGP announcements are sent in addition to the normal
announcements that a BGP router sends to its peers.
The conditional advertisement process is triggered by the BGP scanner process,
which runs every 60 seconds. This means that the maximum time for the conditional
advertisement to take effect is 60 seconds. The conditional advertisement can take
effect sooner, depending on when the tracked route is removed from the BGP table
and when the next instance of the BGP scanner occurs.
Sample Configuration on DUT
---------------------------
Router2# show running-config
Building configuration...
Current configuration:
!
frr version 7.6-dev-MyOwnFRRVersion
frr defaults traditional
hostname router
log file /var/log/frr/bgpd.log
log syslog informational
hostname Router2
service integrated-vtysh-config
!
debug bgp updates in
debug bgp updates out
!
debug route-map
!
ip route 200.200.0.0/16 blackhole
ipv6 route 2001:db8::200/128 blackhole
!
interface enp0s9
ip address 10.10.10.2/24
!
interface enp0s10
ip address 10.10.20.2/24
!
interface lo
ip address 2.2.2.2/24
ipv6 address 2001:db8::2/128
!
router bgp 2
bgp log-neighbor-changes
no bgp ebgp-requires-policy
neighbor 10.10.10.1 remote-as 1
neighbor 10.10.20.3 remote-as 3
!
address-family ipv4 unicast
network 2.2.2.0/24
network 200.200.0.0/16
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE non-exist-map CONDITION
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
address-family ipv6 unicast
network 2001:db8::2/128
network 2001:db8::200/128
neighbor 10.10.10.1 activate
neighbor 10.10.10.1 soft-reconfiguration inbound
neighbor 10.10.10.1 advertise-map ADVERTISE_6 non-exist-map CONDITION_6
neighbor 10.10.20.3 activate
neighbor 10.10.20.3 soft-reconfiguration inbound
exit-address-family
!
access-list CONDITION seq 5 permit 3.3.3.0/24
access-list ADVERTISE seq 5 permit 2.2.2.0/24
access-list ADVERTISE seq 6 permit 200.200.0.0/16
access-list ADVERTISE seq 7 permit 20.20.0.0/16
!
ipv6 access-list ADVERTISE_6 seq 5 permit 2001:db8::2/128
ipv6 access-list CONDITION_6 seq 5 permit 2001:db8::3/128
!
route-map ADVERTISE permit 10
match ip address ADVERTISE
!
route-map CONDITION permit 10
match ip address CONDITION
!
route-map ADVERTISE_6 permit 10
match ipv6 address ADVERTISE_6
!
route-map CONDITION_6 permit 10
match ipv6 address CONDITION_6
!
line vty
!
end
Router2#
Withdraw when non-exist-map prefixes present in BGP table:
----------------------------------------------------------
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 3.3.3.0/24 10.10.20.3 0 0 3 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 4 routes and 4 total paths
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::3/128 fe80::a00:27ff:fe76:6738 0 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 4 routes and 4 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
BGP neighbor is 10.10.10.1, remote AS 1, local AS 2, external link
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Withdraw
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Withdraw
1 accepted prefixes
!--- Output suppressed.
Router2#
Here 2.2.2.0/24 & 200.200.0.0/16 (prefixes in advertise-map) are withdrawn
by conditional advertisement scanner as the prefix(3.3.3.0/24) specified
by non-exist-map is present in BGP table.
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 3.3.3.0/24 0.0.0.0 0 3 i
Total number of prefixes 2
For address family: IPv6 Unicast
BGP table version is 8, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::3/128 :: 0 3 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Advertise when non-exist-map prefixes not present in BGP table:
---------------------------------------------------------------
After Removing 3.3.3.0/24 (prefix present in non-exist-map),
2.2.2.0/24 & 200.200.0.0/16 (prefixes present in advertise-map) are advertised
Router2# show ip bgp all wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 10.10.10.1 0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Displayed 3 routes and 3 total paths
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 fe80::a00:27ff:fecb:ad57 0 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Displayed 3 routes and 3 total paths
Router2#
Router2# show ip bgp neighbors 10.10.10.1
!--- Output suppressed.
For address family: IPv4 Unicast
Update group 9, subgroup 5
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION, Advertise-map *ADVERTISE, status: Advertise
1 accepted prefixes
For address family: IPv6 Unicast
Update group 10, subgroup 6
Packet Queue length 0
Inbound soft reconfiguration allowed
Community attribute sent to this neighbor(all)
Condition NON_EXIST, Condition-map *CONDITION_6, Advertise-map *ADVERTISE_6, status: Advertise
1 accepted prefixes
!--- Output suppressed.
Router2#
Router2# show ip bgp all neighbors 10.10.10.1 advertised-routes wide
For address family: IPv4 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 1.1.1.0/24 0.0.0.0 0 1 i
*> 2.2.2.0/24 0.0.0.0 0 32768 i
*> 200.200.0.0/16 0.0.0.0 0 32768 i
Total number of prefixes 3
For address family: IPv6 Unicast
BGP table version is 9, local router ID is 2.2.2.2, vrf id 0
Default local pref 100, local AS 2
Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes: i - IGP, e - EGP, ? - incomplete
Network Next Hop Metric LocPrf Weight Path
*> 2001:db8::1/128 :: 0 1 i
*> 2001:db8::2/128 :: 0 32768 i
*> 2001:db8::200/128 :: 0 32768 i
Total number of prefixes 3
Router2#
Signed-off-by: Madhuri Kuruganti <k.madhuri@samsung.com>
2020-09-29 11:46:04 +02:00
|
|
|
const struct prefix *p, struct attr *attr,
|
|
|
|
bool skip_rmap_check);
|
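/*
 * The decision described above reduces to a simple predicate. The sketch
 * below is a minimal illustration, not the bgpd implementation: the enum
 * and function names (cond_adv_policy, cond_adv_should_advertise) and the
 * boolean input are hypothetical. In bgpd, the "tracked prefix present"
 * input comes from matching the BGP table against the condition route-map
 * during the 60-second scanner pass. <stdbool.h> is already included by
 * this header.
 */
enum cond_adv_policy { COND_ADV_NON_EXIST, COND_ADV_EXIST };

static inline bool cond_adv_should_advertise(enum cond_adv_policy policy,
                                             bool tracked_prefix_in_table)
{
	/* non-exist-map: advertise only while the tracked prefix is absent */
	if (policy == COND_ADV_NON_EXIST)
		return !tracked_prefix_in_table;

	/* exist-map: advertise only while the tracked prefix is present */
	return tracked_prefix_in_table;
}
/*
 * When this predicate flips between scanner passes, the advertise-map
 * routes are announced or withdrawn accordingly, matching the
 * Advertise/Withdraw status shown in "show ip bgp neighbors" above.
 */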
extern void bgp_peer_clear_node_queue_drain_immediate(struct peer *peer);
extern void bgp_process_queues_drain_immediate(void);

/*
 * L3/L2VPN Virtual Network Control (VNC) feature
 *
 * This feature adds an L3 and L2 VPN application that makes use of the VPN
 * and Encap SAFIs. The code is currently used to support IETF NVO3-style
 * operation: in NVO3 terminology it provides the Network Virtualization
 * Authority (NVA) and the ability to import/export IP prefixes and MAC
 * addresses from Network Virtualization Edges (NVEs), with per-NVE tables.
 *
 * The NVE-NVA protocol used to communicate routing and Ethernet/Layer 2
 * (L2) forwarding information between NVAs and NVEs is referred to as the
 * Remote Forwarder Protocol (RFP); OpenFlow is an example RFP. For general
 * background on NVO3 and RFP concepts see [1]; for information on OpenFlow
 * see [2].
 *
 * RFPs are integrated with BGP via the RF API contained in the "rfapi" BGP
 * sub-directory. Currently, only a simple example RFP is included;
 * developers may use it as a starting point to integrate an RFP of their
 * choosing, e.g. OpenFlow. The RFAPI code also supports import/export of
 * routing information between VNC and customer edge routers (CEs) operating
 * within a virtual network. Import/export may take place between BGP views
 * or to the default zebra VRF.
 *
 * BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
 * information between NVAs. BGP-based IP VPN support is defined in RFC 4364
 * (BGP/MPLS IP Virtual Private Networks) and RFC 4659 (BGP-MPLS IP VPN
 * Extension for IPv6 VPN). Both the Encapsulation Subsequent Address Family
 * Identifier (SAFI) and the Tunnel Encapsulation Attribute of RFC 5512 are
 * supported. MAC address distribution does not follow any standard BGP
 * encoding, although it was inspired by early IETF EVPN concepts.
 *
 * The feature is conditionally compiled and disabled by default; use the
 * --enable-bgp-vnc configure option to enable it.
 *
 * The majority of this code was authored by G. Paul Ziemba <paulz@labn.net>.
 *
 * [1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
 * [2] https://www.opennetworking.org/sdn-resources/technical-library
 */
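/*
 * Illustration of the conditional build: configuring the tree with
 * --enable-bgp-vnc defines a preprocessor symbol (assumed here to be
 * ENABLE_BGP_VNC; verify against the generated config.h), so VNC/RFAPI
 * hooks are guarded roughly like this:
 */
#ifdef ENABLE_BGP_VNC
/* rfapi-specific hooks and declarations are compiled in here */
#endif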
/* for encap/vpn */
extern struct bgp_dest *bgp_afi_node_lookup(struct bgp_table *table, afi_t afi,
                                            safi_t safi, const struct prefix *p,
                                            struct prefix_rd *prd);

extern void bgp_path_info_restore(struct bgp_dest *dest,
                                  struct bgp_path_info *path);

extern int bgp_path_info_cmp_compatible(struct bgp *bgp,
                                        struct bgp_path_info *new,
                                        struct bgp_path_info *exist,
                                        char *pfx_buf, afi_t afi, safi_t safi,
                                        enum bgp_path_selection_reason *reason);

extern void bgp_attr_add_gshut_community(struct attr *attr);

extern void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
                               struct bgp_maxpaths_cfg *mpath_cfg,
                               struct bgp_path_info_pair *result, afi_t afi,
                               safi_t safi);

extern void bgp_zebra_clear_route_change_flags(struct bgp_dest *dest);
extern bool bgp_zebra_has_route_changed(struct bgp_path_info *selected);

extern void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp,
                                        struct bgp_dest *dest,
                                        struct prefix_rd *prd, afi_t afi,
                                        safi_t safi, json_object *json);
extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp,
                                 struct bgp_dest *bn,
                                 struct bgp_path_info *path, afi_t afi,
                                 safi_t safi, json_object *json_paths);

extern int bgp_show_table_rd(struct vty *vty, struct bgp *bgp, safi_t safi,
                             struct bgp_table *table, struct prefix_rd *prd,
                             enum bgp_show_type type, void *output_arg,
                             bool use_json);
extern int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi);

/*
 * Force the self-next-hop check in the next-hop update.
 *
 * Problem description:
 *
 *   +--+                                                 +--+
 *   |R1|-(192.201.202.1)------iBGP------(192.201.202.2)--|R2|
 *   +--+                                                 +--+
 *
 * Routes on R2:
 *   S>* 202.202.202.202/32 [1/0] via 192.201.78.1, ens256, 00:40:48
 *   where the next-hop network, 192.201.78.0/24, is a directly connected
 *   network address:
 *   C>* 192.201.78.0/24 is directly connected, ens256, 00:40:48
 *
 * Configuration on R1:
 *   router bgp 201
 *    bgp router-id 192.168.0.1
 *    neighbor 192.201.202.2 remote-as 201
 *
 * Configuration on R2:
 *   ip route 202.202.202.202/32 192.201.78.1
 *   !
 *   router bgp 201
 *    bgp router-id 192.168.0.2
 *    neighbor 192.201.202.1 remote-as 201
 *    !
 *    address-family ipv4 unicast
 *     redistribute static
 *    exit-address-family
 *
 * Step 1: R1 receives the route 202.202.202.202/32 from R2 and installs it
 * in its BGP RIB.
 *
 * Step 2: On R1, a connected interface address is added. The address is the
 * same as the next-hop of the BGP route received from R2 (192.201.78.1).
 *
 * Point of failure: R1 resolves the BGP route even though the route's
 * next-hop is its own connected address. Although this appears to be a
 * misconfiguration, it is still better to safeguard the code against it.
 *
 * Fix: when BGP receives a connected route from Zebra, it processes the
 * routes for the next-hop update. While doing so, BGP must ignore routes
 * whose next-hop address matches the address of the connected route for
 * which Zebra sent the next-hop update message.
 *
 * Signed-off-by: NaveenThanikachalam <nthanikachal@vmware.com>
 */
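/*
 * A simplified sketch of the self-next-hop guard described above; the
 * helper name and the exact comparison are assumptions for illustration,
 * not the bgpd code. When Zebra announces a connected address, any BGP
 * path whose IPv4 next-hop equals that address must not be treated as
 * resolvable.
 */
#include <netinet/in.h> /* struct in_addr; normally pulled in elsewhere */

static inline bool bgp_nexthop_is_self_connected(struct in_addr connected_addr,
                                                 struct in_addr path_nexthop)
{
	/* Resolving a path via one of our own connected addresses would
	 * loop traffic back to this router, so such paths are skipped
	 * during the next-hop update. */
	return connected_addr.s_addr == path_nexthop.s_addr;
}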
extern bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi,
                                       uint8_t type, uint8_t stype,
                                       struct attr *attr,
                                       struct bgp_dest *dest);

extern int bgp_evpn_path_info_cmp(struct bgp *bgp, struct bgp_path_info *new,
                                  struct bgp_path_info *exist, int *paths_eq);

extern void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate,
                                            struct bgp *bgp,
                                            const struct prefix *p, afi_t afi,
                                            safi_t safi, bool suppress);

extern int bgp_static_set(struct bgp *bgp, const char *negate,
                          struct prefix *pfx, afi_t afi, safi_t safi,
                          const char *rmap, int backdoor, uint32_t label_index,
                          char *errmsg, size_t errmsg_len);

extern int bgp_aggregate_set(struct bgp *bgp, struct prefix *prefix, afi_t afi,
                             safi_t safi, const char *rmap,
                             uint8_t summary_only, uint8_t as_set,
                             uint8_t origin, bool match_med,
                             const char *suppress_map, char *errmsg,
                             size_t errmsg_len);

extern int bgp_aggregate_unset(struct bgp *bgp, struct prefix *prefix,
                               afi_t afi, safi_t safi, char *errmsg,
                               size_t errmsg_len);

extern void bgp_announce_routes_distance_update(struct bgp *bgp,
                                                afi_t update_afi,
                                                safi_t update_safi);

extern int bgp_distance_set(uint8_t distance, const char *ip_str,
                            const char *access_list_str, afi_t afi,
                            safi_t safi, char *errmsg, size_t errmsg_len);

extern int bgp_distance_unset(uint8_t distance, const char *ip_str,
                              const char *access_list_str, afi_t afi,
                              safi_t safi, char *errmsg, size_t errmsg_len);

extern void subgroup_announce_reset_nhop(uint8_t family, struct attr *attr);

#endif /* _QUAGGA_BGP_ROUTE_H */