/* zebra client
 * Copyright (C) 1997, 98, 99 Kunihiro Ishiguro
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "command.h"
#include "stream.h"
#include "network.h"
#include "prefix.h"
#include "log.h"
#include "sockunion.h"
#include "zclient.h"
#include "routemap.h"
#include "thread.h"
#include "queue.h"
#include "memory.h"
#include "lib/json.h"
#include "lib/bfd.h"
#include "filter.h"
#include "mpls.h"
#include "vxlan.h"
#include "pbr.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_aspath.h"
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_zebra.h"
#include "bgpd/bgp_fsm.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_mpath.h"
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_nht.h"
#include "bgpd/bgp_bfd.h"
#include "bgpd/bgp_label.h"
#ifdef ENABLE_BGP_VNC
#include "bgpd/rfapi/rfapi_backend.h"
#include "bgpd/rfapi/vnc_export_bgp.h"
#endif
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_mplsvpn.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_pbr.h"
#include "bgpd/bgp_evpn_private.h"
#include "bgpd/bgp_evpn_mh.h"
#include "bgpd/bgp_mac.h"

/* All information about zebra. */
struct zclient *zclient = NULL;

/* hook to indicate vrf status change for SNMP */
DEFINE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp),
	    (bgp, ifp))

/* Can we install into zebra? */
static inline bool bgp_install_info_to_zebra(struct bgp *bgp)
{
	if (zclient->sock <= 0)
		return false;

	if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) {
		zlog_debug(
			"%s: No zebra instance to talk to, not installing information",
			__func__);
		return false;
	}

	return true;
}

int zclient_num_connects;

/* Router-id update message from zebra. */
static int bgp_router_id_update(ZAPI_CALLBACK_ARGS)
{
	struct prefix router_id;

	zebra_router_id_update_read(zclient->ibuf, &router_id);

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug("Rx Router Id update VRF %u Id %pFX", vrf_id,
			   &router_id);

	bgp_router_id_zebra_bump(vrf_id, &router_id);
	return 0;
}

/* Nexthop update message from zebra. */
static int bgp_read_nexthop_update(ZAPI_CALLBACK_ARGS)
{
	bgp_parse_nexthop_update(cmd, vrf_id);
	return 0;
}

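/* Import check update message from zebra. */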
static int bgp_read_import_check_update(ZAPI_CALLBACK_ARGS)
{
	bgp_parse_nexthop_update(cmd, vrf_id);
	return 0;
}

/* Set or clear interface on which unnumbered neighbor is configured. This
 * would in turn cause BGP to initiate or turn off IPv6 RAs on this
 * interface.
 */
static void bgp_update_interface_nbrs(struct bgp *bgp, struct interface *ifp,
				       struct interface *upd_ifp)
{
	struct listnode *node, *nnode;
	struct peer *peer;

	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
		if (peer->conf_if && (strcmp(peer->conf_if, ifp->name) == 0)) {
			if (upd_ifp) {
				peer->ifp = upd_ifp;
				bgp_zebra_initiate_radv(bgp, peer);
			} else {
				bgp_zebra_terminate_radv(bgp, peer);
				peer->ifp = upd_ifp;
			}
		}
	}
}

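/* FEC update message from zebra. */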
static int bgp_read_fec_update(int command, struct zclient *zclient,
			       zebra_size_t length)
{
	bgp_parse_fec_update();
	return 0;
}

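/* Kick off the FSM for any interface-based peers configured on this
 * interface that are not yet Established.
 */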
static void bgp_start_interface_nbrs(struct bgp *bgp, struct interface *ifp)
{
	struct listnode *node, *nnode;
	struct peer *peer;

	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
		if (peer->conf_if && (strcmp(peer->conf_if, ifp->name) == 0)
		    && peer->status != Established) {
			if (peer_active(peer))
				BGP_EVENT_ADD(peer, BGP_Stop);
			BGP_EVENT_ADD(peer, BGP_Start);
		}
	}
}

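/* A neighbor address has been learnt on an interface; start any
 * interface-based peers on it.
 */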
static void bgp_nbr_connected_add(struct bgp *bgp, struct nbr_connected *ifc)
{
	struct listnode *node;
	struct connected *connected;
	struct interface *ifp;
	struct prefix *p;

	/* Kick-off the FSM for any relevant peers only if there is a
	 * valid local address on the interface.
	 */
	ifp = ifc->ifp;
	for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, connected)) {
		p = connected->address;
		if (p->family == AF_INET6
		    && IN6_IS_ADDR_LINKLOCAL(&p->u.prefix6))
			break;
	}
	if (!connected)
		return;

	bgp_start_interface_nbrs(bgp, ifp);
}

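/* A neighbor address has been removed from an interface; bring down any
 * matching interface-based peers and, if requested, free the neighbor entry.
 */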
static void bgp_nbr_connected_delete(struct bgp *bgp, struct nbr_connected *ifc,
				     int del)
{
	struct listnode *node, *nnode;
	struct peer *peer;
	struct interface *ifp;

	for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
		if (peer->conf_if
		    && (strcmp(peer->conf_if, ifc->ifp->name) == 0)) {
			peer->last_reset = PEER_DOWN_NBR_ADDR_DEL;
			BGP_EVENT_ADD(peer, BGP_Stop);
		}
	}

	/* Free neighbor also, if we're asked to. */
	if (del) {
		ifp = ifc->ifp;
		listnode_delete(ifp->nbr_connected, ifc);
		nbr_connected_free(ifc);
	}
}

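/* Interface deletion notification from zebra. */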
static int bgp_ifp_destroy(struct interface *ifp)
{
	struct bgp *bgp;

	bgp = bgp_lookup_by_vrf_id(ifp->vrf_id);

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug("Rx Intf del VRF %u IF %s", ifp->vrf_id, ifp->name);

	if (bgp) {
		bgp_update_interface_nbrs(bgp, ifp, NULL);
		hook_call(bgp_vrf_status_changed, bgp, ifp);
	}

	bgp_mac_del_mac_entry(ifp);

	return 0;
}

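/* Interface up notification from zebra. */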
static int bgp_ifp_up(struct interface *ifp)
{
	struct connected *c;
	struct nbr_connected *nc;
	struct listnode *node, *nnode;
	struct bgp *bgp;

	bgp = bgp_lookup_by_vrf_id(ifp->vrf_id);

	bgp_mac_add_mac_entry(ifp);

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug("Rx Intf up VRF %u IF %s", ifp->vrf_id, ifp->name);

	if (!bgp)
		return 0;

	for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, c))
		bgp_connected_add(bgp, c);

	for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc))
		bgp_nbr_connected_add(bgp, nc);

	hook_call(bgp_vrf_status_changed, bgp, ifp);
	bgp_nht_ifp_up(ifp);

	return 0;
}

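/* Interface down notification from zebra. */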
static int bgp_ifp_down(struct interface *ifp)
{
	struct connected *c;
	struct nbr_connected *nc;
	struct listnode *node, *nnode;
	struct bgp *bgp;
	struct peer *peer;

	bgp = bgp_lookup_by_vrf_id(ifp->vrf_id);

	bgp_mac_del_mac_entry(ifp);

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug("Rx Intf down VRF %u IF %s", ifp->vrf_id, ifp->name);

	if (!bgp)
		return 0;

	for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, c))
		bgp_connected_delete(bgp, c);

	for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc))
		bgp_nbr_connected_delete(bgp, nc, 1);

	/* Fast external-failover */
	if (!CHECK_FLAG(bgp->flags, BGP_FLAG_NO_FAST_EXT_FAILOVER)) {

		for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
#if defined(HAVE_CUMULUS)
			/* Take down directly connected EBGP peers as well as
			 * 1-hop BFD tracked (directly connected) IBGP peers.
			 */
			if ((peer->ttl != BGP_DEFAULT_TTL)
			    && (peer->gtsm_hops != BGP_GTSM_HOPS_CONNECTED)
			    && (!peer->bfd_info
				|| bgp_bfd_is_peer_multihop(peer)))
#else
			/* Take down directly connected EBGP peers */
			if ((peer->ttl != BGP_DEFAULT_TTL)
			    && (peer->gtsm_hops != BGP_GTSM_HOPS_CONNECTED))
#endif
				continue;

			if (ifp == peer->nexthop.ifp) {
				BGP_EVENT_ADD(peer, BGP_Stop);
				peer->last_reset = PEER_DOWN_IF_DOWN;
			}
		}
	}

	hook_call(bgp_vrf_status_changed, bgp, ifp);
	bgp_nht_ifp_down(ifp);

	return 0;
}

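/* Interface address addition message from zebra. */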
static int bgp_interface_address_add(ZAPI_CALLBACK_ARGS)
{
	struct connected *ifc;
	struct bgp *bgp;

	bgp = bgp_lookup_by_vrf_id(vrf_id);

	ifc = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);

	if (ifc == NULL)
		return 0;

	if (bgp_debug_zebra(ifc->address))
		zlog_debug("Rx Intf address add VRF %u IF %s addr %pFX", vrf_id,
			   ifc->ifp->name, ifc->address);

	if (!bgp)
		return 0;

	if (if_is_operative(ifc->ifp)) {
		bgp_connected_add(bgp, ifc);

		/* If we have learnt of any neighbors on this interface,
		 * check to kick off any BGP interface-based neighbors,
		 * but only if this is a link-local address.
		 */
		if (IN6_IS_ADDR_LINKLOCAL(&ifc->address->u.prefix6)
		    && !list_isempty(ifc->ifp->nbr_connected))
			bgp_start_interface_nbrs(bgp, ifc->ifp);
	}

	return 0;
}

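/* Interface address deletion message from zebra. */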
static int bgp_interface_address_delete(ZAPI_CALLBACK_ARGS)
{
	struct connected *ifc;
	struct bgp *bgp;

	bgp = bgp_lookup_by_vrf_id(vrf_id);

	ifc = zebra_interface_address_read(cmd, zclient->ibuf, vrf_id);

	if (ifc == NULL)
		return 0;

	if (bgp_debug_zebra(ifc->address))
		zlog_debug("Rx Intf address del VRF %u IF %s addr %pFX", vrf_id,
			   ifc->ifp->name, ifc->address);

	if (bgp && if_is_operative(ifc->ifp)) {
		bgp_connected_delete(bgp, ifc);
	}

	connected_free(&ifc);

	return 0;
}

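/* Interface neighbor address addition message from zebra. */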
static int bgp_interface_nbr_address_add(ZAPI_CALLBACK_ARGS)
{
	struct nbr_connected *ifc = NULL;
	struct bgp *bgp;

	ifc = zebra_interface_nbr_address_read(cmd, zclient->ibuf, vrf_id);

	if (ifc == NULL)
		return 0;

	if (bgp_debug_zebra(ifc->address))
		zlog_debug("Rx Intf neighbor add VRF %u IF %s addr %pFX",
			   vrf_id, ifc->ifp->name, ifc->address);

	if (if_is_operative(ifc->ifp)) {
		bgp = bgp_lookup_by_vrf_id(vrf_id);
		if (bgp)
			bgp_nbr_connected_add(bgp, ifc);
	}

	return 0;
}

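/* Interface neighbor address deletion message from zebra. */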
static int bgp_interface_nbr_address_delete(ZAPI_CALLBACK_ARGS)
{
	struct nbr_connected *ifc = NULL;
	struct bgp *bgp;

	ifc = zebra_interface_nbr_address_read(cmd, zclient->ibuf, vrf_id);

	if (ifc == NULL)
		return 0;

	if (bgp_debug_zebra(ifc->address))
		zlog_debug("Rx Intf neighbor del VRF %u IF %s addr %pFX",
			   vrf_id, ifc->ifp->name, ifc->address);

	if (if_is_operative(ifc->ifp)) {
		bgp = bgp_lookup_by_vrf_id(vrf_id);
		if (bgp)
			bgp_nbr_connected_delete(bgp, ifc, 0);
	}

	nbr_connected_free(ifc);

	return 0;
}

/* VRF update for an interface. */
static int bgp_interface_vrf_update(ZAPI_CALLBACK_ARGS)
{
	struct interface *ifp;
	vrf_id_t new_vrf_id;
	struct connected *c;
	struct nbr_connected *nc;
	struct listnode *node, *nnode;
	struct bgp *bgp;
	struct peer *peer;

	ifp = zebra_interface_vrf_update_read(zclient->ibuf, vrf_id,
					      &new_vrf_id);
	if (!ifp)
		return 0;

	if (BGP_DEBUG(zebra, ZEBRA) && ifp)
		zlog_debug("Rx Intf VRF change VRF %u IF %s NewVRF %u", vrf_id,
			   ifp->name, new_vrf_id);

	bgp = bgp_lookup_by_vrf_id(vrf_id);

	if (bgp) {
		for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, c))
			bgp_connected_delete(bgp, c);

		for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc))
			bgp_nbr_connected_delete(bgp, nc, 1);

		/* Fast external-failover */
		if (!CHECK_FLAG(bgp->flags, BGP_FLAG_NO_FAST_EXT_FAILOVER)) {
			for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
				if ((peer->ttl != BGP_DEFAULT_TTL)
				    && (peer->gtsm_hops
					!= BGP_GTSM_HOPS_CONNECTED))
					continue;

				if (ifp == peer->nexthop.ifp)
					BGP_EVENT_ADD(peer, BGP_Stop);
			}
		}
	}

	if_update_to_new_vrf(ifp, new_vrf_id);

	bgp = bgp_lookup_by_vrf_id(new_vrf_id);
	if (!bgp)
		return 0;

	for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, c))
		bgp_connected_add(bgp, c);

	for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc))
		bgp_nbr_connected_add(bgp, nc);

	hook_call(bgp_vrf_status_changed, bgp, ifp);
	return 0;
}

/* Zebra route add and delete treatment. */
static int zebra_read_route(ZAPI_CALLBACK_ARGS)
{
	enum nexthop_types_t nhtype;
	struct zapi_route api;
	union g_addr nexthop;
	ifindex_t ifindex;
	int add, i;
	struct bgp *bgp;

	bgp = bgp_lookup_by_vrf_id(vrf_id);
	if (!bgp)
		return 0;

	if (zapi_route_decode(zclient->ibuf, &api) < 0)
		return -1;

	/* we completely ignore srcdest routes for now. */
	if (CHECK_FLAG(api.message, ZAPI_MESSAGE_SRCPFX))
		return 0;

	/* ignore link-local address. */
	if (api.prefix.family == AF_INET6
	    && IN6_IS_ADDR_LINKLOCAL(&api.prefix.u.prefix6))
		return 0;

	nexthop = api.nexthops[0].gate;
	ifindex = api.nexthops[0].ifindex;
	nhtype = api.nexthops[0].type;

	add = (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD);
	if (add) {
		/*
		 * The ADD message is actually an UPDATE and there is no
		 * explicit DEL for a prior redistributed route, if any.
		 * So, perform an implicit DEL processing for the same
		 * redistributed route from any other source type.
		 */
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
			if (i != api.type)
				bgp_redistribute_delete(bgp, &api.prefix, i,
							api.instance);
		}

		/* Now perform the add/update. */
		bgp_redistribute_add(bgp, &api.prefix, &nexthop, ifindex,
				     nhtype, api.metric, api.type, api.instance,
				     api.tag);
	} else {
		bgp_redistribute_delete(bgp, &api.prefix, api.type,
					api.instance);
	}

	if (bgp_debug_zebra(&api.prefix)) {
		char buf[PREFIX_STRLEN];

		if (add) {
			inet_ntop(api.prefix.family, &nexthop, buf,
				  sizeof(buf));
			zlog_debug(
				"Rx route ADD VRF %u %s[%d] %pFX nexthop %s (type %d if %u) metric %u distance %u tag %" ROUTE_TAG_PRI,
				vrf_id, zebra_route_string(api.type),
				api.instance, &api.prefix, buf, nhtype, ifindex,
				api.metric, api.distance, api.tag);
		} else {
			zlog_debug("Rx route DEL VRF %u %s[%d] %pFX", vrf_id,
				   zebra_route_string(api.type), api.instance,
				   &api.prefix);
		}
	}

	return 0;
}

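/* Find the interface with a connected prefix that covers the given IPv4
 * address in the given VRF.
 */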
2014-06-04 06:53:35 +02:00
|
|
|
|
2016-02-02 13:36:20 +01:00
|
|
|
struct interface *if_lookup_by_ipv4(struct in_addr *addr, vrf_id_t vrf_id)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-10-03 03:06:01 +02:00
|
|
|
struct vrf *vrf;
|
2004-09-23 21:18:23 +02:00
|
|
|
struct listnode *cnode;
|
2002-12-13 21:15:29 +01:00
|
|
|
struct interface *ifp;
|
|
|
|
struct connected *connected;
|
|
|
|
struct prefix_ipv4 p;
|
|
|
|
struct prefix *cp;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-10-03 03:06:01 +02:00
|
|
|
vrf = vrf_lookup_by_id(vrf_id);
|
|
|
|
if (!vrf)
|
|
|
|
return NULL;
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
p.family = AF_INET;
|
|
|
|
p.prefix = *addr;
|
|
|
|
p.prefixlen = IPV4_MAX_BITLEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-10-06 20:25:58 +02:00
|
|
|
FOR_ALL_INTERFACES (vrf, ifp) {
|
2005-04-07 Paul Jakma <paul.jakma@sun.com>
* (global): Fix up list loops to match changes in lib/linklist,
and some basic auditing of usage.
* configure.ac: define QUAGGA_NO_DEPRECATED_INTERFACES
* HACKING: Add notes about deprecating interfaces and commands.
* lib/linklist.h: Add usage comments.
Rename getdata macro to listgetdata.
Rename nextnode to listnextnode and fix its odd behaviour to be
less dangerous.
Make listgetdata macro assert node is not null, NULL list entries
should be bug condition.
ALL_LIST_ELEMENTS, new macro, forward-referencing macro for use
with for loop, Suggested by Jim Carlson of Sun.
Add ALL_LIST_ELEMENTS_RO for cases which obviously do not need the
"safety" of previous macro.
LISTNODE_ADD and DELETE macros renamed to ATTACH, DETACH, to
distinguish from the similarly named functions, and reflect their
effect better.
Add a QUAGGA_NO_DEPRECATED_INTERFACES define guarded section
with the old defines which were modified above,
for backwards compatibility - guarded to prevent Quagga using it..
* lib/linklist.c: fix up for linklist.h changes.
* ospf6d/ospf6_abr.c: (ospf6_abr_examin_brouter) change to a single
scan of the area list, rather than scanning all areas first for
INTER_ROUTER and then again for INTER_NETWORK. According to
16.2, the scan should be area specific anyway, and further
ospf6d does not seem to implement 16.3 anyway.
2005-04-07 09:30:20 +02:00
|
|
|
for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
|
2002-12-13 21:15:29 +01:00
|
|
|
cp = connected->address;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
if (cp->family == AF_INET)
|
|
|
|
if (prefix_match(cp, (struct prefix *)&p))
|
|
|
|
return ifp;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
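
/* Find the interface in the given VRF that has 'addr' configured as one of
 * its connected IPv4 addresses (exact match); return NULL if none does.
 */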
struct interface *if_lookup_by_ipv4_exact(struct in_addr *addr, vrf_id_t vrf_id)
{
	struct vrf *vrf;
	struct listnode *cnode;
	struct interface *ifp;
	struct connected *connected;
	struct prefix *cp;

	vrf = vrf_lookup_by_id(vrf_id);
	if (!vrf)
		return NULL;

	FOR_ALL_INTERFACES (vrf, ifp) {
		for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
			cp = connected->address;

			if (cp->family == AF_INET)
				if (IPV4_ADDR_SAME(&cp->u.prefix4, addr))
					return ifp;
		}
	}
	return NULL;
}
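
/* Find the first interface in the given VRF whose connected IPv6 prefix
 * covers 'addr'. For link-local prefixes the interface must also match
 * 'ifindex'. Returns NULL if nothing matches.
 */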
struct interface *if_lookup_by_ipv6(struct in6_addr *addr, ifindex_t ifindex,
				    vrf_id_t vrf_id)
{
	struct vrf *vrf;
	struct listnode *cnode;
	struct interface *ifp;
	struct connected *connected;
	struct prefix_ipv6 p;
	struct prefix *cp;

	vrf = vrf_lookup_by_id(vrf_id);
	if (!vrf)
		return NULL;

	p.family = AF_INET6;
	p.prefix = *addr;
	p.prefixlen = IPV6_MAX_BITLEN;

	FOR_ALL_INTERFACES (vrf, ifp) {
		for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
			cp = connected->address;

			if (cp->family == AF_INET6)
				if (prefix_match(cp, (struct prefix *)&p)) {
					if (IN6_IS_ADDR_LINKLOCAL(
						    &cp->u.prefix6)) {
						if (ifindex == ifp->ifindex)
							return ifp;
					} else
						return ifp;
				}
		}
	}
	return NULL;
}
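
/* Find the interface in the given VRF that has 'addr' configured as one of
 * its connected IPv6 addresses (exact match); a link-local address must also
 * match 'ifindex'. Returns NULL if none does.
 */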
struct interface *if_lookup_by_ipv6_exact(struct in6_addr *addr,
					  ifindex_t ifindex, vrf_id_t vrf_id)
{
	struct vrf *vrf;
	struct listnode *cnode;
	struct interface *ifp;
	struct connected *connected;
	struct prefix *cp;

	vrf = vrf_lookup_by_id(vrf_id);
	if (!vrf)
		return NULL;

	FOR_ALL_INTERFACES (vrf, ifp) {
		for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
			cp = connected->address;

			if (cp->family == AF_INET6)
				if (IPV6_ADDR_SAME(&cp->u.prefix6, addr)) {
					if (IN6_IS_ADDR_LINKLOCAL(
						    &cp->u.prefix6)) {
						if (ifindex == ifp->ifindex)
							return ifp;
					} else
						return ifp;
				}
		}
	}
	return NULL;
}
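
/* Copy the first global (non link-local) IPv6 address of 'ifp' into 'addr'.
 * Returns 1 if one was found, 0 otherwise.
 */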
static int if_get_ipv6_global(struct interface *ifp, struct in6_addr *addr)
{
	struct listnode *cnode;
	struct connected *connected;
	struct prefix *cp;

	for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
		cp = connected->address;

		if (cp->family == AF_INET6)
			if (!IN6_IS_ADDR_LINKLOCAL(&cp->u.prefix6)) {
				memcpy(addr, &cp->u.prefix6, IPV6_MAX_BYTELEN);
				return 1;
			}
	}
	return 0;
}
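
/* Copy the first link-local IPv6 address of 'ifp' into 'addr'.
 * Returns 1 if one was found, 0 otherwise.
 */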
static int if_get_ipv6_local(struct interface *ifp, struct in6_addr *addr)
{
	struct listnode *cnode;
	struct connected *connected;
	struct prefix *cp;

	for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
		cp = connected->address;

		if (cp->family == AF_INET6)
			if (IN6_IS_ADDR_LINKLOCAL(&cp->u.prefix6)) {
				memcpy(addr, &cp->u.prefix6, IPV6_MAX_BYTELEN);
				return 1;
			}
	}
	return 0;
}
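
/* Copy the first usable (non-martian) IPv4 address of 'ifp' into 'addr'.
 * Returns 1 if one was found, 0 otherwise.
 */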
static int if_get_ipv4_address(struct interface *ifp, struct in_addr *addr)
{
	struct listnode *cnode;
	struct connected *connected;
	struct prefix *cp;

	for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, connected)) {
		cp = connected->address;
		if ((cp->family == AF_INET)
		    && !ipv4_martian(&(cp->u.prefix4))) {
			*addr = cp->u.prefix4;
			return 1;
		}
	}
	return 0;
}
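
/* Derive the nexthop information for a peering from the session's local and
 * remote socket addresses: record the local IPv4/IPv6 (global and link-local)
 * addresses, resolve the outgoing interface, and flag whether the peer is on
 * a shared network. Returns false if required information is missing.
 */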
bool bgp_zebra_nexthop_set(union sockunion *local, union sockunion *remote,
			   struct bgp_nexthop *nexthop, struct peer *peer)
{
	int ret = 0;
	struct interface *ifp = NULL;

	memset(nexthop, 0, sizeof(struct bgp_nexthop));

	if (!local)
		return false;
	if (!remote)
		return false;

	if (local->sa.sa_family == AF_INET) {
		nexthop->v4 = local->sin.sin_addr;
		if (peer->update_if)
			ifp = if_lookup_by_name(peer->update_if,
						peer->bgp->vrf_id);
		else
			ifp = if_lookup_by_ipv4_exact(&local->sin.sin_addr,
						      peer->bgp->vrf_id);
	}
	if (local->sa.sa_family == AF_INET6) {
		memcpy(&nexthop->v6_global, &local->sin6.sin6_addr, IPV6_MAX_BYTELEN);
		if (IN6_IS_ADDR_LINKLOCAL(&local->sin6.sin6_addr)) {
			if (peer->conf_if || peer->ifname)
				ifp = if_lookup_by_name(peer->conf_if
								? peer->conf_if
								: peer->ifname,
							peer->bgp->vrf_id);
		} else if (peer->update_if)
			ifp = if_lookup_by_name(peer->update_if,
						peer->bgp->vrf_id);
		else
			ifp = if_lookup_by_ipv6_exact(&local->sin6.sin6_addr,
						      local->sin6.sin6_scope_id,
						      peer->bgp->vrf_id);
	}

	if (!ifp) {
		/*
		 * BGP views do not currently get proper data
		 * from zebra (when attached) to be able to
		 * properly resolve nexthops, so give this
		 * instance type a pass.
		 */
		if (peer->bgp->inst_type == BGP_INSTANCE_TYPE_VIEW)
			return true;

		/*
		 * If we have no interface data but we have established
		 * some connection with zebra, then something has gone
		 * terribly wrong here, so say this failed.
		 * If we do not have any zebra connection, then not
		 * having an ifp pointer is ok.
		 */
		return zclient_num_connects ? false : true;
	}

	nexthop->ifp = ifp;

	/* IPv4 connection, fetch and store IPv6 local address(es) if any. */
	if (local->sa.sa_family == AF_INET) {
		/* IPv6 nexthop */
		ret = if_get_ipv6_global(ifp, &nexthop->v6_global);

		if (!ret) {
			/* There is no global nexthop. Use the link-local
			 * address as both the global and link-local nexthop.
			 * In this scenario, the expectation for interop is
			 * that the network admin would use a route-map to
			 * specify the global IPv6 nexthop.
			 */
			if_get_ipv6_local(ifp, &nexthop->v6_global);
			memcpy(&nexthop->v6_local, &nexthop->v6_global,
			       IPV6_MAX_BYTELEN);
		} else
			if_get_ipv6_local(ifp, &nexthop->v6_local);

		if (if_lookup_by_ipv4(&remote->sin.sin_addr, peer->bgp->vrf_id))
			peer->shared_network = 1;
		else
			peer->shared_network = 0;
	}

	/* IPv6 connection, fetch and store IPv4 local address if any. */
	if (local->sa.sa_family == AF_INET6) {
		struct interface *direct = NULL;

		/* IPv4 nexthop. */
		ret = if_get_ipv4_address(ifp, &nexthop->v4);
		if (!ret && peer->local_id.s_addr != INADDR_ANY)
			nexthop->v4 = peer->local_id;

		/* Global address */
		if (!IN6_IS_ADDR_LINKLOCAL(&local->sin6.sin6_addr)) {
			memcpy(&nexthop->v6_global, &local->sin6.sin6_addr,
			       IPV6_MAX_BYTELEN);

			/* If directly connected, set the link-local address. */
			direct = if_lookup_by_ipv6(&remote->sin6.sin6_addr,
						   remote->sin6.sin6_scope_id,
						   peer->bgp->vrf_id);
			if (direct)
				if_get_ipv6_local(ifp, &nexthop->v6_local);
		} else
		/* Link-local address. */
		{
			ret = if_get_ipv6_global(ifp, &nexthop->v6_global);

			/* If there is no global address, set the link-local
			 * address as global; this knowingly breaks the RFC.
			 * In this scenario, the expectation for interop is
			 * that the network admin would use a route-map to
			 * specify the global IPv6 nexthop.
			 */
			if (!ret)
				memcpy(&nexthop->v6_global,
				       &local->sin6.sin6_addr,
				       IPV6_MAX_BYTELEN);
			/* Always set the link-local address */
			memcpy(&nexthop->v6_local, &local->sin6.sin6_addr,
			       IPV6_MAX_BYTELEN);
		}

		if (IN6_IS_ADDR_LINKLOCAL(&local->sin6.sin6_addr)
		    || if_lookup_by_ipv6(&remote->sin6.sin6_addr,
					 remote->sin6.sin6_scope_id,
					 peer->bgp->vrf_id))
			peer->shared_network = 1;
		else
			peer->shared_network = 0;
	}

	/* KAME stack specific treatment. */
#ifdef KAME
	if (IN6_IS_ADDR_LINKLOCAL(&nexthop->v6_global)
	    && IN6_LINKLOCAL_IFINDEX(nexthop->v6_global)) {
		SET_IN6_LINKLOCAL_IFINDEX(nexthop->v6_global, 0);
	}
	if (IN6_IS_ADDR_LINKLOCAL(&nexthop->v6_local)
	    && IN6_LINKLOCAL_IFINDEX(nexthop->v6_local)) {
		SET_IN6_LINKLOCAL_IFINDEX(nexthop->v6_local, 0);
	}
#endif /* KAME */

	/* If we have identified the local interface, there is no error for
	 * now.
	 */
	return true;
}
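
/* Pick the IPv6 nexthop (global or link-local) to use when installing 'path'
 * into zebra; '*ifindex' is set when the chosen nexthop is link-local.
 * Returns NULL if the attribute carries no IPv6 nexthop.
 */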
static struct in6_addr *
bgp_path_info_to_ipv6_nexthop(struct bgp_path_info *path, ifindex_t *ifindex)
{
	struct in6_addr *nexthop = NULL;

	/* Only global address nexthop exists. */
	if (path->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL
	    || path->attr->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV6_GLOBAL) {
		nexthop = &path->attr->mp_nexthop_global;
		if (IN6_IS_ADDR_LINKLOCAL(nexthop))
			*ifindex = path->attr->nh_ifindex;
	}

	/* If both global and link-local address present. */
	if (path->attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
	    || path->attr->mp_nexthop_len
		       == BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL) {
		/* Check if route-map is set to prefer global over link-local */
		if (path->attr->mp_nexthop_prefer_global) {
			nexthop = &path->attr->mp_nexthop_global;
			if (IN6_IS_ADDR_LINKLOCAL(nexthop))
				*ifindex = path->attr->nh_ifindex;
		} else {
			/* Workaround for Cisco's nexthop bug. */
			if (IN6_IS_ADDR_UNSPECIFIED(
				    &path->attr->mp_nexthop_global)
			    && path->peer->su_remote
			    && path->peer->su_remote->sa.sa_family
				       == AF_INET6) {
				nexthop =
					&path->peer->su_remote->sin6.sin6_addr;
				if (IN6_IS_ADDR_LINKLOCAL(nexthop))
					*ifindex = path->peer->nexthop.ifp
							   ->ifindex;
			} else {
				nexthop = &path->attr->mp_nexthop_local;
				if (IN6_IS_ADDR_LINKLOCAL(nexthop))
					*ifindex = path->attr->nh_lla_ifindex;
			}
		}
	}

	return nexthop;
}
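
/* Run the configured table-map route-map against a route that is about to be
 * sent to zebra. Returns true if the route is permitted; on deny, logs the
 * rejected prefix and nexthop (when zebra debugging is on) and returns false.
 */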
static bool bgp_table_map_apply(struct route_map *map, const struct prefix *p,
				struct bgp_path_info *path)
{
	route_map_result_t ret;

	ret = route_map_apply(map, p, path);
	bgp_attr_flush(path->attr);

	if (ret != RMAP_DENYMATCH)
		return true;

	if (bgp_debug_zebra(p)) {
		if (p->family == AF_INET) {
			zlog_debug(
				"Zebra rmap deny: IPv4 route %pFX nexthop %pI4",
				p, &path->attr->nexthop);
		}
		if (p->family == AF_INET6) {
			char buf[2][INET6_ADDRSTRLEN];
			ifindex_t ifindex;
			struct in6_addr *nexthop;

			nexthop = bgp_path_info_to_ipv6_nexthop(path, &ifindex);
			zlog_debug(
				"Zebra rmap deny: IPv6 route %pFX nexthop %s",
				p,
				nexthop ? inet_ntop(AF_INET6, nexthop, buf[1],
						    sizeof(buf[1]))
					: inet_ntop(AF_INET,
						    &path->attr->nexthop,
						    buf[1], sizeof(buf[1])));
		}
	}
	return false;
}
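
/* State for the connection to zebra's table manager, from which bgpd obtains
 * a chunk of table IDs for flowspec. bgp_zebra_tm_connect() runs periodically:
 * it (re)connects if needed and, once a chunk is available, walks the flowspec
 * table to announce entries that are not yet installed.
 */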
static struct thread *bgp_tm_thread_connect;
static bool bgp_tm_status_connected;
static bool bgp_tm_chunk_obtained;
#define BGP_FLOWSPEC_TABLE_CHUNK 100000
static uint32_t bgp_tm_min, bgp_tm_max, bgp_tm_chunk_size;
struct bgp *bgp_tm_bgp;

static int bgp_zebra_tm_connect(struct thread *t)
{
	struct zclient *zclient;
	int delay = 10, ret = 0;

	zclient = THREAD_ARG(t);
	if (bgp_tm_status_connected && zclient->sock > 0)
		delay = 60;
	else {
		bgp_tm_status_connected = false;
		ret = tm_table_manager_connect(zclient);
	}
	if (ret < 0) {
		zlog_info("Error connecting to table manager!");
		bgp_tm_status_connected = false;
	} else {
		if (!bgp_tm_status_connected)
			zlog_debug("Connecting to table manager. Success");
		bgp_tm_status_connected = true;
		if (!bgp_tm_chunk_obtained) {
			if (bgp_zebra_get_table_range(bgp_tm_chunk_size,
						      &bgp_tm_min,
						      &bgp_tm_max) >= 0) {
				bgp_tm_chunk_obtained = true;
				/* parse non installed entries */
				bgp_zebra_announce_table(bgp_tm_bgp, AFI_IP, SAFI_FLOWSPEC);
			}
		}
	}
	thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
			 &bgp_tm_thread_connect);
	return 0;
}

bool bgp_zebra_tm_chunk_obtained(void)
{
	return bgp_tm_chunk_obtained;
}
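
/* Return the next table ID to hand out: locally generated sequential IDs if
 * no chunk has been obtained from the table manager, otherwise IDs taken from
 * the allocated chunk.
 */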
uint32_t bgp_zebra_tm_get_id(void)
{
	static int table_id;

	if (!bgp_tm_chunk_obtained)
		return ++table_id;
	return bgp_tm_min++;
}
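
/* Arm the periodic table-manager connection attempt for this BGP instance,
 * unless it is already running.
 */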
void bgp_zebra_init_tm_connect(struct bgp *bgp)
{
	int delay = 1;

	/* if already set, do nothing */
	if (bgp_tm_thread_connect != NULL)
		return;
	bgp_tm_status_connected = false;
	bgp_tm_chunk_obtained = false;
	bgp_tm_min = bgp_tm_max = 0;
	bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
	bgp_tm_bgp = bgp;
	thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
			 &bgp_tm_thread_connect);
}
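
/* Ask the table manager for a chunk of 'chunk_size' table IDs. On success,
 * '*start' and '*end' delimit the allocated range and 0 is returned; -1 is
 * returned if not connected or if the request fails.
 */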
int bgp_zebra_get_table_range(uint32_t chunk_size,
			      uint32_t *start, uint32_t *end)
{
	int ret;

	if (!bgp_tm_status_connected)
		return -1;
	ret = tm_get_table_chunk(zclient, chunk_size, start, end);
	if (ret < 0) {
		flog_err(EC_BGP_TABLE_CHUNK,
			 "BGP: Error getting table chunk %u", chunk_size);
		return -1;
	}
	zlog_info("BGP: Table Manager returns range from chunk %u is [%u %u]",
		  chunk_size, *start, *end);
	return 0;
}
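
/* Fill in the zapi nexthop for an IPv4 nexthop that is about to be installed,
 * with special handling for EVPN routes imported into a VRF and for routes
 * leaked from another VRF.
 */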
static bool update_ipv4nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp,
					    struct in_addr *nexthop,
					    struct attr *attr, bool is_evpn,
					    struct zapi_nexthop *api_nh)
{
	api_nh->gate.ipv4 = *nexthop;
	api_nh->vrf_id = nh_bgp->vrf_id;

	/* Need to set fields appropriately for EVPN routes imported into
	 * a VRF (which are programmed as onlink on l3-vni SVI) as well as
	 * connected routes leaked into a VRF.
	 */
	if (is_evpn) {
		api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
		SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK);
		api_nh->ifindex = nh_bgp->l3vni_svi_ifindex;
	} else if (nh_othervrf &&
		   api_nh->gate.ipv4.s_addr == INADDR_ANY) {
		api_nh->type = NEXTHOP_TYPE_IFINDEX;
		api_nh->ifindex = attr->nh_ifindex;
	} else
		api_nh->type = NEXTHOP_TYPE_IPV4;

	return true;
}
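
/* Fill in the zapi nexthop for an IPv6 nexthop that is about to be installed.
 * For link-local nexthops the interface index is resolved from the path or
 * peer; returns false if a required ifindex cannot be determined.
 */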
static bool update_ipv6nh_for_route_install(int nh_othervrf, struct bgp *nh_bgp,
					    struct in6_addr *nexthop,
					    ifindex_t ifindex,
					    struct bgp_path_info *pi,
					    struct bgp_path_info *best_pi,
					    bool is_evpn,
					    struct zapi_nexthop *api_nh)
{
	struct attr *attr;

	attr = pi->attr;
	api_nh->vrf_id = nh_bgp->vrf_id;

	if (is_evpn) {
		api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
		SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK);
		api_nh->ifindex = nh_bgp->l3vni_svi_ifindex;
	} else if (nh_othervrf) {
		if (IN6_IS_ADDR_UNSPECIFIED(nexthop)) {
			api_nh->type = NEXTHOP_TYPE_IFINDEX;
			api_nh->ifindex = attr->nh_ifindex;
		} else if (IN6_IS_ADDR_LINKLOCAL(nexthop)) {
			if (ifindex == 0)
				return false;
			api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			api_nh->ifindex = ifindex;
		} else {
			api_nh->type = NEXTHOP_TYPE_IPV6;
			api_nh->ifindex = 0;
		}
	} else {
		if (IN6_IS_ADDR_LINKLOCAL(nexthop)) {
			if (pi == best_pi
			    && attr->mp_nexthop_len
				       == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
				if (pi->peer->nexthop.ifp)
					ifindex =
						pi->peer->nexthop.ifp->ifindex;
			if (!ifindex) {
				if (pi->peer->conf_if)
					ifindex = pi->peer->ifp->ifindex;
				else if (pi->peer->ifname)
					ifindex = ifname2ifindex(
						pi->peer->ifname,
						pi->peer->bgp->vrf_id);
				else if (pi->peer->nexthop.ifp)
					ifindex =
						pi->peer->nexthop.ifp->ifindex;
			}

			if (ifindex == 0)
				return false;
			api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
			api_nh->ifindex = ifindex;
		} else {
			api_nh->type = NEXTHOP_TYPE_IPV6;
			api_nh->ifindex = 0;
		}
	}
	if (nexthop)
		api_nh->gate.ipv6 = *nexthop;

	return true;
}
|
|
|
|
|
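/*
 * Work out the weight to advertise for one nexthop when weighted ECMP
 * (link-bandwidth based) is in effect.  The weight is the path's share of
 * the cumulative bandwidth, scaled to 0-100: weight = link_bw * 100 / tot_bw.
 * For example, a path carrying 25 units out of a 100-unit total gets weight
 * 25.  Paths without a link-bandwidth value either get the default weight or,
 * if the operator chose to skip paths with missing bandwidth, are left out
 * entirely (return false).
 */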
static bool bgp_zebra_use_nhop_weighted(struct bgp *bgp, struct attr *attr,
					uint64_t tot_bw, uint32_t *nh_weight)
{
	uint32_t bw;
	uint64_t tmp;

	bw = attr->link_bw;
	/* zero link-bandwidth and link-bandwidth not present are treated
	 * as the same situation.
	 */
	if (!bw) {
		/* the only situations should be if we're either told
		 * to skip or use default weight.
		 */
		if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING)
			return false;
		*nh_weight = BGP_ZEBRA_DEFAULT_NHOP_WEIGHT;
	} else {
		tmp = (uint64_t)bw * 100;
		*nh_weight = ((uint32_t)(tmp / tot_bw));
	}

	return true;
}

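/*
 * Install (or, when no usable nexthop remains, withdraw) a single BGP path
 * and its multipaths in zebra.  This builds one zapi_route: flags such as
 * ALLOW_RECURSION/IBGP/EVPN, optional table id and SR-TE color, then one
 * zapi_nexthop per usable (multi)path with its labels, RMAC, weight and,
 * for EVPN ES paths, a shared L3 nexthop-group id.
 */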
void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
			struct bgp_path_info *info, struct bgp *bgp, afi_t afi,
			safi_t safi)
{
	struct zapi_route api = { 0 };
	struct zapi_nexthop *api_nh;
	int nh_family;
	unsigned int valid_nh_count = 0;
	int has_valid_label = 0;
	uint8_t distance;
	struct peer *peer;
	struct bgp_path_info *mpinfo;
	uint32_t metric;
	struct attr local_attr;
	struct bgp_path_info local_info;
	struct bgp_path_info *mpinfo_cp = &local_info;
	route_tag_t tag;
	mpls_label_t label;
	int nh_othervrf = 0;
	char buf_prefix[PREFIX_STRLEN]; /* filled in if we are debugging */
	bool is_evpn;
	bool nh_updated = false;
	bool do_wt_ecmp;
	uint64_t cum_bw = 0;
	uint32_t nhg_id = 0;
	bool is_add;

	/* Don't try to install if we're not connected to Zebra or Zebra doesn't
	 * know of this instance.
	 */
	if (!bgp_install_info_to_zebra(bgp))
		return;

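	/* bgpd may be holding off zebra updates for the main table
	 * (e.g. while converging during update-delay); skip installs
	 * while that hold is in effect.
	 */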
	if (bgp->main_zebra_update_hold)
		return;

	if (bgp_debug_zebra(p))
		prefix2str(p, buf_prefix, sizeof(buf_prefix));

	if (safi == SAFI_FLOWSPEC) {
		bgp_pbr_update_entry(bgp, bgp_dest_get_prefix(dest), info, afi,
				     safi, true);
		return;
	}

	/*
	 * vrf leaking support (will have only one nexthop)
	 */
	if (info->extra && info->extra->bgp_orig)
		nh_othervrf = 1;

	/* Make Zebra API structure. */
	api.vrf_id = bgp->vrf_id;
	api.type = ZEBRA_ROUTE_BGP;
	api.safi = safi;
	api.prefix = *p;
	SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);

	peer = info->peer;

	if (info->type == ZEBRA_ROUTE_BGP
	    && info->sub_type == BGP_ROUTE_IMPORTED) {

		/* Obtain peer from parent */
		if (info->extra && info->extra->parent)
			peer = ((struct bgp_path_info *)(info->extra->parent))
				       ->peer;
	}

	tag = info->attr->tag;

	/* If the route's source is EVPN, flag as such. */
	is_evpn = is_route_parent_evpn(info);
	if (is_evpn)
		SET_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE);

	if (peer->sort == BGP_PEER_IBGP || peer->sort == BGP_PEER_CONFED
	    || info->sub_type == BGP_ROUTE_AGGREGATE) {
		SET_FLAG(api.flags, ZEBRA_FLAG_IBGP);
		SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
	}

	if ((peer->sort == BGP_PEER_EBGP && peer->ttl != BGP_DEFAULT_TTL)
	    || CHECK_FLAG(peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK)
	    || CHECK_FLAG(bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK))
		SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);

	if (info->attr->rmap_table_id) {
		SET_FLAG(api.message, ZAPI_MESSAGE_TABLEID);
		api.tableid = info->attr->rmap_table_id;
	}

	if (CHECK_FLAG(info->attr->flag, ATTR_FLAG_BIT(BGP_ATTR_SRTE_COLOR)))
		SET_FLAG(api.message, ZAPI_MESSAGE_SRTE);

	/* Metric is currently based on the best-path only */
	metric = info->attr->med;

	/* Determine if we're doing weighted ECMP or not */
	do_wt_ecmp = bgp_path_info_mpath_chkwtd(bgp, info);
	if (do_wt_ecmp)
		cum_bw = bgp_path_info_mpath_cumbw(info);

	/* EVPN MAC-IP routes are installed with a L3 NHG id */
	if (bgp_evpn_path_es_use_nhg(bgp, info, &nhg_id)) {
		mpinfo = NULL;
		api.nhgid = nhg_id;
		if (nhg_id)
			SET_FLAG(api.message, ZAPI_MESSAGE_NHG);
	} else {
		mpinfo = info;
	}

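	/* Walk the best path and its multipaths (unless an EVPN nexthop
	 * group is in use, in which case no per-path nexthops are sent)
	 * and build up to multipath_num zapi nexthops.
	 */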
	for (; mpinfo; mpinfo = bgp_path_info_mpath_next(mpinfo)) {
		uint32_t nh_weight;

		if (valid_nh_count >= multipath_num)
			break;

		*mpinfo_cp = *mpinfo;
		nh_weight = 0;

		/* Get nexthop address-family */
		if (p->family == AF_INET
		    && !BGP_ATTR_NEXTHOP_AFI_IP6(mpinfo_cp->attr))
			nh_family = AF_INET;
		else if (p->family == AF_INET6
			 || (p->family == AF_INET
			     && BGP_ATTR_NEXTHOP_AFI_IP6(mpinfo_cp->attr)))
			nh_family = AF_INET6;
		else
			continue;

		/* If processing for weighted ECMP, determine the next hop's
		 * weight. Based on user setting, we may skip the next hop
		 * in some situations.
		 */
		if (do_wt_ecmp) {
			if (!bgp_zebra_use_nhop_weighted(bgp, mpinfo->attr,
							 cum_bw, &nh_weight))
				continue;
		}
		api_nh = &api.nexthops[valid_nh_count];

		if (CHECK_FLAG(info->attr->flag,
			       ATTR_FLAG_BIT(BGP_ATTR_SRTE_COLOR)))
			api_nh->srte_color = info->attr->srte_color;

		if (bgp_debug_zebra(&api.prefix)) {
			if (mpinfo->extra) {
				zlog_debug("%s: p=%s, bgp_is_valid_label: %d",
					   __func__, buf_prefix,
					   bgp_is_valid_label(
						   &mpinfo->extra->label[0]));
			} else {
				zlog_debug("%s: p=%s, extra is NULL, no label",
					   __func__, buf_prefix);
			}
		}

		if (bgp->table_map[afi][safi].name) {
			/* Copy info and attributes, so the route-map
			   apply doesn't modify the BGP route info. */
			local_attr = *mpinfo->attr;
			mpinfo_cp->attr = &local_attr;
			if (!bgp_table_map_apply(bgp->table_map[afi][safi].map,
						 p, mpinfo_cp))
				continue;

			/* metric/tag is only allowed to be
			 * overridden on 1st nexthop */
			if (mpinfo == info) {
				metric = mpinfo_cp->attr->med;
				tag = mpinfo_cp->attr->tag;
			}
		}

		if (nh_family == AF_INET) {
			nh_updated = update_ipv4nh_for_route_install(
				nh_othervrf,
				nh_othervrf ? info->extra->bgp_orig : bgp,
				&mpinfo_cp->attr->nexthop,
				mpinfo_cp->attr, is_evpn, api_nh);
		} else {
			ifindex_t ifindex = IFINDEX_INTERNAL;
			struct in6_addr *nexthop;

			nexthop = bgp_path_info_to_ipv6_nexthop(mpinfo_cp,
								&ifindex);
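			/* An IPv6 prefix can arrive with only an IPv4
			 * nexthop (e.g. injected via exabgp); fall back to
			 * the IPv4 handler instead of dereferencing a NULL
			 * v6 nexthop.
			 */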
			if (!nexthop)
				nh_updated = update_ipv4nh_for_route_install(
					nh_othervrf,
					nh_othervrf ? info->extra->bgp_orig
						    : bgp,
					&mpinfo_cp->attr->nexthop,
					mpinfo_cp->attr, is_evpn, api_nh);
			else
				nh_updated = update_ipv6nh_for_route_install(
					nh_othervrf,
					nh_othervrf ? info->extra->bgp_orig
						    : bgp,
					nexthop, ifindex, mpinfo, info, is_evpn,
					api_nh);
		}

		/* Did we get proper nexthop info to update zebra? */
		if (!nh_updated)
			continue;

		if (mpinfo->extra
		    && bgp_is_valid_label(&mpinfo->extra->label[0])
		    && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
			has_valid_label = 1;
			label = label_pton(&mpinfo->extra->label[0]);

			SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_LABEL);

			api_nh->label_num = 1;
			api_nh->labels[0] = label;
		}
		memcpy(&api_nh->rmac, &(mpinfo->attr->rmac),
		       sizeof(struct ethaddr));
		api_nh->weight = nh_weight;

		valid_nh_count++;
	}

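	/* No usable nexthop and no nexthop-group means the route is being
	 * withdrawn from zebra rather than installed.  When the operator has
	 * asked for extra data to be shared with zebra, the AS path string is
	 * also attached as opaque data (e.g. for display in zebra's detailed
	 * route output).
	 */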
	is_add = (valid_nh_count || nhg_id) ? true : false;

	if (is_add && CHECK_FLAG(bm->flags, BM_FLAG_SEND_EXTRA_DATA_TO_ZEBRA)) {
		struct aspath *aspath = info->attr->aspath;

		SET_FLAG(api.message, ZAPI_MESSAGE_OPAQUE);
		api.opaque.length = strlen(aspath->str) + 1;
		memcpy(api.opaque.data, aspath->str, api.opaque.length);
	}

	/*
	 * When we create an aggregate route we must also
	 * install a Null0 route in the RIB, so overwrite
	 * what was written into api with a blackhole route
	 */
	if (info->sub_type == BGP_ROUTE_AGGREGATE)
		zapi_route_set_blackhole(&api, BLACKHOLE_NULL);
	else
		api.nexthop_num = valid_nh_count;

	SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
	api.metric = metric;

	if (tag) {
		SET_FLAG(api.message, ZAPI_MESSAGE_TAG);
		api.tag = tag;
	}

	distance = bgp_distance_apply(p, info, afi, safi, bgp);
	if (distance) {
		SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
		api.distance = distance;
	}

	if (bgp_debug_zebra(p)) {
		char nh_buf[INET6_ADDRSTRLEN];
		char eth_buf[ETHER_ADDR_STRLEN + 7] = {'\0'};
		char buf1[ETHER_ADDR_STRLEN];
		char label_buf[20];
		int i;

		zlog_debug(
			"Tx route %s VRF %u %pFX metric %u tag %" ROUTE_TAG_PRI
			" count %d nhg %d",
			valid_nh_count ? "add" : "delete", bgp->vrf_id,
			&api.prefix, api.metric, api.tag, api.nexthop_num,
			nhg_id);
		for (i = 0; i < api.nexthop_num; i++) {
			api_nh = &api.nexthops[i];

			switch (api_nh->type) {
			case NEXTHOP_TYPE_IFINDEX:
				nh_buf[0] = '\0';
				break;
			case NEXTHOP_TYPE_IPV4:
			case NEXTHOP_TYPE_IPV4_IFINDEX:
				nh_family = AF_INET;
				inet_ntop(nh_family, &api_nh->gate, nh_buf,
					  sizeof(nh_buf));
				break;
			case NEXTHOP_TYPE_IPV6:
			case NEXTHOP_TYPE_IPV6_IFINDEX:
				nh_family = AF_INET6;
				inet_ntop(nh_family, &api_nh->gate, nh_buf,
					  sizeof(nh_buf));
				break;
			case NEXTHOP_TYPE_BLACKHOLE:
				strlcpy(nh_buf, "blackhole", sizeof(nh_buf));
				break;
			default:
				/* Note: add new nexthop case */
				assert(0);
				break;
			}

			label_buf[0] = '\0';
			eth_buf[0] = '\0';
			if (has_valid_label
			    && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
				snprintf(label_buf, sizeof(label_buf),
					 "label %u", api_nh->labels[0]);
			if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)
			    && !is_zero_mac(&api_nh->rmac))
				snprintf(eth_buf, sizeof(eth_buf), " RMAC %s",
					 prefix_mac2str(&api_nh->rmac,
							buf1, sizeof(buf1)));
			zlog_debug("  nhop [%d]: %s if %u VRF %u wt %u %s %s",
				   i + 1, nh_buf, api_nh->ifindex,
				   api_nh->vrf_id, api_nh->weight,
				   label_buf, eth_buf);
		}

		int recursion_flag = 0;

		if (CHECK_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION))
			recursion_flag = 1;

		zlog_debug("%s: %s: announcing to zebra (recursion %sset)",
			   __func__, buf_prefix,
			   (recursion_flag ? "" : "NOT "));
	}
	zclient_route_send(is_add ? ZEBRA_ROUTE_ADD : ZEBRA_ROUTE_DELETE,
			   zclient, &api);
}

/* Announce all routes of a table to zebra */
void bgp_zebra_announce_table(struct bgp *bgp, afi_t afi, safi_t safi)
{
	struct bgp_dest *dest;
	struct bgp_table *table;
	struct bgp_path_info *pi;

	/* Don't try to install if we're not connected to Zebra or Zebra doesn't
	 * know of this instance.
	 */
	if (!bgp_install_info_to_zebra(bgp))
		return;

	table = bgp->rib[afi][safi];
	if (!table)
		return;

	for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest))
		for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next)
			if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) &&
			    (pi->type == ZEBRA_ROUTE_BGP
			     && (pi->sub_type == BGP_ROUTE_NORMAL
				 || pi->sub_type == BGP_ROUTE_IMPORTED)))
				bgp_zebra_announce(dest,
						   bgp_dest_get_prefix(dest),
						   pi, bgp, afi, safi);
}

/* Announce routes of any bgp subtype of a table to zebra */
void bgp_zebra_announce_table_all_subtypes(struct bgp *bgp, afi_t afi,
					   safi_t safi)
{
	struct bgp_dest *dest;
	struct bgp_table *table;
	struct bgp_path_info *pi;

	if (!bgp_install_info_to_zebra(bgp))
		return;

	table = bgp->rib[afi][safi];
	if (!table)
		return;

	for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest))
		for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next)
			if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED) &&
			    pi->type == ZEBRA_ROUTE_BGP)
				bgp_zebra_announce(dest,
						   bgp_dest_get_prefix(dest),
						   pi, bgp, afi, safi);
}

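/*
 * Ask zebra to delete a previously installed BGP route.  Flowspec entries are
 * removed through the PBR machinery instead, and EVPN-sourced routes keep the
 * EVPN flag so zebra cleans up the right state.
 */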
void bgp_zebra_withdraw(const struct prefix *p, struct bgp_path_info *info,
			struct bgp *bgp, safi_t safi)
{
	struct zapi_route api;
	struct peer *peer;

	/* Don't try to install if we're not connected to Zebra or Zebra doesn't
	 * know of this instance.
	 */
	if (!bgp_install_info_to_zebra(bgp))
		return;

	if (safi == SAFI_FLOWSPEC) {
		peer = info->peer;
		bgp_pbr_update_entry(peer->bgp, p, info, AFI_IP, safi, false);
		return;
	}

	memset(&api, 0, sizeof(api));
	api.vrf_id = bgp->vrf_id;
	api.type = ZEBRA_ROUTE_BGP;
	api.safi = safi;
	api.prefix = *p;

	if (info->attr->rmap_table_id) {
		SET_FLAG(api.message, ZAPI_MESSAGE_TABLEID);
		api.tableid = info->attr->rmap_table_id;
	}

	/* If the route's source is EVPN, flag as such. */
	if (is_route_parent_evpn(info))
		SET_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE);

	if (bgp_debug_zebra(p))
		zlog_debug("Tx route delete VRF %u %pFX", bgp->vrf_id,
			   &api.prefix);

	zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
}

/* Withdraw all entries in a BGP instance's RIB table from Zebra */
void bgp_zebra_withdraw_table_all_subtypes(struct bgp *bgp, afi_t afi, safi_t safi)
{
	struct bgp_dest *dest;
	struct bgp_table *table;
	struct bgp_path_info *pi;

	if (!bgp_install_info_to_zebra(bgp))
		return;

	table = bgp->rib[afi][safi];
	if (!table)
		return;

	for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
		for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
			if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)
			    && (pi->type == ZEBRA_ROUTE_BGP))
				bgp_zebra_withdraw(bgp_dest_get_prefix(dest),
						   pi, bgp, safi);
		}
	}
}

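/*
 * Redistribution bookkeeping: each (protocol type, instance) pair being
 * redistributed into BGP gets a bgp_redist entry on the per-AFI list
 * bgp->redist[afi][type].  The helpers below look up, add and delete those
 * entries.
 */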
struct bgp_redist *bgp_redist_lookup(struct bgp *bgp, afi_t afi, uint8_t type,
				     unsigned short instance)
{
	struct list *red_list;
	struct listnode *node;
	struct bgp_redist *red;

	red_list = bgp->redist[afi][type];
	if (!red_list)
		return (NULL);

	for (ALL_LIST_ELEMENTS_RO(red_list, node, red))
		if (red->instance == instance)
			return red;

	return NULL;
}

struct bgp_redist *bgp_redist_add(struct bgp *bgp, afi_t afi, uint8_t type,
				  unsigned short instance)
{
	struct list *red_list;
	struct bgp_redist *red;

	red = bgp_redist_lookup(bgp, afi, type, instance);
	if (red)
		return red;

	if (!bgp->redist[afi][type])
		bgp->redist[afi][type] = list_new();

	red_list = bgp->redist[afi][type];
	red = XCALLOC(MTYPE_BGP_REDIST, sizeof(struct bgp_redist));
	red->instance = instance;

	listnode_add(red_list, red);

	return red;
}

static void bgp_redist_del(struct bgp *bgp, afi_t afi, uint8_t type,
			   unsigned short instance)
|
|
|
{
|
|
|
|
struct bgp_redist *red;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
	red = bgp_redist_lookup(bgp, afi, type, instance);

|
|
|
if (red) {
|
|
|
|
listnode_delete(bgp->redist[afi][type], red);
|
2016-10-22 00:13:51 +02:00
|
|
|
XFREE(MTYPE_BGP_REDIST, red);
|
2017-09-28 03:19:20 +02:00
|
|
|
if (!bgp->redist[afi][type]->count)
|
2018-10-02 11:39:51 +02:00
|
|
|
list_delete(&bgp->redist[afi][type]);
|
2015-05-20 03:03:42 +02:00
|
|
|
}
|
|
|
|
}
|
2014-06-04 06:53:35 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Other routes redistribution into BGP. */
|
2018-03-27 21:13:34 +02:00
|
|
|
int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
|
2018-08-22 14:00:15 +02:00
|
|
|
unsigned short instance, bool changed)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2018-08-22 14:00:15 +02:00
|
|
|
/* If redistribute options are changed, call
|
|
|
|
* bgp_redistribute_unreg() to reset the option and withdraw
|
|
|
|
* the routes
|
|
|
|
*/
|
|
|
|
if (changed)
|
|
|
|
bgp_redistribute_unreg(bgp, afi, type, instance);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
/* Return if already redistribute flag is set. */
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
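As a minimal sketch of the layout above (an illustration only, not the library's
actual zclient_create_header()), a version-3 header could be serialized with the
stream helpers roughly as follows; the marker constant name is an assumption here.

/* Sketch: serialize the version-3 ZAPI header described above.
 * Field widths: Length(2) Marker(1) Version(1) VRF-ID(2) Command(2). */
static void zapi_v3_header_sketch(struct stream *s, uint16_t length,
				  uint16_t vrf_id, uint16_t command)
{
	stream_putw(s, length);              /* Length  (2 bytes) */
	stream_putc(s, ZEBRA_HEADER_MARKER); /* Marker  (1 byte)  */
	stream_putc(s, 3);                   /* Version (1 byte)  */
	stream_putw(s, vrf_id);              /* VRF ID  (2 bytes, newly added) */
	stream_putw(s, command);             /* Command (2 bytes) */
}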
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All related functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special: it is used as the heart-beat of
a client and has no relation to any VRF. The VRF ID in the HELLO
message header will always be 0 and is ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All related functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care about:
Some clients may not care about the information in VRF X, and
zebra should not send the messages in VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced so that a client
can unregister a VRF when it does not need any information in that
VRF.
A client that sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically be registered to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
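A minimal usage sketch of the bit-map calls listed above follows; the signatures
are assumed from the description and may differ between versions.

/* Sketch: one per-VRF registration flag, one bit per VRF ID. */
static void vrf_bitmap_usage_sketch(vrf_id_t vrf_id)
{
	vrf_bitmap_t redist_flag = vrf_bitmap_init();

	vrf_bitmap_set(redist_flag, vrf_id);   /* client registered this VRF */
	if (vrf_bitmap_check(redist_flag, vrf_id)) {
		/* OK to send messages for this VRF to the client. */
	}
	vrf_bitmap_unset(redist_flag, vrf_id); /* e.g. on ZEBRA_VRF_UNREGISTER */

	vrf_bitmap_free(redist_flag);
}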
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be sent to the client.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Clients that support only the default VRF will never receive
a message in a non-default VRF, thanks to this filtering in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is split, keeping only the code that connects
to zebra.
- Now the zclient_init()=>zclient_connect()=>zclient_start() operations
deal purely with the connection to zebra.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about the connection.
- For now, in the callback for the connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of course, in the future the client can do anything it wants in this
callback. For example, it may send requests for both the default VRF
and some non-default VRFs.
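A rough sketch of how a daemon might use such a connected callback is shown
below; the zebra_connected field name and the zclient_send_reg_requests()
helper follow later FRR conventions and are assumptions here, not part of
this commit.

/* Sketch: callback run once the zclient socket to zebra is up. */
static void bgp_zebra_connected_sketch(struct zclient *zc)
{
	/* Request router-id/interface/route information for the default VRF;
	 * a daemon could also register additional VRFs from here. */
	zclient_send_reg_requests(zc, VRF_DEFAULT);
}

/* At init time the daemon would hook it up, e.g.:
 *     zclient->zebra_connected = bgp_zebra_connected_sketch;  */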
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
if (instance) {
|
|
|
|
if (redist_check_instance(&zclient->mi_redist[afi][type],
|
|
|
|
instance))
|
|
|
|
return CMD_WARNING;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2014-10-16 03:52:36 +02:00
|
|
|
redist_add_instance(&zclient->mi_redist[afi][type], instance);
|
|
|
|
} else {
|
2016-02-02 13:36:20 +01:00
|
|
|
if (vrf_bitmap_check(zclient->redist[afi][type], bgp->vrf_id))
|
2014-10-16 03:52:36 +02:00
|
|
|
return CMD_WARNING;
|
|
|
|
|
2020-04-01 21:05:26 +02:00
|
|
|
#ifdef ENABLE_BGP_VNC
|
2019-03-22 13:37:06 +01:00
|
|
|
if (EVPN_ENABLED(bgp) && type == ZEBRA_ROUTE_VNC_DIRECT) {
|
bgpd: add L3/L2VPN Virtual Network Control feature
This feature adds an L3 & L2 VPN application that makes use of the VPN
and Encap SAFIs. This code is currently used to support IETF NVO3 style
operation. In NVO3 terminology it provides the Network Virtualization
Authority (NVA) and the ability to import/export IP prefixes and MAC
addresses from Network Virtualization Edges (NVEs). The code supports
per-NVE tables.
The NVE-NVA protocol used to communicate routing and Ethernet / Layer 2
(L2) forwarding information between NVAs and NVEs is referred to as the
Remote Forwarder Protocol (RFP). OpenFlow is an example RFP. For
general background on NVO3 and RFP concepts see [1]. For information on
Openflow see [2].
RFPs are integrated with BGP via the RF API contained in the new "rfapi"
BGP sub-directory. Currently, only a simple example RFP is included in
Quagga. Developers may use this example as a starting point to integrate
Quagga with an RFP of their choosing, e.g., OpenFlow. The RFAPI code
also supports the ability to import/export routing information between
VNC and customer edge routers (CEs) operating within a virtual
network. Import/export may take place between BGP views or to the
default zebra VRF.
BGP, with IP VPNs and Tunnel Encapsulation, is used to distribute VPN
information between NVAs. BGP based IP VPN support is defined in
RFC4364, BGP/MPLS IP Virtual Private Networks (VPNs), and RFC4659,
BGP-MPLS IP Virtual Private Network (VPN) Extension for IPv6 VPN . Use
of both the Encapsulation Subsequent Address Family Identifier (SAFI)
and the Tunnel Encapsulation Attribute, RFC5512, The BGP Encapsulation
Subsequent Address Family Identifier (SAFI) and the BGP Tunnel
Encapsulation Attribute, are supported. MAC address distribution does
not follow any standard BGP encoding, although it was inspired by the
early IETF EVPN concepts.
The feature is conditionally compiled and disabled by default.
Use the --enable-bgp-vnc configure option to enable.
The majority of this code was authored by G. Paul Ziemba
<paulz@labn.net>.
[1] http://tools.ietf.org/html/draft-ietf-nvo3-nve-nva-cp-req
[2] https://www.opennetworking.org/sdn-resources/technical-library
Now includes changes needed to merge with cmaster-next.
2016-05-07 20:18:56 +02:00
|
|
|
vnc_export_bgp_enable(
|
|
|
|
bgp, afi); /* only enables if mode bits cfg'd */
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-02-02 13:36:20 +01:00
|
|
|
vrf_bitmap_set(zclient->redist[afi][type], bgp->vrf_id);
|
2014-10-16 03:52:36 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-10-26 04:38:50 +02:00
|
|
|
/*
|
|
|
|
* Don't try to register if we're not connected to Zebra or Zebra
|
|
|
|
* doesn't know of this instance.
|
|
|
|
*
|
|
|
|
* When we come up later we'll resend if needed.
|
2016-02-12 21:18:28 +01:00
|
|
|
*/
|
|
|
|
if (!bgp_install_info_to_zebra(bgp))
|
2017-10-26 04:38:50 +02:00
|
|
|
return CMD_SUCCESS;
|
[bgpd] Implement 'debug bgp zebra' to log all messages to and from zebra.
2006-11-30 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
* bgp_debug.h: Declare new bgp_debug_zebra conf and term flags,
and define BGP_DEBUG_ZEBRA.
* bgp_debug.c: Declare conf_bgp_debug_zebra and term_bgp_debug_zebra.
(debug_bgp_zebra, no_debug_bgp_zebra, undebug_bgp_zebra) New
functions to enable/disable bgp zebra debugging.
(no_debug_bgp_all) Turn off zebra debugging.
(show_debugging_bgp) Show whether zebra debugging is on.
(bgp_config_write_debug) Add 'debug bgp zebra' if configured.
(bgp_debug_init) Add new zebra debugging commands.
* bgp_zebra.c: (bgp_router_id_update, bgp_interface_add,
bgp_interface_delete, bgp_interface_up, bgp_interface_down,
bgp_interface_address_add, bgp_interface_address_delete,
zebra_read_ipv4, zebra_read_ipv6, bgp_zebra_announce,
bgp_zebra_withdraw, bgp_redistribute_set, bgp_redistribute_unset)
If zebra debugging is enabled, log an appropriate debug message.
2006-11-30 17:36:57 +01:00
|
|
|
|
2015-05-20 02:58:12 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2016-02-12 21:18:28 +01:00
|
|
|
zlog_debug("Tx redistribute add VRF %u afi %d %s %d",
|
2015-05-20 03:03:45 +02:00
|
|
|
bgp->vrf_id, afi, zebra_route_string(type),
|
|
|
|
instance);
|
2015-05-20 02:40:45 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Send distribute add message to zebra. */
|
2014-10-16 03:52:36 +02:00
|
|
|
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, zclient, afi, type,
|
2016-02-02 13:36:20 +01:00
|
|
|
instance, bgp->vrf_id);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2015-05-20 03:03:42 +02:00
|
|
|
int bgp_redistribute_resend(struct bgp *bgp, afi_t afi, int type,
|
2018-03-27 21:13:34 +02:00
|
|
|
unsigned short instance)
|
2015-05-20 02:40:45 +02:00
|
|
|
{
|
2016-02-12 21:18:28 +01:00
|
|
|
/* Don't try to send if we're not connected to Zebra or Zebra doesn't
|
|
|
|
* know of this instance.
|
|
|
|
*/
|
|
|
|
if (!bgp_install_info_to_zebra(bgp))
|
2015-05-20 02:40:45 +02:00
|
|
|
return -1;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:58:12 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2016-02-12 21:18:28 +01:00
|
|
|
zlog_debug("Tx redistribute del/add VRF %u afi %d %s %d",
|
2015-05-20 03:03:45 +02:00
|
|
|
bgp->vrf_id, afi, zebra_route_string(type),
|
|
|
|
instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:45 +02:00
|
|
|
/* Send distribute delete and then add messages to zebra. */
|
2014-10-16 03:52:36 +02:00
|
|
|
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_DELETE, zclient, afi, type,
|
2016-04-14 03:41:58 +02:00
|
|
|
instance, bgp->vrf_id);
|
2014-10-16 03:52:36 +02:00
|
|
|
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, zclient, afi, type,
|
2016-04-14 03:41:58 +02:00
|
|
|
instance, bgp->vrf_id);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:45 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Redistribute with route-map specification. */
|
2020-03-20 10:57:54 +01:00
|
|
|
bool bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name,
|
|
|
|
struct route_map *route_map)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2015-05-20 03:03:42 +02:00
|
|
|
if (red->rmap.name && (strcmp(red->rmap.name, name) == 0))
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_ROUTE_MAP_NAME, red->rmap.name);
|
2019-02-04 14:27:56 +01:00
|
|
|
/* Decrement the count for existing routemap and
|
|
|
|
* increment the count for new route map.
|
|
|
|
*/
|
|
|
|
route_map_counter_decrement(red->rmap.map);
|
2015-08-26 16:44:57 +02:00
|
|
|
red->rmap.name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, name);
|
2018-09-14 10:56:46 +02:00
|
|
|
red->rmap.map = route_map;
|
2019-02-04 14:27:56 +01:00
|
|
|
route_map_counter_increment(red->rmap.map);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Redistribute with metric specification. */
|
2020-03-20 10:57:54 +01:00
|
|
|
bool bgp_redistribute_metric_set(struct bgp *bgp, struct bgp_redist *red,
|
|
|
|
afi_t afi, int type, uint32_t metric)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *dest;
|
2018-10-03 02:43:07 +02:00
|
|
|
struct bgp_path_info *pi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:42 +02:00
|
|
|
if (red->redist_metric_flag && red->redist_metric == metric)
|
2020-03-20 10:57:54 +01:00
|
|
|
return false;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Multi-Instance OSPF Summary
---------------------------
- etc/init.d/quagga is modified to support creating a separate ospf daemon
process for each instance. Each individual instance is monitored by
watchquagga just like any other protocol daemon (requires initd-mi.patch).
- Vtysh is modified to be able to connect to multiple daemons of the same
protocol (supported for OSPF only for now).
- ospfd is modified to remember the Instance-ID that it is invoked with. For
the entire life of the process it caters to any command request that
matches that Instance-ID (unless it is a non-instance-specific command).
Routes/messages to zebra are tagged with the Instance-ID.
- zebra route/redistribute mechanisms are modified to work with
[protocol type + instance-id].
- bgpd now has the ability to have multiple instance-specific redistributions
for a protocol (OSPF only supported/tested for now; see the sketch after
this message).
- zlog gains the ability to display the instance-id besides the
protocol/daemon name.
- Changes in other daemons are due to the needed integration with
some of the modified APIs/routines. (Preferred not to replicate too many
separate instance-specific APIs.)
- config/show/debug commands are modified to take an instance-id argument
as appropriate.
Guidelines to start using multi-instance ospf
---------------------------------------------
The patch is backward compatible, i.e. the previous way of running a single
ospf daemon (router ospf <cr>) will continue to work as is, including all the
show commands etc.
To enable multiple instances, do the following:
1. service quagga stop
2. Modify /etc/quagga/daemons to add the instance-ids of each desired
instance in the following format:
ospfd="yes"
ospfd_instances="1,2,3"
assuming you want to enable 3 instances with those instance ids.
3. Create the corresponding ospfd config files as ospfd-1.conf, ospfd-2.conf
and ospfd-3.conf.
4. service quagga start/restart
5. Verify that the daemons are started as expected. You should see
ospfd started with the -n <instance-id> option.
ps -ef | grep quagga
With that, /var/run/quagga/ should have an ospfd-<instance-id>.pid and
an ospfd-<instance-id>/vty for each instance.
6. Use vtysh to work with instances as you would with any other daemon.
7. Overall, most quagga semantics are the same when working with an instance
daemon, just like for any other daemon.
NOTE:
To safeguard against errors leading to too many processes getting invoked,
a hard limit on the number of instance-ids is in place; currently it is 5.
The allowed instance-id range is <1-65535>.
Once the daemons are up, show running from vtysh should show the instance-id
of each daemon as 'router ospf <instance-id>' (without needing explicit
configuration).
The instance-id cannot be changed via vtysh; other router ospf configuration
is allowed as before.
Signed-off-by: Vipin Kumar <vipin@cumulusnetworks.com>
Reviewed-by: Daniel Walton <dwalton@cumulusnetworks.com>
Reviewed-by: Dinesh G Dutt <ddutt@cumulusnetworks.com>
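The redistribution bullet above maps onto zclient calls that appear later in
this file. Below is a minimal, illustrative sketch (not part of the original
change) of how a client such as bgpd could request routes from two specific
OSPF instances; the instance numbers and the helper name are assumptions,
and redist_add_instance() is taken to be the add counterpart of the
redist_del_instance() call seen further down, next to
zebra_redistribute_send().

/*
 * Illustrative sketch only: register interest in routes from OSPF
 * instances 1 and 2 and ask zebra to redistribute them.
 */
static void example_register_ospf_instances(struct zclient *zclient,
					    vrf_id_t vrf_id)
{
	unsigned short instances[2] = {1, 2};
	int i;

	for (i = 0; i < 2; i++) {
		/* Remember the instance on the client side ... */
		redist_add_instance(
			&zclient->mi_redist[AFI_IP][ZEBRA_ROUTE_OSPF],
			instances[i]);
		/* ... and request redistribution from zebra for it. */
		zebra_redistribute_send(ZEBRA_REDISTRIBUTE_ADD, zclient,
					AFI_IP, ZEBRA_ROUTE_OSPF,
					instances[i], vrf_id);
	}
}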
2015-05-20 03:03:42 +02:00
|
|
|
red->redist_metric_flag = 1;
|
|
|
|
red->redist_metric = metric;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
for (dest = bgp_table_top(bgp->rib[afi][SAFI_UNICAST]); dest;
|
|
|
|
dest = bgp_route_next(dest)) {
|
|
|
|
for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) {
|
2018-10-03 02:43:07 +02:00
|
|
|
if (pi->sub_type == BGP_ROUTE_REDISTRIBUTE
|
|
|
|
&& pi->type == type
|
|
|
|
&& pi->instance == red->instance) {
|
2016-07-26 19:45:51 +02:00
|
|
|
struct attr *old_attr;
|
|
|
|
struct attr new_attr;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-12-03 22:01:19 +01:00
|
|
|
new_attr = *pi->attr;
|
2016-07-26 19:45:51 +02:00
|
|
|
new_attr.med = red->redist_metric;
|
2018-10-03 02:43:07 +02:00
|
|
|
old_attr = pi->attr;
|
|
|
|
pi->attr = bgp_attr_intern(&new_attr);
|
2016-07-26 19:45:51 +02:00
|
|
|
bgp_attr_unintern(&old_attr);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_path_info_set_flag(dest, pi,
|
2018-10-03 00:15:34 +02:00
|
|
|
BGP_PATH_ATTR_CHANGED);
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_process(bgp, dest, afi, SAFI_UNICAST);
|
2016-07-26 19:45:51 +02:00
|
|
|
}
|
|
|
|
}
|
2015-07-31 14:53:12 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-20 10:57:54 +01:00
|
|
|
return true;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Unset redistribution. */
|
2016-02-02 13:36:20 +01:00
|
|
|
int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type,
|
2018-03-27 21:13:34 +02:00
|
|
|
unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2015-05-20 03:03:42 +02:00
|
|
|
struct bgp_redist *red;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:03:42 +02:00
|
|
|
red = bgp_redist_lookup(bgp, afi, type, instance);
|
|
|
|
if (!red)
|
|
|
|
return CMD_SUCCESS;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Return if zebra connection is disabled. */
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relevant functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relevant functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
unregister a VRF when it does not need any information in that
VRF.
A client that sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically be registered to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, the message is dropped by zebra (not sent to that client).
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is split, keeping only the code that connects
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zebra.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of course, in the future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
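For reference, here is a minimal sketch of the version-3 header layout listed
above, mirroring what the message says zclient_create_header() does on the
client side. The marker/version constant names are assumed to be the usual
lib/zclient ones, and the length word is written as a placeholder and fixed
up later with stream_putw_at(), the same pattern the functions below in this
file use.

/* Illustrative sketch of the version-3 ZAPI header serialization. */
static void example_put_zapi_v3_header(struct stream *s, uint16_t command,
				       vrf_id_t vrf_id)
{
	stream_putw(s, 0);                   /* Length (2 bytes), fixed up later */
	stream_putc(s, ZEBRA_HEADER_MARKER); /* Marker (1 byte)                  */
	stream_putc(s, ZSERV_VERSION);       /* Version (1 byte), now 3          */
	stream_putw(s, vrf_id);              /* VRF ID (2 bytes), newly added    */
	stream_putw(s, command);             /* Command (2 bytes)                */
}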
2014-10-16 03:52:36 +02:00
|
|
|
if (instance) {
|
|
|
|
if (!redist_check_instance(&zclient->mi_redist[afi][type],
|
|
|
|
instance))
|
|
|
|
return CMD_WARNING;
|
|
|
|
redist_del_instance(&zclient->mi_redist[afi][type], instance);
|
|
|
|
} else {
|
2016-02-02 13:36:20 +01:00
|
|
|
if (!vrf_bitmap_check(zclient->redist[afi][type], bgp->vrf_id))
|
2014-10-16 03:52:36 +02:00
|
|
|
return CMD_WARNING;
|
2016-02-02 13:36:20 +01:00
|
|
|
vrf_bitmap_unset(zclient->redist[afi][type], bgp->vrf_id);
|
2014-10-16 03:52:36 +02:00
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2016-02-12 21:18:28 +01:00
|
|
|
if (bgp_install_info_to_zebra(bgp)) {
|
2006-11-30 17:36:57 +01:00
|
|
|
/* Send distribute delete message to zebra. */
|
2015-05-20 02:58:12 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2016-02-12 21:18:28 +01:00
|
|
|
zlog_debug("Tx redistribute del VRF %u afi %d %s %d",
|
|
|
|
bgp->vrf_id, afi, zebra_route_string(type),
|
|
|
|
instance);
|
2014-10-16 03:52:36 +02:00
|
|
|
zebra_redistribute_send(ZEBRA_REDISTRIBUTE_DELETE, zclient, afi,
|
|
|
|
type, instance, bgp->vrf_id);
|
2006-11-30 17:36:57 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Withdraw redistributed routes from current BGP's routing table. */
|
2015-05-20 03:03:42 +02:00
|
|
|
bgp_redistribute_withdraw(bgp, afi, type, instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2016-02-02 13:36:20 +01:00
|
|
|
/* Unset redistribution. */
|
|
|
|
int bgp_redistribute_unset(struct bgp *bgp, afi_t afi, int type,
|
2018-03-27 21:13:34 +02:00
|
|
|
unsigned short instance)
|
2016-02-02 13:36:20 +01:00
|
|
|
{
|
|
|
|
struct bgp_redist *red;
|
|
|
|
|
2018-03-09 21:52:55 +01:00
|
|
|
/*
|
|
|
|
* vnc and vpn->vrf checks must be before red check because
|
|
|
|
* they operate within bgpd irrespective of zebra connection
|
|
|
|
* status. red lookup fails if there is no zebra connection.
|
|
|
|
*/
|
2020-04-01 21:05:26 +02:00
|
|
|
#ifdef ENABLE_BGP_VNC
|
2019-03-22 13:37:06 +01:00
|
|
|
if (EVPN_ENABLED(bgp) && type == ZEBRA_ROUTE_VNC_DIRECT) {
|
2018-03-09 21:52:55 +01:00
|
|
|
vnc_export_bgp_disable(bgp, afi);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-02-02 13:36:20 +01:00
|
|
|
red = bgp_redist_lookup(bgp, afi, type, instance);
|
|
|
|
if (!red)
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
|
|
|
|
bgp_redistribute_unreg(bgp, afi, type, instance);
|
|
|
|
|
|
|
|
/* Unset route-map. */
|
2019-02-25 21:18:13 +01:00
|
|
|
XFREE(MTYPE_ROUTE_MAP_NAME, red->rmap.name);
|
2019-02-04 14:27:56 +01:00
|
|
|
route_map_counter_decrement(red->rmap.map);
|
2016-02-02 13:36:20 +01:00
|
|
|
red->rmap.map = NULL;
|
|
|
|
|
|
|
|
/* Unset metric. */
|
|
|
|
red->redist_metric_flag = 0;
|
|
|
|
red->redist_metric = 0;
|
|
|
|
|
|
|
|
bgp_redist_del(bgp, afi, type, instance);
|
|
|
|
|
|
|
|
return CMD_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2019-03-13 19:41:29 +01:00
|
|
|
void bgp_redistribute_redo(struct bgp *bgp)
|
|
|
|
{
|
|
|
|
afi_t afi;
|
|
|
|
int i;
|
|
|
|
struct list *red_list;
|
|
|
|
struct listnode *node;
|
|
|
|
struct bgp_redist *red;
|
|
|
|
|
|
|
|
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
|
|
|
|
for (i = 0; i < ZEBRA_ROUTE_MAX; i++) {
|
|
|
|
|
|
|
|
red_list = bgp->redist[afi][i];
|
|
|
|
if (!red_list)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (ALL_LIST_ELEMENTS_RO(red_list, node, red)) {
|
|
|
|
bgp_redistribute_resend(bgp, afi, i,
|
|
|
|
red->instance);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-14 16:17:47 +01:00
|
|
|
/* Unset the redistribute VRF bitmaps during triggers like
|
|
|
|
a networking restart or VRF deletion. */
|
|
|
|
void bgp_unset_redist_vrf_bitmaps(struct bgp *bgp, vrf_id_t old_vrf_id)
|
2016-08-06 01:49:39 +02:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
afi_t afi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-08-06 01:49:39 +02:00
|
|
|
for (afi = AFI_IP; afi < AFI_MAX; afi++)
|
|
|
|
for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
|
2019-03-13 18:41:40 +01:00
|
|
|
if (vrf_bitmap_check(zclient->redist[afi][i],
|
2019-03-14 16:17:47 +01:00
|
|
|
old_vrf_id))
|
2016-08-06 01:49:39 +02:00
|
|
|
vrf_bitmap_unset(zclient->redist[afi][i],
|
|
|
|
old_vrf_id);
|
|
|
|
return;
|
|
|
|
}
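The per-VRF flags checked and cleared above are the "VRF bit-map" utility
described in the VRF ID commit message quoted earlier. A minimal usage
sketch, assuming the lib/vrf API named there
(vrf_bitmap_init/set/check/unset/free); the example function and values are
illustrative only.

/* Illustrative sketch: track one flag per VRF ID with the vrf_bitmap API. */
static void example_vrf_bitmap_usage(vrf_id_t vrf_id)
{
	vrf_bitmap_t ospf_redist;

	ospf_redist = vrf_bitmap_init();           /* one bit per VRF ID */
	vrf_bitmap_set(ospf_redist, vrf_id);       /* flag this VRF      */

	if (vrf_bitmap_check(ospf_redist, vrf_id)) /* is the flag set?   */
		vrf_bitmap_unset(ospf_redist, vrf_id);

	vrf_bitmap_free(ospf_redist);
}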
|
|
|
|
|
2005-06-28 14:44:16 +02:00
|
|
|
void bgp_zclient_reset(void)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
|
|
|
zclient_reset(zclient);
|
|
|
|
}
|
|
|
|
|
2016-02-12 21:18:28 +01:00
|
|
|
/* Register this instance with Zebra. Invoked upon connect (for
|
|
|
|
* default instance) and when other VRFs are learnt (or created and
|
|
|
|
* already learnt).
|
|
|
|
*/
|
|
|
|
void bgp_zebra_instance_register(struct bgp *bgp)
|
|
|
|
{
|
|
|
|
/* Don't try to register if we're not connected to Zebra */
|
2016-03-18 14:18:33 +01:00
|
|
|
if (!zclient || zclient->sock < 0)
|
2016-02-12 21:18:28 +01:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("Registering VRF %u", bgp->vrf_id);
|
|
|
|
|
|
|
|
/* Register for router-id, interfaces, redistributed routes. */
|
|
|
|
zclient_send_reg_requests(zclient, bgp->vrf_id);
|
2017-05-15 23:30:19 +02:00
|
|
|
|
2019-03-06 19:09:25 +01:00
|
|
|
/* For EVPN instance, register to learn about VNIs, if appropriate. */
|
|
|
|
if (bgp->advertise_all_vni)
|
2017-05-15 23:30:19 +02:00
|
|
|
bgp_zebra_advertise_all_vni(bgp, 1);
|
2018-10-05 17:31:29 +02:00
|
|
|
|
|
|
|
bgp_nht_register_nexthops(bgp);
|
2016-02-12 21:18:28 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Deregister this instance with Zebra. Invoked upon the instance
|
|
|
|
* being deleted (default or VRF), provided it is already registered.
|
|
|
|
*/
|
|
|
|
void bgp_zebra_instance_deregister(struct bgp *bgp)
|
|
|
|
{
|
|
|
|
/* Don't try to deregister if we're not connected to Zebra */
|
|
|
|
if (zclient->sock < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("Deregistering VRF %u", bgp->vrf_id);
|
|
|
|
|
2019-03-06 19:09:25 +01:00
|
|
|
/* For EVPN instance, unregister learning about VNIs, if appropriate. */
|
|
|
|
if (bgp->advertise_all_vni)
|
2017-05-15 23:30:19 +02:00
|
|
|
bgp_zebra_advertise_all_vni(bgp, 0);
|
|
|
|
|
2016-02-12 21:18:28 +01:00
|
|
|
/* Deregister for router-id, interfaces, redistributed routes. */
|
|
|
|
zclient_send_dereg_requests(zclient, bgp->vrf_id);
|
|
|
|
}
|
|
|
|
|
BGP: Trigger IPv6 router advertisements upon config of unnumbered neighbor
Instead of turning on IPv6 RA on every interface as soon as it has an IPv6
address, only enable it upon configuration of a BGP neighbor. When the BGP
neighbor is deleted, signal that RAs can be turned off.
To support this, introduce new message interaction between BGP and Zebra.
Also, take appropriate actions in BGP upon interface add/del since the
unnumbered neighbor could exist prior to interface creation etc.
Only unnumbered IPv6 neighbors require RA, the /30 or /31 based neighbors
don't. However, to keep the interaction simple and not have to deal with
too many dynamic conditions (e.g., address deletes or neighbor change to/from
'v6only'), RAs on the interface are triggered upon any unnumbered neighbor
configuration.
BGP-triggered RAs will cause RAs to be initiated on the interface; however,
if BGP asks that RAs be stopped (upon delete of unnumbered neighbor), RAs
will continue to be exchanged if the operator has explicitly enabled them.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10640
Reviewed By: CCR-4589
Testing Done: Various manual and automated (refer to defect)
2016-05-02 22:53:38 +02:00
|
|
|
void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer)
|
|
|
|
{
|
2016-05-13 01:51:43 +02:00
|
|
|
int ra_interval = BGP_UNNUM_DEFAULT_RA_INTERVAL;
|
|
|
|
|
2016-05-02 22:53:38 +02:00
|
|
|
/* Don't try to initiate if we're not connected to Zebra */
|
|
|
|
if (zclient->sock < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("%u: Initiating RA for peer %s", bgp->vrf_id,
|
|
|
|
peer->host);
|
|
|
|
|
2020-04-21 21:01:35 +02:00
|
|
|
/*
|
|
|
|
* If unnumbered peer (peer->ifp) call thru zapi to start RAs.
|
|
|
|
* If we don't have an ifp pointer, call function to find the
|
|
|
|
* ifps for a numbered enhe peer to turn RAs on.
|
|
|
|
*/
|
|
|
|
peer->ifp ? zclient_send_interface_radv_req(zclient, bgp->vrf_id,
|
|
|
|
peer->ifp, 1, ra_interval)
|
|
|
|
: bgp_nht_reg_enhe_cap_intfs(peer);
|
2016-05-02 22:53:38 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer)
|
|
|
|
{
|
|
|
|
/* Don't try to terminate if we're not connected to Zebra */
|
|
|
|
if (zclient->sock < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("%u: Terminating RA for peer %s", bgp->vrf_id,
|
|
|
|
peer->host);
|
|
|
|
|
2020-04-21 21:01:35 +02:00
|
|
|
/*
|
|
|
|
* If unnumbered peer (peer->ifp) call thru zapi to stop RAs.
|
|
|
|
* If we don't have an ifp pointer, call function to find the
|
|
|
|
* ifps for a numbered enhe peer to turn RAs off.
|
|
|
|
*/
|
|
|
|
peer->ifp ? zclient_send_interface_radv_req(zclient, bgp->vrf_id,
|
|
|
|
peer->ifp, 0, 0)
|
|
|
|
: bgp_nht_dereg_enhe_cap_intfs(peer);
|
2016-05-02 22:53:38 +02:00
|
|
|
}
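Per the router-advertisement commit message above, RA state is meant to
follow the lifetime of the unnumbered neighbor configuration. A minimal
sketch of the intended call pattern; the two config-side hook names are
hypothetical, only bgp_zebra_initiate_radv() and bgp_zebra_terminate_radv()
come from this file.

/* Hypothetical config-side hooks, for illustration only. */
static void example_unnumbered_neighbor_created(struct bgp *bgp,
						struct peer *peer)
{
	/* Neighbor configured on an interface: ask zebra to start RAs. */
	bgp_zebra_initiate_radv(bgp, peer);
}

static void example_unnumbered_neighbor_deleted(struct bgp *bgp,
						struct peer *peer)
{
	/* Neighbor removed: RAs may stop, unless explicitly enabled. */
	bgp_zebra_terminate_radv(bgp, peer);
}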
|
|
|
|
|
2017-11-20 06:47:04 +01:00
|
|
|
int bgp_zebra_advertise_subnet(struct bgp *bgp, int advertise, vni_t vni)
|
|
|
|
{
|
|
|
|
struct stream *s = NULL;
|
|
|
|
|
|
|
|
/* Check socket. */
|
|
|
|
if (!zclient || zclient->sock < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
2018-10-05 15:43:28 +02:00
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%s: No zebra instance to talk to, cannot advertise subnet",
|
|
|
|
__func__);
|
2017-11-20 06:47:04 +01:00
|
|
|
return 0;
|
2018-10-05 15:43:28 +02:00
|
|
|
}
|
2017-11-20 06:47:04 +01:00
|
|
|
|
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s, ZEBRA_ADVERTISE_SUBNET, bgp->vrf_id);
|
|
|
|
stream_putc(s, advertise);
|
|
|
|
stream_put3(s, vni);
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
|
|
|
|
|
|
|
return zclient_send_message(zclient);
|
|
|
|
}
|
|
|
|
|
2019-02-04 02:29:59 +01:00
|
|
|
int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise, vni_t vni)
|
|
|
|
{
|
|
|
|
struct stream *s = NULL;
|
|
|
|
|
|
|
|
/* Check socket. */
|
|
|
|
if (!zclient || zclient->sock < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s, ZEBRA_ADVERTISE_SVI_MACIP, bgp->vrf_id);
|
|
|
|
stream_putc(s, advertise);
|
|
|
|
stream_putl(s, vni);
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
|
|
|
|
|
|
|
return zclient_send_message(zclient);
|
|
|
|
}
|
|
|
|
|
2017-06-28 10:51:10 +02:00
|
|
|
int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise, vni_t vni)
|
|
|
|
{
|
|
|
|
struct stream *s = NULL;
|
|
|
|
|
|
|
|
/* Check socket. */
|
|
|
|
if (!zclient || zclient->sock < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
2018-10-05 15:43:28 +02:00
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%s: No zebra instance to talk to, not installing gw_macip",
|
|
|
|
__func__);
|
2017-06-28 10:51:10 +02:00
|
|
|
return 0;
|
2018-10-05 15:43:28 +02:00
|
|
|
}
|
2017-06-28 10:51:10 +02:00
|
|
|
|
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s, ZEBRA_ADVERTISE_DEFAULT_GW, bgp->vrf_id);
|
|
|
|
stream_putc(s, advertise);
|
2018-03-02 02:18:34 +01:00
|
|
|
stream_putl(s, vni);
|
2017-06-28 10:51:10 +02:00
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
|
|
|
|
|
|
|
return zclient_send_message(zclient);
|
|
|
|
}
|
|
|
|
|
2018-10-05 01:20:12 +02:00
|
|
|
int bgp_zebra_vxlan_flood_control(struct bgp *bgp,
|
|
|
|
enum vxlan_flood_control flood_ctrl)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
|
|
|
|
/* Check socket. */
|
|
|
|
if (!zclient || zclient->sock < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
2018-10-05 15:43:28 +02:00
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%s: No zebra instance to talk to, not installing all vni",
|
|
|
|
__func__);
|
2018-10-05 01:20:12 +02:00
|
|
|
return 0;
|
2018-10-05 15:43:28 +02:00
|
|
|
}
|
2018-10-05 01:20:12 +02:00
|
|
|
|
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s, ZEBRA_VXLAN_FLOOD_CONTROL, bgp->vrf_id);
|
|
|
|
stream_putc(s, flood_ctrl);
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
|
|
|
|
|
|
|
return zclient_send_message(zclient);
|
|
|
|
}
|
|
|
|
|
2017-05-15 23:30:19 +02:00
|
|
|
int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
|
|
|
|
/* Check socket. */
|
|
|
|
if (!zclient || zclient->sock < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s, ZEBRA_ADVERTISE_ALL_VNI, bgp->vrf_id);
|
|
|
|
stream_putc(s, advertise);
|
2018-10-05 00:42:57 +02:00
|
|
|
/* Also inform current BUM handling setting. This is really
|
|
|
|
* relevant only when 'advertise' is set.
|
|
|
|
*/
|
2018-10-05 01:20:12 +02:00
|
|
|
stream_putc(s, bgp->vxlan_flood_ctrl);
|
2017-05-15 23:30:19 +02:00
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
|
|
|
|
|
|
|
return zclient_send_message(zclient);
|
|
|
|
}
|
|
|
|
|
2018-11-01 16:28:08 +01:00
|
|
|
int bgp_zebra_dup_addr_detection(struct bgp *bgp)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
|
|
|
|
/* Check socket. */
|
|
|
|
if (!zclient || zclient->sock < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bgp))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("dup addr detect %s max_moves %u time %u freeze %s freeze_time %u",
|
|
|
|
bgp->evpn_info->dup_addr_detect ?
|
|
|
|
"enable" : "disable",
|
|
|
|
bgp->evpn_info->dad_max_moves,
|
|
|
|
bgp->evpn_info->dad_time,
|
|
|
|
bgp->evpn_info->dad_freeze ?
|
|
|
|
"enable" : "disable",
|
|
|
|
bgp->evpn_info->dad_freeze_time);
|
|
|
|
|
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
zclient_create_header(s, ZEBRA_DUPLICATE_ADDR_DETECTION,
|
|
|
|
bgp->vrf_id);
|
|
|
|
stream_putl(s, bgp->evpn_info->dup_addr_detect);
|
|
|
|
stream_putl(s, bgp->evpn_info->dad_time);
|
|
|
|
stream_putl(s, bgp->evpn_info->dad_max_moves);
|
|
|
|
stream_putl(s, bgp->evpn_info->dad_freeze);
|
|
|
|
stream_putl(s, bgp->evpn_info->dad_freeze_time);
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
|
|
|
|
|
|
|
return zclient_send_message(zclient);
|
|
|
|
}
|
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static int rule_notify_owner(ZAPI_CALLBACK_ARGS)
|
2018-03-08 15:39:19 +01:00
|
|
|
{
|
|
|
|
uint32_t seqno, priority, unique;
|
|
|
|
enum zapi_rule_notify_owner note;
|
|
|
|
struct bgp_pbr_action *bgp_pbra;
|
2018-11-29 14:35:41 +01:00
|
|
|
struct bgp_pbr_rule *bgp_pbr = NULL;
|
2020-09-10 17:31:39 +02:00
|
|
|
char ifname[INTERFACE_NAMSIZ + 1];
|
2018-03-08 15:39:19 +01:00
|
|
|
|
|
|
|
if (!zapi_rule_notify_decode(zclient->ibuf, &seqno, &priority, &unique,
|
2020-09-10 17:31:39 +02:00
|
|
|
ifname, ¬e))
|
2018-03-08 15:39:19 +01:00
|
|
|
return -1;
|
|
|
|
|
2018-03-12 15:11:33 +01:00
|
|
|
bgp_pbra = bgp_pbr_action_rule_lookup(vrf_id, unique);
|
2018-03-08 15:39:19 +01:00
|
|
|
if (!bgp_pbra) {
|
2018-11-29 14:35:41 +01:00
|
|
|
/* look in bgp pbr rule */
|
|
|
|
bgp_pbr = bgp_pbr_rule_lookup(vrf_id, unique);
|
|
|
|
if (!bgp_pbr && note != ZAPI_RULE_REMOVED) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("%s: Fail to look BGP rule (%u)",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, unique);
|
2018-11-29 14:35:41 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2018-03-08 15:39:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (note) {
|
|
|
|
case ZAPI_RULE_FAIL_INSTALL:
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Received RULE_FAIL_INSTALL", __func__);
|
2018-11-29 14:35:41 +01:00
|
|
|
if (bgp_pbra) {
|
|
|
|
bgp_pbra->installed = false;
|
|
|
|
bgp_pbra->install_in_progress = false;
|
|
|
|
} else {
|
|
|
|
bgp_pbr->installed = false;
|
|
|
|
bgp_pbr->install_in_progress = false;
|
|
|
|
}
|
2018-03-08 15:39:19 +01:00
|
|
|
break;
|
|
|
|
case ZAPI_RULE_INSTALLED:
|
2018-11-29 14:35:41 +01:00
|
|
|
if (bgp_pbra) {
|
|
|
|
bgp_pbra->installed = true;
|
|
|
|
bgp_pbra->install_in_progress = false;
|
|
|
|
} else {
|
2018-11-30 14:56:40 +01:00
|
|
|
struct bgp_path_info *path;
|
|
|
|
struct bgp_path_info_extra *extra;
|
|
|
|
|
2018-11-29 14:35:41 +01:00
|
|
|
bgp_pbr->installed = true;
|
|
|
|
bgp_pbr->install_in_progress = false;
|
|
|
|
bgp_pbr->action->refcnt++;
|
2018-11-30 14:56:40 +01:00
|
|
|
/* link bgp_info to bgp_pbr */
|
|
|
|
path = (struct bgp_path_info *)bgp_pbr->path;
|
|
|
|
extra = bgp_path_info_extra_get(path);
|
2019-03-28 18:07:34 +01:00
|
|
|
listnode_add_force(&extra->bgp_fs_iprule,
|
|
|
|
bgp_pbr);
|
2018-11-29 14:35:41 +01:00
|
|
|
}
|
2018-03-08 15:39:19 +01:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Received RULE_INSTALLED", __func__);
|
2018-03-08 15:39:19 +01:00
|
|
|
break;
|
2018-05-18 18:15:47 +02:00
|
|
|
case ZAPI_RULE_FAIL_REMOVE:
|
2018-03-08 15:39:19 +01:00
|
|
|
case ZAPI_RULE_REMOVED:
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Received RULE REMOVED", __func__);
|
2018-03-08 15:39:19 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static int ipset_notify_owner(ZAPI_CALLBACK_ARGS)
|
2018-03-08 15:39:19 +01:00
|
|
|
{
|
|
|
|
uint32_t unique;
|
|
|
|
enum zapi_ipset_notify_owner note;
|
|
|
|
struct bgp_pbr_match *bgp_pbim;
|
|
|
|
|
|
|
|
if (!zapi_ipset_notify_decode(zclient->ibuf,
|
|
|
|
&unique,
|
|
|
|
¬e))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
bgp_pbim = bgp_pbr_match_ipset_lookup(vrf_id, unique);
|
|
|
|
if (!bgp_pbim) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2018-07-02 16:53:19 +02:00
|
|
|
zlog_debug("%s: Fail to look BGP match ( %u, ID %u)",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, note, unique);
|
2018-03-08 15:39:19 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (note) {
|
|
|
|
case ZAPI_IPSET_FAIL_INSTALL:
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Received IPSET_FAIL_INSTALL", __func__);
|
2018-03-08 15:39:19 +01:00
|
|
|
bgp_pbim->installed = false;
|
|
|
|
bgp_pbim->install_in_progress = false;
|
|
|
|
break;
|
|
|
|
case ZAPI_IPSET_INSTALLED:
|
|
|
|
bgp_pbim->installed = true;
|
|
|
|
bgp_pbim->install_in_progress = false;
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Received IPSET_INSTALLED", __func__);
|
2018-03-08 15:39:19 +01:00
|
|
|
break;
|
2018-05-18 18:15:47 +02:00
|
|
|
case ZAPI_IPSET_FAIL_REMOVE:
|
2018-03-08 15:39:19 +01:00
|
|
|
case ZAPI_IPSET_REMOVED:
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: Received IPSET REMOVED", __func__);
|
2018-03-08 15:39:19 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
static int ipset_entry_notify_owner(ZAPI_CALLBACK_ARGS)
{
	uint32_t unique;
	char ipset_name[ZEBRA_IPSET_NAME_SIZE];
	enum zapi_ipset_entry_notify_owner note;
	struct bgp_pbr_match_entry *bgp_pbime;

	if (!zapi_ipset_entry_notify_decode(zclient->ibuf, &unique, ipset_name,
					    &note))
		return -1;
	bgp_pbime = bgp_pbr_match_ipset_entry_lookup(vrf_id, ipset_name,
						     unique);
	if (!bgp_pbime) {
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug(
				"%s: Failed to look up BGP match entry (%u, ID %u)",
				__func__, note, unique);
		return 0;
	}

	switch (note) {
	case ZAPI_IPSET_ENTRY_FAIL_INSTALL:
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Received IPSET_ENTRY_FAIL_INSTALL",
				   __func__);
		bgp_pbime->installed = false;
		bgp_pbime->install_in_progress = false;
		break;
	case ZAPI_IPSET_ENTRY_INSTALLED: {
		struct bgp_path_info *path;
		struct bgp_path_info_extra *extra;

		bgp_pbime->installed = true;
		bgp_pbime->install_in_progress = false;
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Received IPSET_ENTRY_INSTALLED",
				   __func__);
		/* link bgp_path_info to bpme */
		path = (struct bgp_path_info *)bgp_pbime->path;
		extra = bgp_path_info_extra_get(path);
		listnode_add_force(&extra->bgp_fs_pbr, bgp_pbime);
		break;
	}
	case ZAPI_IPSET_ENTRY_FAIL_REMOVE:
	case ZAPI_IPSET_ENTRY_REMOVED:
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Received IPSET_ENTRY_REMOVED",
				   __func__);
		break;
	}
	return 0;
}

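/*
 * Callback for ZAPI_IPTABLE_* notifications.  On a successful install
 * the reference count of the associated policy action is bumped, since
 * the iptable entry in the dataplane now refers to it.
 */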
static int iptable_notify_owner(ZAPI_CALLBACK_ARGS)
{
	uint32_t unique;
	enum zapi_iptable_notify_owner note;
	struct bgp_pbr_match *bgpm;

	if (!zapi_iptable_notify_decode(zclient->ibuf, &unique, &note))
		return -1;
	bgpm = bgp_pbr_match_iptable_lookup(vrf_id, unique);
	if (!bgpm) {
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Failed to look up BGP iptable (%u %u)",
				   __func__, note, unique);
		return 0;
	}
	switch (note) {
	case ZAPI_IPTABLE_FAIL_INSTALL:
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Received IPTABLE_FAIL_INSTALL",
				   __func__);
		bgpm->installed_in_iptable = false;
		bgpm->install_iptable_in_progress = false;
		break;
	case ZAPI_IPTABLE_INSTALLED:
		bgpm->installed_in_iptable = true;
		bgpm->install_iptable_in_progress = false;
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Received IPTABLE_INSTALLED", __func__);
		bgpm->action->refcnt++;
		break;
	case ZAPI_IPTABLE_FAIL_REMOVE:
	case ZAPI_IPTABLE_REMOVED:
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("%s: Received IPTABLE REMOVED", __func__);
		break;
	}
	return 0;
}

/* Process route notification messages from RIB */
static int bgp_zebra_route_notify_owner(int command, struct zclient *zclient,
					zebra_size_t length, vrf_id_t vrf_id)
{
	struct prefix p;
	enum zapi_route_notify_owner note;
	uint32_t table_id;
	afi_t afi;
	safi_t safi;
	struct bgp_dest *dest;
	struct bgp *bgp;
	struct bgp_path_info *pi, *new_select;

	if (!zapi_route_notify_decode(zclient->ibuf, &p, &table_id, &note,
				      &afi, &safi)) {
		zlog_err("%s : error in msg decode", __PRETTY_FUNCTION__);
		return -1;
	}

	/* Get the bgp instance */
	bgp = bgp_lookup_by_vrf_id(vrf_id);
	if (!bgp) {
		flog_err(EC_BGP_INVALID_BGP_INSTANCE,
			 "%s : bgp instance not found vrf %d",
			 __PRETTY_FUNCTION__, vrf_id);
		return -1;
	}

	/* Find the bgp route node */
	dest = bgp_afi_node_lookup(bgp->rib[afi][safi], afi, safi, &p,
				   &bgp->vrf_prd);
	if (!dest)
		return -1;

	bgp_dest_unlock_node(dest);

	switch (note) {
	case ZAPI_ROUTE_INSTALLED:
		new_select = NULL;
		/* Clear the flags so that route can be processed */
		if (CHECK_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING)) {
			UNSET_FLAG(dest->flags, BGP_NODE_FIB_INSTALL_PENDING);
			SET_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED);
			if (BGP_DEBUG(zebra, ZEBRA))
				zlog_debug("route %pRN : INSTALLED", dest);
			/* Find the best route */
			for (pi = dest->info; pi; pi = pi->next) {
				/* Process aggregate route */
				bgp_aggregate_increment(bgp, &p, pi, afi,
							safi);
				if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED))
					new_select = pi;
			}
			/* Advertise the route */
			if (new_select)
				group_announce_route(bgp, afi, safi, dest,
						     new_select);
			else {
				flog_err(EC_BGP_INVALID_ROUTE,
					 "selected route %pRN not found",
					 dest);
				return -1;
			}
		}
		break;
	case ZAPI_ROUTE_REMOVED:
		/* Route deleted from dataplane, reset the installed flag
		 * so that route can be reinstalled when client sends
		 * route add later
		 */
		UNSET_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED);
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("route %pRN: Removed from Fib", dest);
		break;
	case ZAPI_ROUTE_FAIL_INSTALL:
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("route: %pRN Failed to Install into Fib",
				   dest);
		/* Error will be logged by zebra module */
		break;
	case ZAPI_ROUTE_BETTER_ADMIN_WON:
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("route: %pRN removed due to better admin won",
				   dest);
		/* No action required */
		break;
	case ZAPI_ROUTE_REMOVE_FAIL:
		zlog_warn("%s: Route %pRN failure to remove",
			  __func__, dest);
		break;
	}
	return 0;
}

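/*
 * Note on the encoding below: the fields are written in the order
 * seqno, priority, unique, source prefix, source port, destination
 * prefix, destination port, dsfield, fwmark, table id and interface
 * name, and are presumably decoded in the same order by the matching
 * zapi rule handler on the zebra side.
 */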
/* this function is used to forge ip rule,
 * - either for iptable/ipset using fwmark id
 * - or for sample ip rule cmd
 */
static void bgp_encode_pbr_rule_action(struct stream *s,
				       struct bgp_pbr_action *pbra,
				       struct bgp_pbr_rule *pbr)
{
	struct prefix pfx;
	uint8_t fam = AF_INET;
	char ifname[INTERFACE_NAMSIZ];

	if (pbra->nh.type == NEXTHOP_TYPE_IPV6)
		fam = AF_INET6;
	stream_putl(s, 0); /* seqno unused */
	if (pbr)
		stream_putl(s, pbr->priority);
	else
		stream_putl(s, 0);

	/* ruleno unused - priority change
	 * ruleno permits distinguishing various FS PBR entries
	 * - FS PBR entries based on ipset/iptables
	 * - FS PBR entries based on iprule
	 * the latter may contain default routing information injected by FS
	 */
	if (pbr)
		stream_putl(s, pbr->unique);
	else
		stream_putl(s, pbra->unique);
	if (pbr && pbr->flags & MATCH_IP_SRC_SET)
		memcpy(&pfx, &(pbr->src), sizeof(struct prefix));
	else {
		memset(&pfx, 0, sizeof(pfx));
		pfx.family = fam;
	}
	stream_putc(s, pfx.family);
	stream_putc(s, pfx.prefixlen);
	stream_put(s, &pfx.u.prefix, prefix_blen(&pfx));

	stream_putw(s, 0); /* src port */

	if (pbr && pbr->flags & MATCH_IP_DST_SET)
		memcpy(&pfx, &(pbr->dst), sizeof(struct prefix));
	else {
		memset(&pfx, 0, sizeof(pfx));
		pfx.family = fam;
	}
	stream_putc(s, pfx.family);
	stream_putc(s, pfx.prefixlen);
	stream_put(s, &pfx.u.prefix, prefix_blen(&pfx));

	stream_putw(s, 0); /* dst port */
	stream_putc(s, 0); /* dsfield */
	/* if pbr present, fwmark is not used */
	if (pbr)
		stream_putl(s, 0);
	else
		stream_putl(s, pbra->fwmark); /* fwmark */

	stream_putl(s, pbra->table_id);

	memset(ifname, 0, sizeof(ifname));
	stream_put(s, ifname, INTERFACE_NAMSIZ); /* ifname unused */
}

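/* Encode an ipset creation request: unique id, ipset type, address
 * family and ipset name.
 */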
static void bgp_encode_pbr_ipset_match(struct stream *s,
				       struct bgp_pbr_match *pbim)
{
	stream_putl(s, pbim->unique);
	stream_putl(s, pbim->type);
	stream_putc(s, pbim->family);
	stream_put(s, pbim->ipset_name, ZEBRA_IPSET_NAME_SIZE);
}

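/* Encode one ipset entry: unique id, owning ipset name, source and
 * destination prefixes, source/destination port ranges and protocol.
 */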
static void bgp_encode_pbr_ipset_entry_match(struct stream *s,
					     struct bgp_pbr_match_entry *pbime)
{
	stream_putl(s, pbime->unique);
	/* check that back pointer is not null */
	stream_put(s, pbime->backpointer->ipset_name, ZEBRA_IPSET_NAME_SIZE);

	stream_putc(s, pbime->src.family);
	stream_putc(s, pbime->src.prefixlen);
	stream_put(s, &pbime->src.u.prefix, prefix_blen(&pbime->src));

	stream_putc(s, pbime->dst.family);
	stream_putc(s, pbime->dst.prefixlen);
	stream_put(s, &pbime->dst.u.prefix, prefix_blen(&pbime->dst));

	stream_putw(s, pbime->src_port_min);
	stream_putw(s, pbime->src_port_max);
	stream_putw(s, pbime->dst_port_min);
	stream_putw(s, pbime->dst_port_max);
	stream_putc(s, pbime->proto);
}

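/* Encode an iptable request: the match criteria (unique2, type, flags,
 * ipset name, family, packet length, TCP flags, DSCP, fragment,
 * protocol, flow label) plus the action, reduced here to DROP for
 * blackhole nexthops and FORWARD otherwise, carried with the fwmark of
 * the policy action.
 */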
static void bgp_encode_pbr_iptable_match(struct stream *s,
					 struct bgp_pbr_action *bpa,
					 struct bgp_pbr_match *pbm)
{
	stream_putl(s, pbm->unique2);

	stream_putl(s, pbm->type);

	stream_putl(s, pbm->flags);

	/* TODO: correlate with what is contained
	 * into bgp_pbr_action.
	 * currently only forward supported
	 */
	if (bpa->nh.type == NEXTHOP_TYPE_BLACKHOLE)
		stream_putl(s, ZEBRA_IPTABLES_DROP);
	else
		stream_putl(s, ZEBRA_IPTABLES_FORWARD);
	stream_putl(s, bpa->fwmark);
	stream_put(s, pbm->ipset_name, ZEBRA_IPSET_NAME_SIZE);
	stream_putc(s, pbm->family);
	stream_putw(s, pbm->pkt_len_min);
	stream_putw(s, pbm->pkt_len_max);
	stream_putw(s, pbm->tcp_flags);
	stream_putw(s, pbm->tcp_mask_flags);
	stream_putc(s, pbm->dscp_value);
	stream_putc(s, pbm->fragment);
	stream_putc(s, pbm->protocol);
	stream_putw(s, pbm->flow_label);
}

/* BGP has established connection with Zebra. */
static void bgp_zebra_connected(struct zclient *zclient)
{
	struct bgp *bgp;

	zclient_num_connects++; /* increment even if not responding */

	/* At this point, we may or may not have BGP instances configured, but
	 * we're only interested in the default VRF (others wouldn't have learnt
	 * the VRF from Zebra yet.)
	 */
	bgp = bgp_get_default();
	if (!bgp)
		return;

	bgp_zebra_instance_register(bgp);

	/* Send the client registration */
	bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, bgp->vrf_id);

	/* tell label pool that zebra is connected */
	bgp_lp_event_zebra_up();

	/* TODO - What if we have peers and networks configured, do we have to
	 * kick-start them?
	 */
	BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(bgp, bgp->peer);
}

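/*
 * Local Ethernet Segment add from zebra.  The message carries the ESI,
 * the originator IP, the active flag, the DF preference and a bypass
 * flag, which are handed to the EVPN multihoming code.
 */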
static int bgp_zebra_process_local_es_add(ZAPI_CALLBACK_ARGS)
{
	esi_t esi;
	struct bgp *bgp = NULL;
	struct stream *s = NULL;
	char buf[ESI_STR_LEN];
	struct in_addr originator_ip;
	uint8_t active;
	uint8_t bypass;
	uint16_t df_pref;

	bgp = bgp_lookup_by_vrf_id(vrf_id);
	if (!bgp)
		return 0;

	s = zclient->ibuf;
	stream_get(&esi, s, sizeof(esi_t));
	originator_ip.s_addr = stream_get_ipv4(s);
	active = stream_getc(s);
	df_pref = stream_getw(s);
	bypass = stream_getc(s);

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug(
			"Rx add ESI %s originator-ip %pI4 active %u df_pref %u %s",
			esi_to_str(&esi, buf, sizeof(buf)), &originator_ip,
			active, df_pref, bypass ? "bypass" : "");

	bgp_evpn_local_es_add(bgp, &esi, originator_ip, active, df_pref,
			      !!bypass);

	return 0;
}

static int bgp_zebra_process_local_es_del(ZAPI_CALLBACK_ARGS)
{
	esi_t esi;
	struct bgp *bgp = NULL;
	struct stream *s = NULL;
	char buf[ESI_STR_LEN];

	memset(&esi, 0, sizeof(esi_t));
	bgp = bgp_lookup_by_vrf_id(vrf_id);
	if (!bgp)
		return 0;

	s = zclient->ibuf;
	stream_get(&esi, s, sizeof(esi_t));

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug("Rx del ESI %s",
			   esi_to_str(&esi, buf, sizeof(buf)));

	bgp_evpn_local_es_del(bgp, &esi);

	return 0;
}

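/*
 * Local ES-EVI add/del from zebra: the message carries the ESI and the
 * VNI, and cmd selects between bgp_evpn_local_es_evi_add() and
 * bgp_evpn_local_es_evi_del().
 */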
static int bgp_zebra_process_local_es_evi(ZAPI_CALLBACK_ARGS)
{
	esi_t esi;
	vni_t vni;
	struct bgp *bgp;
	struct stream *s;
	char buf[ESI_STR_LEN];

	bgp = bgp_lookup_by_vrf_id(vrf_id);
	if (!bgp)
		return 0;

	s = zclient->ibuf;
	stream_get(&esi, s, sizeof(esi_t));
	vni = stream_getl(s);

	if (BGP_DEBUG(zebra, ZEBRA))
		zlog_debug("Rx %s ESI %s VNI %u",
			   (cmd == ZEBRA_LOCAL_ES_EVI_ADD) ? "add" : "del",
			   esi_to_str(&esi, buf, sizeof(buf)), vni);

	if (cmd == ZEBRA_LOCAL_ES_EVI_ADD)
		bgp_evpn_local_es_evi_add(bgp, &esi, vni);
	else
		bgp_evpn_local_es_evi_del(bgp, &esi, vni);

	return 0;
}

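/*
 * Local L3-VNI add/del from zebra.  For an add the message also carries
 * the SVI router MAC, the VRR MAC, the originator IP, the
 * prefix-routes-only filter, the SVI ifindex and the anycast-MAC flag.
 */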
static int bgp_zebra_process_local_l3vni(ZAPI_CALLBACK_ARGS)
{
	int filter = 0;
	vni_t l3vni = 0;
	struct ethaddr svi_rmac, vrr_rmac = {.octet = {0} };
	struct in_addr originator_ip;
	struct stream *s;
	ifindex_t svi_ifindex;
	bool is_anycast_mac = false;

	memset(&svi_rmac, 0, sizeof(struct ethaddr));
	memset(&originator_ip, 0, sizeof(struct in_addr));
	s = zclient->ibuf;
	l3vni = stream_getl(s);
	if (cmd == ZEBRA_L3VNI_ADD) {
		stream_get(&svi_rmac, s, sizeof(struct ethaddr));
		originator_ip.s_addr = stream_get_ipv4(s);
		stream_get(&filter, s, sizeof(int));
		svi_ifindex = stream_getl(s);
		stream_get(&vrr_rmac, s, sizeof(struct ethaddr));
		is_anycast_mac = stream_getl(s);

		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug(
				"Rx L3-VNI ADD VRF %s VNI %u RMAC svi-mac %pEA vrr-mac %pEA filter %s svi-if %u",
				vrf_id_to_name(vrf_id), l3vni, &svi_rmac,
				&vrr_rmac,
				filter ? "prefix-routes-only" : "none",
				svi_ifindex);

		bgp_evpn_local_l3vni_add(l3vni, vrf_id, &svi_rmac, &vrr_rmac,
					 originator_ip, filter, svi_ifindex,
					 is_anycast_mac);
	} else {
		if (BGP_DEBUG(zebra, ZEBRA))
			zlog_debug("Rx L3-VNI DEL VRF %s VNI %u",
				   vrf_id_to_name(vrf_id), l3vni);

		bgp_evpn_local_l3vni_del(l3vni, vrf_id);
	}

	return 0;
}

2019-05-03 21:42:59 +02:00
|
|
|
static int bgp_zebra_process_local_vni(ZAPI_CALLBACK_ARGS)
|
2017-05-15 23:34:04 +02:00
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
vni_t vni;
|
|
|
|
struct bgp *bgp;
|
2018-02-09 19:22:50 +01:00
|
|
|
struct in_addr vtep_ip = {INADDR_ANY};
|
2017-10-08 05:26:16 +02:00
|
|
|
vrf_id_t tenant_vrf_id = VRF_DEFAULT;
|
2019-03-19 19:08:24 +01:00
|
|
|
struct in_addr mcast_grp = {INADDR_ANY};
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
s = zclient->ibuf;
|
|
|
|
vni = stream_getl(s);
|
2019-05-03 21:42:59 +02:00
|
|
|
if (cmd == ZEBRA_VNI_ADD) {
|
2017-05-15 23:34:04 +02:00
|
|
|
vtep_ip.s_addr = stream_get_ipv4(s);
|
2017-10-08 05:26:16 +02:00
|
|
|
stream_get(&tenant_vrf_id, s, sizeof(vrf_id_t));
|
2019-03-19 19:08:24 +01:00
|
|
|
mcast_grp.s_addr = stream_get_ipv4(s);
|
2017-10-08 05:26:16 +02:00
|
|
|
}
|
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
bgp = bgp_lookup_by_vrf_id(vrf_id);
|
|
|
|
if (!bgp)
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2017-10-08 05:26:16 +02:00
|
|
|
zlog_debug("Rx VNI %s VRF %s VNI %u tenant-vrf %s",
|
2019-05-03 21:42:59 +02:00
|
|
|
(cmd == ZEBRA_VNI_ADD) ? "add" : "del",
|
2018-02-09 19:22:50 +01:00
|
|
|
vrf_id_to_name(vrf_id), vni,
|
|
|
|
vrf_id_to_name(tenant_vrf_id));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
if (cmd == ZEBRA_VNI_ADD)
|
2017-05-15 23:34:04 +02:00
|
|
|
return bgp_evpn_local_vni_add(
|
2020-02-06 07:49:02 +01:00
|
|
|
bgp, vni,
|
|
|
|
vtep_ip.s_addr != INADDR_ANY ? vtep_ip : bgp->router_id,
|
2019-03-19 19:08:24 +01:00
|
|
|
tenant_vrf_id, mcast_grp);
|
2017-05-15 23:34:04 +02:00
|
|
|
else
|
|
|
|
return bgp_evpn_local_vni_del(bgp, vni);
|
|
|
|
}
|
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static int bgp_zebra_process_local_macip(ZAPI_CALLBACK_ARGS)
|
2017-05-15 23:34:04 +02:00
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
vni_t vni;
|
|
|
|
struct bgp *bgp;
|
|
|
|
struct ethaddr mac;
|
|
|
|
struct ipaddr ip;
|
|
|
|
int ipa_len;
|
bgpd, zebra: EVPN extended mobility support
Implement procedures similar to what is specified in
https://tools.ietf.org/html/draft-malhotra-bess-evpn-irb-extended-mobility
in order to support extended mobility scenarios in EVPN. These are scenarios
where a host/VM move results in a different (MAC,IP) binding from earlier.
For example, a host with an address assignment (IP1, MAC1) moves behind a
different PE (VTEP) and has an address assignment of (IP1, MAC2) or a host
with an address assignment (IP5, MAC5) has a different assignment of (IP6,
MAC5) after the move. Note that while these are described as "move" scenarios,
they also cover the situation when a VM is shut down and a new VM is spun up
at a different location that reuses the IP address or MAC address of the
earlier instance, but not both. Yet another scenario is a MAC change for an
attached host/VM i.e., when the MAC of an attached host changes from MAC1 to
MAC2. This is necessary because there may already be a non-zero sequence
number associated with MAC2. Also, even though (IP, MAC1) is withdrawn before
(IP, MAC2) is advertised, they may propagate through the network differently.
The procedures continue to rely on the MAC mobility extended community
specified in RFC 7432 and already supported by the implementation, but
augment it with an inheritance mechanism that understands the relationship
of the host MACIP (ARP/neighbor table entry) to the underlying MAC (MAC
forwarding database entry). In FRR, this relationship is understood by the
zebra component, which doubles as the "host mobility manager", so the MAC
mobility sequence numbers are determined through interaction between bgpd
and zebra (a toy illustration of the idea follows this note).
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Reviewed-by: Anuradha Karuppiah <anuradhak@cumulusnetworks.com>
2018-08-20 21:20:06 +02:00
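A toy illustration of that inheritance idea (not the actual FRR code): the sequence number advertised with a host MAC-IP route should never lag behind the one carried by the underlying MAC entry.
/* Hypothetical helper: the advertised MAC-IP sequence number inherits
 * (is at least) the sequence number of the underlying MAC entry. */
static uint32_t macip_seq_sketch(uint32_t mac_seq, uint32_t neigh_seq)
{
	return mac_seq > neigh_seq ? mac_seq : neigh_seq;
}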
|
|
|
uint8_t flags = 0;
|
|
|
|
uint32_t seqnum = 0;
|
2019-01-15 00:24:43 +01:00
|
|
|
int state = 0;
|
2020-03-27 22:43:50 +01:00
|
|
|
char buf2[ESI_STR_LEN];
|
|
|
|
esi_t esi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
memset(&ip, 0, sizeof(ip));
|
|
|
|
s = zclient->ibuf;
|
|
|
|
vni = stream_getl(s);
|
2017-08-03 14:45:27 +02:00
|
|
|
stream_get(&mac.octet, s, ETH_ALEN);
|
2017-05-15 23:34:04 +02:00
|
|
|
ipa_len = stream_getl(s);
|
|
|
|
if (ipa_len != 0 && ipa_len != IPV4_MAX_BYTELEN
|
|
|
|
&& ipa_len != IPV6_MAX_BYTELEN) {
|
2018-09-13 20:23:42 +02:00
|
|
|
flog_err(EC_BGP_MACIP_LEN,
|
2018-09-13 21:38:57 +02:00
|
|
|
"%u:Recv MACIP %s with invalid IP addr length %d",
|
2019-05-03 21:42:59 +02:00
|
|
|
vrf_id, (cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del",
|
2018-09-13 21:38:57 +02:00
|
|
|
ipa_len);
|
2017-05-15 23:34:04 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
if (ipa_len) {
|
|
|
|
ip.ipa_type =
|
|
|
|
(ipa_len == IPV4_MAX_BYTELEN) ? IPADDR_V4 : IPADDR_V6;
|
|
|
|
stream_get(&ip.ip.addr, s, ipa_len);
|
|
|
|
}
|
2019-05-03 21:42:59 +02:00
|
|
|
if (cmd == ZEBRA_MACIP_ADD) {
|
2018-08-20 21:20:06 +02:00
|
|
|
flags = stream_getc(s);
|
|
|
|
seqnum = stream_getl(s);
|
2020-03-27 22:43:50 +01:00
|
|
|
stream_get(&esi, s, sizeof(esi_t));
|
2019-01-15 00:24:43 +01:00
|
|
|
} else {
|
|
|
|
state = stream_getl(s);
|
2020-11-14 23:33:43 +01:00
|
|
|
memset(&esi, 0, sizeof(esi_t));
|
2018-08-20 21:20:06 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
bgp = bgp_lookup_by_vrf_id(vrf_id);
|
|
|
|
if (!bgp)
|
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-15 23:34:04 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2021-03-10 01:50:42 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%u:Recv MACIP %s f 0x%x MAC %pEA IP %pI4 VNI %u seq %u state %d ESI %s",
|
|
|
|
vrf_id, (cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del", flags,
|
|
|
|
&mac, &ip, vni, seqnum, state,
|
|
|
|
esi_to_str(&esi, buf2, sizeof(buf2)));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
if (cmd == ZEBRA_MACIP_ADD)
|
2018-08-20 21:20:06 +02:00
|
|
|
return bgp_evpn_local_macip_add(bgp, vni, &mac, &ip,
|
2020-03-27 22:43:50 +01:00
|
|
|
flags, seqnum, &esi);
|
2017-05-15 23:34:04 +02:00
|
|
|
else
|
2019-01-15 00:24:43 +01:00
|
|
|
return bgp_evpn_local_macip_del(bgp, vni, &mac, &ip, state);
|
2017-05-15 23:34:04 +02:00
|
|
|
}
|
2016-02-02 13:36:20 +01:00
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static void bgp_zebra_process_local_ip_prefix(ZAPI_CALLBACK_ARGS)
|
2017-11-20 06:47:04 +01:00
|
|
|
{
|
|
|
|
struct stream *s = NULL;
|
|
|
|
struct bgp *bgp_vrf = NULL;
|
|
|
|
struct prefix p;
|
|
|
|
|
|
|
|
memset(&p, 0, sizeof(struct prefix));
|
|
|
|
s = zclient->ibuf;
|
|
|
|
stream_get(&p, s, sizeof(struct prefix));
|
|
|
|
|
|
|
|
bgp_vrf = bgp_lookup_by_vrf_id(vrf_id);
|
|
|
|
if (!bgp_vrf)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-10-18 13:33:54 +02:00
|
|
|
zlog_debug("Recv prefix %pFX %s on vrf %s", &p,
|
2017-11-20 06:47:04 +01:00
|
|
|
(cmd == ZEBRA_IP_PREFIX_ROUTE_ADD) ? "ADD" : "DEL",
|
|
|
|
vrf_id_to_name(vrf_id));
|
|
|
|
|
|
|
|
if (cmd == ZEBRA_IP_PREFIX_ROUTE_ADD) {
|
|
|
|
|
|
|
|
if (p.family == AF_INET)
|
2018-11-20 16:30:20 +01:00
|
|
|
bgp_evpn_advertise_type5_route(bgp_vrf, &p, NULL,
|
|
|
|
AFI_IP, SAFI_UNICAST);
|
2017-11-20 06:47:04 +01:00
|
|
|
else
|
2018-11-20 16:30:20 +01:00
|
|
|
bgp_evpn_advertise_type5_route(bgp_vrf, &p, NULL,
|
|
|
|
AFI_IP6, SAFI_UNICAST);
|
2017-11-20 06:47:04 +01:00
|
|
|
|
|
|
|
} else {
|
|
|
|
if (p.family == AF_INET)
|
2018-11-20 16:30:20 +01:00
|
|
|
bgp_evpn_withdraw_type5_route(bgp_vrf, &p, AFI_IP,
|
|
|
|
SAFI_UNICAST);
|
2017-11-20 06:47:04 +01:00
|
|
|
else
|
2018-11-20 16:30:20 +01:00
|
|
|
bgp_evpn_withdraw_type5_route(bgp_vrf, &p, AFI_IP6,
|
|
|
|
SAFI_UNICAST);
|
2017-11-20 06:47:04 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-03 21:42:59 +02:00
|
|
|
static void bgp_zebra_process_label_chunk(ZAPI_CALLBACK_ARGS)
|
2018-04-07 20:13:07 +02:00
|
|
|
{
|
|
|
|
struct stream *s = NULL;
|
|
|
|
uint8_t response_keep;
|
|
|
|
uint32_t first;
|
|
|
|
uint32_t last;
|
2018-05-02 16:30:26 +02:00
|
|
|
uint8_t proto;
|
|
|
|
unsigned short instance;
|
2018-04-07 20:13:07 +02:00
|
|
|
|
|
|
|
s = zclient->ibuf;
|
2018-05-02 16:30:26 +02:00
|
|
|
STREAM_GETC(s, proto);
|
|
|
|
STREAM_GETW(s, instance);
|
2018-04-07 20:13:07 +02:00
|
|
|
STREAM_GETC(s, response_keep);
|
|
|
|
STREAM_GETL(s, first);
|
|
|
|
STREAM_GETL(s, last);
|
|
|
|
|
2018-05-02 16:30:26 +02:00
|
|
|
if (zclient->redist_default != proto) {
|
2018-09-13 20:23:42 +02:00
|
|
|
flog_err(EC_BGP_LM_ERROR, "Got LM msg with wrong proto %u",
|
2018-09-13 21:38:57 +02:00
|
|
|
proto);
|
2018-05-02 16:30:26 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (zclient->instance != instance) {
|
2018-09-13 20:23:42 +02:00
|
|
|
flog_err(EC_BGP_LM_ERROR, "Got LM msg with wrong instance %u",
|
2018-09-13 21:38:57 +02:00
|
|
|
instance);
|
2018-05-02 16:30:26 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-04-07 20:13:07 +02:00
|
|
|
if (first > last ||
|
|
|
|
first < MPLS_LABEL_UNRESERVED_MIN ||
|
|
|
|
last > MPLS_LABEL_UNRESERVED_MAX) {
|
|
|
|
|
2018-09-13 20:23:42 +02:00
|
|
|
flog_err(EC_BGP_LM_ERROR, "%s: Invalid Label chunk: %u - %u",
|
2018-09-13 21:38:57 +02:00
|
|
|
__func__, first, last);
|
2018-04-07 20:13:07 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA)) {
|
|
|
|
zlog_debug("Label Chunk assign: %u - %u (%u) ",
|
|
|
|
first, last, response_keep);
|
|
|
|
}
|
|
|
|
|
2018-04-07 20:32:52 +02:00
|
|
|
bgp_lp_event_chunk(response_keep, first, last);
|
2018-04-07 20:13:07 +02:00
|
|
|
|
|
|
|
stream_failure: /* for STREAM_GETX */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-10-11 16:37:20 +02:00
|
|
|
extern struct zebra_privs_t bgpd_privs;
|
|
|
|
|
2019-09-18 22:20:04 +02:00
|
|
|
static int bgp_ifp_create(struct interface *ifp)
|
|
|
|
{
|
2019-09-19 04:26:55 +02:00
|
|
|
struct bgp *bgp;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("Rx Intf add VRF %u IF %s", ifp->vrf_id, ifp->name);
|
|
|
|
|
|
|
|
bgp = bgp_lookup_by_vrf_id(ifp->vrf_id);
|
|
|
|
if (!bgp)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
bgp_mac_add_mac_entry(ifp);
|
|
|
|
|
|
|
|
bgp_update_interface_nbrs(bgp, ifp, ifp);
|
2020-09-28 17:35:35 +02:00
|
|
|
hook_call(bgp_vrf_status_changed, bgp, ifp);
|
2019-09-18 22:20:04 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-06-07 15:28:12 +02:00
|
|
|
void bgp_zebra_init(struct thread_master *master, unsigned short instance)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2016-10-07 15:44:42 +02:00
|
|
|
zclient_num_connects = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-09-18 22:20:04 +02:00
|
|
|
if_zapi_callbacks(bgp_ifp_create, bgp_ifp_up,
|
|
|
|
bgp_ifp_down, bgp_ifp_destroy);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Set default values. */
|
2018-11-02 13:54:58 +01:00
|
|
|
zclient = zclient_new(master, &zclient_options_default);
|
2017-10-11 16:37:20 +02:00
|
|
|
zclient_init(zclient, ZEBRA_ROUTE_BGP, 0, &bgpd_privs);
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header, and hence the message version is increased to 3.
* The new field "VRF ID" in the message header (an encoding sketch follows this note):
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client that sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically be registered to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, the message will be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is split, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zebra.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of course, in the future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
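A minimal sketch of the five-field version-3 header listed in the note above; the real encoder is zclient_create_header() in lib/zclient.c, and the field sizes below simply follow the text (later protocol versions changed them).
/* Sketch only: serialize the v3 ZAPI header as described in the commit note. */
static void zapi_v3_header_sketch(struct stream *s, uint16_t vrf_id,
				  uint16_t command)
{
	stream_putw(s, 0);                   /* Length, fixed up once the body is known */
	stream_putc(s, ZEBRA_HEADER_MARKER); /* Marker */
	stream_putc(s, 3);                   /* Version */
	stream_putw(s, vrf_id);              /* VRF ID, the new field */
	stream_putw(s, command);             /* Command */
}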
|
|
|
zclient->zebra_connected = bgp_zebra_connected;
|
2004-10-03 20:18:34 +02:00
|
|
|
zclient->router_id_update = bgp_router_id_update;
|
2002-12-13 21:15:29 +01:00
|
|
|
zclient->interface_address_add = bgp_interface_address_add;
|
|
|
|
zclient->interface_address_delete = bgp_interface_address_delete;
|
2015-05-20 02:40:40 +02:00
|
|
|
zclient->interface_nbr_address_add = bgp_interface_nbr_address_add;
|
|
|
|
zclient->interface_nbr_address_delete =
|
|
|
|
bgp_interface_nbr_address_delete;
|
2016-02-25 20:39:25 +01:00
|
|
|
zclient->interface_vrf_update = bgp_interface_vrf_update;
|
2017-08-21 03:10:50 +02:00
|
|
|
zclient->redistribute_route_add = zebra_read_route;
|
|
|
|
zclient->redistribute_route_del = zebra_read_route;
|
2015-05-20 02:40:34 +02:00
|
|
|
zclient->nexthop_update = bgp_read_nexthop_update;
|
2015-05-20 03:04:20 +02:00
|
|
|
zclient->import_check_update = bgp_read_import_check_update;
|
2017-03-09 15:54:20 +01:00
|
|
|
zclient->fec_update = bgp_read_fec_update;
|
2020-03-27 22:43:50 +01:00
|
|
|
zclient->local_es_add = bgp_zebra_process_local_es_add;
|
|
|
|
zclient->local_es_del = bgp_zebra_process_local_es_del;
|
2017-05-15 23:34:04 +02:00
|
|
|
zclient->local_vni_add = bgp_zebra_process_local_vni;
|
2020-03-27 22:43:50 +01:00
|
|
|
zclient->local_es_evi_add = bgp_zebra_process_local_es_evi;
|
|
|
|
zclient->local_es_evi_del = bgp_zebra_process_local_es_evi;
|
2017-05-15 23:34:04 +02:00
|
|
|
zclient->local_vni_del = bgp_zebra_process_local_vni;
|
|
|
|
zclient->local_macip_add = bgp_zebra_process_local_macip;
|
|
|
|
zclient->local_macip_del = bgp_zebra_process_local_macip;
|
2017-10-09 02:46:08 +02:00
|
|
|
zclient->local_l3vni_add = bgp_zebra_process_local_l3vni;
|
|
|
|
zclient->local_l3vni_del = bgp_zebra_process_local_l3vni;
|
2017-11-20 06:47:04 +01:00
|
|
|
zclient->local_ip_prefix_add = bgp_zebra_process_local_ip_prefix;
|
|
|
|
zclient->local_ip_prefix_del = bgp_zebra_process_local_ip_prefix;
|
2018-04-07 20:13:07 +02:00
|
|
|
zclient->label_chunk = bgp_zebra_process_label_chunk;
|
2018-03-08 15:39:19 +01:00
|
|
|
zclient->rule_notify_owner = rule_notify_owner;
|
|
|
|
zclient->ipset_notify_owner = ipset_notify_owner;
|
|
|
|
zclient->ipset_entry_notify_owner = ipset_entry_notify_owner;
|
2018-03-12 09:38:53 +01:00
|
|
|
zclient->iptable_notify_owner = iptable_notify_owner;
|
2020-11-06 04:25:56 +01:00
|
|
|
zclient->route_notify_owner = bgp_zebra_route_notify_owner;
|
2018-06-07 15:28:12 +02:00
|
|
|
zclient->instance = instance;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2016-01-12 19:41:57 +01:00
|
|
|
|
|
|
|
void bgp_zebra_destroy(void)
|
|
|
|
{
|
|
|
|
if (zclient == NULL)
|
|
|
|
return;
|
|
|
|
zclient_stop(zclient);
|
|
|
|
zclient_free(zclient);
|
|
|
|
zclient = NULL;
|
|
|
|
}
|
2016-10-07 15:44:42 +02:00
|
|
|
|
|
|
|
int bgp_zebra_num_connects(void)
|
|
|
|
{
|
|
|
|
return zclient_num_connects;
|
|
|
|
}
|
2018-03-08 15:39:19 +01:00
|
|
|
|
2018-11-29 15:14:41 +01:00
|
|
|
void bgp_send_pbr_rule_action(struct bgp_pbr_action *pbra,
|
|
|
|
struct bgp_pbr_rule *pbr,
|
|
|
|
bool install)
|
2018-03-08 15:39:19 +01:00
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
|
2018-11-29 15:14:41 +01:00
|
|
|
if (pbra->install_in_progress && !pbr)
|
2018-03-08 15:39:19 +01:00
|
|
|
return;
|
2018-11-29 15:14:41 +01:00
|
|
|
if (pbr && pbr->install_in_progress)
|
|
|
|
return;
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA)) {
|
|
|
|
if (pbr)
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: table %d (ip rule) %d", __func__,
|
2018-11-29 15:14:41 +01:00
|
|
|
pbra->table_id, install);
|
|
|
|
else
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: table %d fwmark %d %d", __func__,
|
2018-11-29 15:14:41 +01:00
|
|
|
pbra->table_id, pbra->fwmark, install);
|
|
|
|
}
|
2018-03-08 15:39:19 +01:00
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s,
|
|
|
|
install ? ZEBRA_RULE_ADD : ZEBRA_RULE_DELETE,
|
|
|
|
VRF_DEFAULT);
|
|
|
|
stream_putl(s, 1); /* send one pbr action */
|
|
|
|
|
2018-11-29 15:14:41 +01:00
|
|
|
bgp_encode_pbr_rule_action(s, pbra, pbr);
|
2018-03-08 15:39:19 +01:00
|
|
|
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
2020-11-07 00:21:50 +01:00
|
|
|
if ((zclient_send_message(zclient) != ZCLIENT_SEND_FAILURE)
|
|
|
|
&& install) {
|
2018-11-29 15:14:41 +01:00
|
|
|
if (!pbr)
|
|
|
|
pbra->install_in_progress = true;
|
|
|
|
else
|
|
|
|
pbr->install_in_progress = true;
|
|
|
|
}
|
2018-03-08 15:39:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void bgp_send_pbr_ipset_match(struct bgp_pbr_match *pbrim, bool install)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
|
|
|
|
if (pbrim->install_in_progress)
|
|
|
|
return;
|
2018-05-21 12:02:52 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: name %s type %d %d, ID %u", __func__,
|
|
|
|
pbrim->ipset_name, pbrim->type, install,
|
|
|
|
pbrim->unique);
|
2018-03-08 15:39:19 +01:00
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s,
|
|
|
|
install ? ZEBRA_IPSET_CREATE :
|
|
|
|
ZEBRA_IPSET_DESTROY,
|
|
|
|
VRF_DEFAULT);
|
|
|
|
|
|
|
|
stream_putl(s, 1); /* send one pbr action */
|
|
|
|
|
|
|
|
bgp_encode_pbr_ipset_match(s, pbrim);
|
|
|
|
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
2020-11-07 00:21:50 +01:00
|
|
|
if ((zclient_send_message(zclient) != ZCLIENT_SEND_FAILURE) && install)
|
2018-03-08 15:39:19 +01:00
|
|
|
pbrim->install_in_progress = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void bgp_send_pbr_ipset_entry_match(struct bgp_pbr_match_entry *pbrime,
|
|
|
|
bool install)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
|
|
|
|
if (pbrime->install_in_progress)
|
|
|
|
return;
|
2018-05-21 12:02:52 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: name %s %d %d, ID %u", __func__,
|
|
|
|
pbrime->backpointer->ipset_name, pbrime->unique,
|
|
|
|
install, pbrime->unique);
|
2018-03-08 15:39:19 +01:00
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s,
|
|
|
|
install ? ZEBRA_IPSET_ENTRY_ADD :
|
|
|
|
ZEBRA_IPSET_ENTRY_DELETE,
|
|
|
|
VRF_DEFAULT);
|
|
|
|
|
|
|
|
stream_putl(s, 1); /* send one pbr action */
|
|
|
|
|
|
|
|
bgp_encode_pbr_ipset_entry_match(s, pbrime);
|
|
|
|
|
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
2020-11-07 00:21:50 +01:00
|
|
|
if ((zclient_send_message(zclient) != ZCLIENT_SEND_FAILURE) && install)
|
2018-03-08 15:39:19 +01:00
|
|
|
pbrime->install_in_progress = true;
|
|
|
|
}
|
2018-03-12 09:38:53 +01:00
|
|
|
|
2019-10-17 16:11:57 +02:00
|
|
|
static void bgp_encode_pbr_interface_list(struct bgp *bgp, struct stream *s,
|
|
|
|
uint8_t family)
|
2018-04-25 18:29:35 +02:00
|
|
|
{
|
|
|
|
struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
|
|
|
|
struct bgp_pbr_interface_head *head;
|
|
|
|
struct bgp_pbr_interface *pbr_if;
|
|
|
|
struct interface *ifp;
|
|
|
|
|
|
|
|
if (!bgp_pbr_cfg)
|
|
|
|
return;
|
2019-10-17 16:11:57 +02:00
|
|
|
if (family == AF_INET)
|
|
|
|
head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
|
|
|
|
else
|
|
|
|
head = &(bgp_pbr_cfg->ifaces_by_name_ipv6);
|
2018-04-25 18:29:35 +02:00
|
|
|
RB_FOREACH (pbr_if, bgp_pbr_interface_head, head) {
|
2019-06-24 01:46:39 +02:00
|
|
|
ifp = if_lookup_by_name(pbr_if->name, bgp->vrf_id);
|
2018-04-25 18:29:35 +02:00
|
|
|
if (ifp)
|
|
|
|
stream_putl(s, ifp->ifindex);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-17 16:11:57 +02:00
|
|
|
static int bgp_pbr_get_ifnumber(struct bgp *bgp, uint8_t family)
|
2018-04-25 18:29:35 +02:00
|
|
|
{
|
|
|
|
struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
|
|
|
|
struct bgp_pbr_interface_head *head;
|
|
|
|
struct bgp_pbr_interface *pbr_if;
|
|
|
|
int cnt = 0;
|
|
|
|
|
|
|
|
if (!bgp_pbr_cfg)
|
|
|
|
return 0;
|
2019-10-17 16:11:57 +02:00
|
|
|
if (family == AF_INET)
|
|
|
|
head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
|
|
|
|
else
|
|
|
|
head = &(bgp_pbr_cfg->ifaces_by_name_ipv6);
|
2018-04-25 18:29:35 +02:00
|
|
|
RB_FOREACH (pbr_if, bgp_pbr_interface_head, head) {
|
2019-06-24 01:46:39 +02:00
|
|
|
if (if_lookup_by_name(pbr_if->name, bgp->vrf_id))
|
2018-04-25 18:29:35 +02:00
|
|
|
cnt++;
|
|
|
|
}
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2018-03-12 09:38:53 +01:00
|
|
|
void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
|
|
|
|
struct bgp_pbr_match *pbm,
|
|
|
|
bool install)
|
|
|
|
{
|
|
|
|
struct stream *s;
|
2018-05-18 16:14:46 +02:00
|
|
|
int ret = 0;
|
2018-04-25 18:29:35 +02:00
|
|
|
int nb_interface;
|
2018-03-12 09:38:53 +01:00
|
|
|
|
|
|
|
if (pbm->install_iptable_in_progress)
|
|
|
|
return;
|
2018-05-21 12:02:52 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug("%s: name %s type %d mark %d %d, ID %u", __func__,
|
|
|
|
pbm->ipset_name, pbm->type, pba->fwmark, install,
|
2018-07-02 16:53:19 +02:00
|
|
|
pbm->unique2);
|
2018-03-12 09:38:53 +01:00
|
|
|
s = zclient->obuf;
|
|
|
|
stream_reset(s);
|
|
|
|
|
|
|
|
zclient_create_header(s,
|
|
|
|
install ? ZEBRA_IPTABLE_ADD :
|
|
|
|
ZEBRA_IPTABLE_DELETE,
|
|
|
|
VRF_DEFAULT);
|
|
|
|
|
|
|
|
bgp_encode_pbr_iptable_match(s, pba, pbm);
|
2019-10-17 16:11:57 +02:00
|
|
|
nb_interface = bgp_pbr_get_ifnumber(pba->bgp, pbm->family);
|
2018-04-25 18:29:35 +02:00
|
|
|
stream_putl(s, nb_interface);
|
|
|
|
if (nb_interface)
|
2019-10-17 16:11:57 +02:00
|
|
|
bgp_encode_pbr_interface_list(pba->bgp, s, pbm->family);
|
2018-03-12 09:38:53 +01:00
|
|
|
stream_putw_at(s, 0, stream_get_endp(s));
|
2018-05-18 16:14:46 +02:00
|
|
|
ret = zclient_send_message(zclient);
|
|
|
|
if (install) {
|
2020-11-07 00:21:50 +01:00
|
|
|
if (ret != ZCLIENT_SEND_FAILURE)
|
2018-05-18 16:14:46 +02:00
|
|
|
pba->refcnt++;
|
|
|
|
else
|
|
|
|
pbm->install_iptable_in_progress = true;
|
2018-04-24 16:35:00 +02:00
|
|
|
}
|
2018-03-12 09:38:53 +01:00
|
|
|
}
|
2018-03-28 14:51:57 +02:00
|
|
|
|
|
|
|
/* inject in table <table_id> a default route to:
|
|
|
|
* - if nexthop IP is present : to this nexthop
|
|
|
|
* - if vrf is different from local : to the matching VRF
|
|
|
|
*/
|
|
|
|
void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
|
|
|
|
afi_t afi, uint32_t table_id, bool announce)
|
|
|
|
{
|
|
|
|
struct zapi_nexthop *api_nh;
|
|
|
|
struct zapi_route api;
|
|
|
|
struct prefix p;
|
|
|
|
|
2019-10-16 10:05:36 +02:00
|
|
|
if (!nh || (nh->type != NEXTHOP_TYPE_IPV4
|
|
|
|
&& nh->type != NEXTHOP_TYPE_IPV6)
|
2018-03-28 14:51:57 +02:00
|
|
|
|| nh->vrf_id == VRF_UNKNOWN)
|
|
|
|
return;
|
|
|
|
memset(&p, 0, sizeof(struct prefix));
|
2019-10-16 10:05:36 +02:00
|
|
|
if (afi != AFI_IP && afi != AFI_IP6)
|
2018-03-28 14:51:57 +02:00
|
|
|
return;
|
2019-10-16 10:05:36 +02:00
|
|
|
p.family = afi2family(afi);
|
2018-03-28 14:51:57 +02:00
|
|
|
memset(&api, 0, sizeof(api));
|
|
|
|
api.vrf_id = bgp->vrf_id;
|
|
|
|
api.type = ZEBRA_ROUTE_BGP;
|
|
|
|
api.safi = SAFI_UNICAST;
|
|
|
|
api.prefix = p;
|
|
|
|
api.tableid = table_id;
|
|
|
|
api.nexthop_num = 1;
|
|
|
|
SET_FLAG(api.message, ZAPI_MESSAGE_TABLEID);
|
|
|
|
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
|
|
|
|
api_nh = &api.nexthops[0];
|
|
|
|
|
2019-05-29 14:56:03 +02:00
|
|
|
api.distance = ZEBRA_EBGP_DISTANCE_DEFAULT;
|
|
|
|
SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
|
|
|
|
|
2018-03-28 14:51:57 +02:00
|
|
|
/* redirect IP */
|
2019-10-16 10:05:36 +02:00
|
|
|
if (afi == AFI_IP && nh->gate.ipv4.s_addr != INADDR_ANY) {
|
2018-03-28 14:51:57 +02:00
|
|
|
char buff[PREFIX_STRLEN];
|
|
|
|
|
|
|
|
api_nh->vrf_id = nh->vrf_id;
|
|
|
|
api_nh->gate.ipv4 = nh->gate.ipv4;
|
|
|
|
api_nh->type = NEXTHOP_TYPE_IPV4;
|
|
|
|
|
|
|
|
inet_ntop(AF_INET, &(nh->gate.ipv4), buff, INET_ADDRSTRLEN);
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2019-10-16 10:05:36 +02:00
|
|
|
zlog_debug("BGP: %s default route to %s table %d (redirect IP)",
|
|
|
|
announce ? "adding" : "withdrawing",
|
|
|
|
buff, table_id);
|
|
|
|
zclient_route_send(announce ? ZEBRA_ROUTE_ADD
|
|
|
|
: ZEBRA_ROUTE_DELETE,
|
|
|
|
zclient, &api);
|
|
|
|
} else if (afi == AFI_IP6 &&
|
|
|
|
memcmp(&nh->gate.ipv6,
|
|
|
|
&in6addr_any, sizeof(struct in6_addr))) {
|
|
|
|
char buff[PREFIX_STRLEN];
|
|
|
|
|
|
|
|
api_nh->vrf_id = nh->vrf_id;
|
|
|
|
memcpy(&api_nh->gate.ipv6, &nh->gate.ipv6,
|
|
|
|
sizeof(struct in6_addr));
|
|
|
|
api_nh->type = NEXTHOP_TYPE_IPV6;
|
|
|
|
|
|
|
|
inet_ntop(AF_INET6, &(nh->gate.ipv6), buff, INET6_ADDRSTRLEN);
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("BGP: %s default route to %s table %d (redirect IP)",
|
2018-05-21 18:35:38 +02:00
|
|
|
announce ? "adding" : "withdrawing",
|
2018-03-28 14:51:57 +02:00
|
|
|
buff, table_id);
|
|
|
|
zclient_route_send(announce ? ZEBRA_ROUTE_ADD
|
|
|
|
: ZEBRA_ROUTE_DELETE,
|
|
|
|
zclient, &api);
|
|
|
|
} else if (nh->vrf_id != bgp->vrf_id) {
|
|
|
|
struct vrf *vrf;
|
2018-04-17 18:32:49 +02:00
|
|
|
struct interface *ifp;
|
2018-03-28 14:51:57 +02:00
|
|
|
|
2018-04-17 18:32:49 +02:00
|
|
|
vrf = vrf_lookup_by_id(nh->vrf_id);
|
2018-03-28 14:51:57 +02:00
|
|
|
if (!vrf)
|
|
|
|
return;
|
2018-04-17 18:32:49 +02:00
|
|
|
/* create default route with interface <VRF>
|
|
|
|
* with nexthop-vrf <VRF>
|
2018-03-28 14:51:57 +02:00
|
|
|
*/
|
2018-04-17 18:32:49 +02:00
|
|
|
ifp = if_lookup_by_name_all_vrf(vrf->name);
|
|
|
|
if (!ifp)
|
|
|
|
return;
|
|
|
|
api_nh->vrf_id = nh->vrf_id;
|
|
|
|
api_nh->type = NEXTHOP_TYPE_IFINDEX;
|
|
|
|
api_nh->ifindex = ifp->ifindex;
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2018-05-21 18:35:38 +02:00
|
|
|
zlog_info("BGP: %s default route to %s table %d (redirect VRF)",
|
|
|
|
announce ? "adding" : "withdrawing",
|
2018-04-17 18:32:49 +02:00
|
|
|
vrf->name, table_id);
|
|
|
|
zclient_route_send(announce ? ZEBRA_ROUTE_ADD
|
|
|
|
: ZEBRA_ROUTE_DELETE,
|
|
|
|
zclient, &api);
|
2018-03-28 14:51:57 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
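A hypothetical call illustrating the redirect-IP case handled above; it assumes a valid bgp instance in scope, and the table id and nexthop address are made-up values.
struct nexthop nh = {0};

nh.type = NEXTHOP_TYPE_IPV4;
nh.vrf_id = bgp->vrf_id;
inet_pton(AF_INET, "192.0.2.1", &nh.gate.ipv4);
/* install (announce=true) a default route in table 100 via 192.0.2.1 */
bgp_zebra_announce_default(bgp, &nh, AFI_IP, 100, true);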
|
2019-10-25 21:23:52 +02:00
|
|
|
|
|
|
|
/* Send capabilities to RIB */
|
|
|
|
int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable)
|
|
|
|
{
|
|
|
|
struct zapi_cap api;
|
|
|
|
int ret = BGP_GR_SUCCESS;
|
|
|
|
|
|
|
|
if (zclient == NULL) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("zclient invalid");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if the client is connected */
|
|
|
|
if ((zclient->sock < 0) || (zclient->t_connect)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("client not connected");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if capability is already sent. If the flag force is set
|
|
|
|
* send the capability since this can be initial bgp configuration
|
|
|
|
*/
|
|
|
|
memset(&api, 0, sizeof(struct zapi_cap));
|
|
|
|
if (disable) {
|
|
|
|
api.cap = ZEBRA_CLIENT_GR_DISABLE;
|
|
|
|
api.vrf_id = bgp->vrf_id;
|
|
|
|
} else {
|
|
|
|
api.cap = ZEBRA_CLIENT_GR_CAPABILITIES;
|
|
|
|
api.stale_removal_time = bgp->rib_stale_time;
|
|
|
|
api.vrf_id = bgp->vrf_id;
|
|
|
|
}
|
|
|
|
|
2020-01-31 19:04:00 +01:00
|
|
|
if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient, &api)
|
2020-11-11 20:14:37 +01:00
|
|
|
== ZCLIENT_SEND_FAILURE) {
|
2019-11-25 16:18:15 +01:00
|
|
|
zlog_err("error sending capability");
|
2019-10-25 21:23:52 +02:00
|
|
|
ret = BGP_GR_FAILURE;
|
|
|
|
} else {
|
|
|
|
if (disable)
|
|
|
|
bgp->present_zebra_gr_state = ZEBRA_GR_DISABLE;
|
|
|
|
else
|
|
|
|
bgp->present_zebra_gr_state = ZEBRA_GR_ENABLE;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("send capabilty success");
|
|
|
|
ret = BGP_GR_SUCCESS;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
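A hedged example of how these graceful-restart helpers might be chained, assuming a valid bgp instance in scope; both calls return BGP_GR_SUCCESS or BGP_GR_FAILURE.
/* Advertise GR capabilities for this instance, then push the configured
 * stale-route removal time. */
if (bgp_zebra_send_capabilities(bgp, false) == BGP_GR_SUCCESS)
	bgp_zebra_stale_timer_update(bgp);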
|
|
|
|
|
|
|
|
/* Send route update pending or completed status to RIB for the
|
|
|
|
* specific AFI, SAFI
|
|
|
|
*/
|
|
|
|
int bgp_zebra_update(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type)
|
|
|
|
{
|
2019-11-25 16:18:15 +01:00
|
|
|
struct zapi_cap api = {0};
|
2019-10-25 21:23:52 +02:00
|
|
|
|
|
|
|
if (zclient == NULL) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2019-11-25 16:18:15 +01:00
|
|
|
zlog_debug("zclient == NULL, invalid");
|
2019-10-25 21:23:52 +02:00
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if the client is connected */
|
|
|
|
if ((zclient->sock < 0) || (zclient->t_connect)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("client not connected");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
api.afi = afi;
|
|
|
|
api.safi = safi;
|
|
|
|
api.vrf_id = vrf_id;
|
|
|
|
api.cap = type;
|
|
|
|
|
2020-01-31 19:04:00 +01:00
|
|
|
if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient, &api)
|
2020-11-11 20:14:37 +01:00
|
|
|
== ZCLIENT_SEND_FAILURE) {
|
2019-10-25 21:23:52 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("error sending capability");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
return BGP_GR_SUCCESS;
|
|
|
|
}
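For instance, once BGP has finished installing its IPv4 unicast routes after a restart it could signal completion roughly as below; the capability constant is assumed to be the ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE value from lib/zclient.h.
/* Tell zebra that route updates for IPv4 unicast in this VRF are done. */
bgp_zebra_update(AFI_IP, SAFI_UNICAST, bgp->vrf_id,
		 ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE);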
|
|
|
|
|
|
|
|
|
|
|
|
/* Send RIB stale timer update */
|
|
|
|
int bgp_zebra_stale_timer_update(struct bgp *bgp)
|
|
|
|
{
|
|
|
|
struct zapi_cap api;
|
|
|
|
|
|
|
|
if (zclient == NULL) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("zclient invalid");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if the client is connected */
|
|
|
|
if ((zclient->sock < 0) || (zclient->t_connect)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("client not connected");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&api, 0, sizeof(struct zapi_cap));
|
|
|
|
api.cap = ZEBRA_CLIENT_RIB_STALE_TIME;
|
|
|
|
api.stale_removal_time = bgp->rib_stale_time;
|
|
|
|
api.vrf_id = bgp->vrf_id;
|
2020-01-31 19:04:00 +01:00
|
|
|
if (zclient_capabilities_send(ZEBRA_CLIENT_CAPABILITIES, zclient, &api)
|
2020-11-11 20:14:37 +01:00
|
|
|
== ZCLIENT_SEND_FAILURE) {
|
2019-10-25 21:23:52 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("error sending capability");
|
|
|
|
return BGP_GR_FAILURE;
|
|
|
|
}
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("send capabilty success");
|
|
|
|
return BGP_GR_SUCCESS;
|
|
|
|
}
|