2015-05-20 02:40:34 +02:00
|
|
|
/* BGP Nexthop tracking
|
|
|
|
* Copyright (C) 2013 Cumulus Networks, Inc.
|
|
|
|
*
|
|
|
|
* This file is part of GNU Zebra.
|
|
|
|
*
|
|
|
|
* GNU Zebra is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2, or (at your option) any
|
|
|
|
* later version.
|
|
|
|
*
|
|
|
|
* GNU Zebra is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
2017-05-13 10:25:29 +02:00
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with this program; see the file COPYING; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2015-05-20 02:40:34 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include "command.h"
|
|
|
|
#include "thread.h"
|
|
|
|
#include "prefix.h"
|
|
|
|
#include "zclient.h"
|
|
|
|
#include "stream.h"
|
|
|
|
#include "network.h"
|
|
|
|
#include "log.h"
|
|
|
|
#include "memory.h"
|
|
|
|
#include "nexthop.h"
|
*: add VRF ID in the API message header
The API messages are used by zebra to exchange the interfaces, addresses,
routes and router-id information with its clients. To distinguish which
VRF the information belongs to, a new field "VRF ID" is added in the
message header. And hence the message version is increased to 3.
* The new field "VRF ID" in the message header:
Length (2 bytes)
Marker (1 byte)
Version (1 byte)
VRF ID (2 bytes, newly added)
Command (2 bytes)
- Client side:
- zclient_create_header() adds the VRF ID in the message header.
- zclient_read() extracts and validates the VRF ID from the header,
and passes the VRF ID to the callback functions registered to
the API messages.
- All relative functions are appended with a new parameter "vrf_id",
including all the callback functions.
- "vrf_id" is also added to "struct zapi_ipv4" and "struct zapi_ipv6".
Clients need to correctly set the VRF ID when using the API
functions zapi_ipv4_route() and zapi_ipv6_route().
- Till now all messages sent from a client have the default VRF ID
"0" in the header.
- The HELLO message is special, which is used as the heart-beat of
a client, and has no relation with VRF. The VRF ID in the HELLO
message header will always be 0 and ignored by zebra.
- Zebra side:
- zserv_create_header() adds the VRF ID in the message header.
- zebra_client_read() extracts and validates the VRF ID from the
header, and passes the VRF ID to the functions which process
the received messages.
- All relative functions are appended with a new parameter "vrf_id".
* Suppress the messages in a VRF which a client does not care:
Some clients may not care about the information in the VRF X, and
zebra should not send the messages in the VRF X to those clients.
Extra flags are used to indicate which VRF is registered by a client,
and a new message ZEBRA_VRF_UNREGISTER is introduced to let a client
can unregister a VRF when it does not need any information in that
VRF.
A client sends any message other than ZEBRA_VRF_UNREGISTER in a VRF
will automatically register to that VRF.
- lib/vrf:
A new utility "VRF bit-map" is provided to manage the flags for
VRFs, one bit per VRF ID.
- Use vrf_bitmap_init()/vrf_bitmap_free() to initialize/free a
bit-map;
- Use vrf_bitmap_set()/vrf_bitmap_unset() to set/unset a flag
in the given bit-map, corresponding to the given VRF ID;
- Use vrf_bitmap_check() to test whether the flag, in the given
bit-map and for the given VRF ID, is set.
- Client side:
- In "struct zclient", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
default_information
These flags are extended for each VRF, and controlled by the
clients themselves (or with the help of zclient_redistribute()
and zclient_redistribute_default()).
- Zebra side:
- In "struct zserv", the following flags are changed from
"u_char" to "vrf_bitmap_t":
redist[ZEBRA_ROUTE_MAX]
redist_default
ifinfo
ridinfo
These flags are extended for each VRF, as the VRF registration
flags. They are maintained on receiving a ZEBRA_XXX_ADD or
ZEBRA_XXX_DELETE message.
When sending an interface/address/route/router-id message in
a VRF to a client, if the corresponding VRF registration flag
is not set, this message will not be dropped by zebra.
- A new function zread_vrf_unregister() is introduced to process
the new command ZEBRA_VRF_UNREGISTER. All the VRF registration
flags are cleared for the requested VRF.
Those clients, who support only the default VRF, will never receive
a message in a non-default VRF, thanks to the filter in zebra.
* New callback for the event of successful connection to zebra:
- zclient_start() is splitted, keeping only the code of connecting
to zebra.
- Now zclient_init()=>zclient_connect()=>zclient_start() operations
are purely dealing with the connection to zbera.
- Once zebra is successfully connected, at the end of zclient_start(),
a new callback is used to inform the client about connection.
- Till now, in the callback of connect-to-zebra event, all clients
send messages to zebra to request the router-id/interface/routes
information in the default VRF.
Of corse in future the client can do anything it wants in this
callback. For example, it may send requests for both default VRF
and some non-default VRFs.
Signed-off-by: Feng Lu <lu.feng@6wind.com>
Reviewed-by: Alain Ritoux <alain.ritoux@6wind.com>
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Acked-by: Donald Sharp <sharpd@cumulusnetworks.com>
Conflicts:
lib/zclient.h
lib/zebra.h
zebra/zserv.c
zebra/zserv.h
Conflicts:
bgpd/bgp_nexthop.c
bgpd/bgp_nht.c
bgpd/bgp_zebra.c
isisd/isis_zebra.c
lib/zclient.c
lib/zclient.h
lib/zebra.h
nhrpd/nhrp_interface.c
nhrpd/nhrp_route.c
nhrpd/nhrpd.h
ospf6d/ospf6_zebra.c
ospf6d/ospf6_zebra.h
ospfd/ospf_vty.c
ospfd/ospf_zebra.c
pimd/pim_zebra.c
pimd/pim_zlookup.c
ripd/rip_zebra.c
ripngd/ripng_zebra.c
zebra/redistribute.c
zebra/rt_netlink.c
zebra/zebra_rnh.c
zebra/zebra_rnh.h
zebra/zserv.c
zebra/zserv.h
2014-10-16 03:52:36 +02:00
|
|
|
#include "vrf.h"
|
2016-01-07 16:03:01 +01:00
|
|
|
#include "filter.h"
|
2020-05-16 01:33:41 +02:00
|
|
|
#include "nexthop_group.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
|
|
|
|
#include "bgpd/bgpd.h"
|
|
|
|
#include "bgpd/bgp_table.h"
|
|
|
|
#include "bgpd/bgp_route.h"
|
|
|
|
#include "bgpd/bgp_attr.h"
|
|
|
|
#include "bgpd/bgp_nexthop.h"
|
|
|
|
#include "bgpd/bgp_debug.h"
|
2018-06-15 23:08:53 +02:00
|
|
|
#include "bgpd/bgp_errors.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
#include "bgpd/bgp_nht.h"
|
2015-05-20 03:12:17 +02:00
|
|
|
#include "bgpd/bgp_fsm.h"
|
2016-10-07 15:44:42 +02:00
|
|
|
#include "bgpd/bgp_zebra.h"
|
2018-12-04 09:33:21 +01:00
|
|
|
#include "bgpd/bgp_flowspec_util.h"
|
2019-11-14 01:46:56 +01:00
|
|
|
#include "bgpd/bgp_evpn.h"
|
2020-05-25 23:10:12 +02:00
|
|
|
#include "bgpd/bgp_rd.h"
|
2015-05-20 02:40:34 +02:00
|
|
|
|
|
|
|
extern struct zclient *zclient;
|
|
|
|
|
2022-03-12 20:48:18 +01:00
|
|
|
static void register_zebra_rnh(struct bgp_nexthop_cache *bnc);
|
|
|
|
static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc);
|
2018-10-03 02:43:07 +02:00
|
|
|
static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
|
2022-02-23 01:04:25 +01:00
|
|
|
static void bgp_nht_ifp_initial(struct thread *thread);
|
2015-05-20 02:40:34 +02:00
|
|
|
|
2016-10-11 17:30:55 +02:00
|
|
|
static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc)
|
|
|
|
{
|
|
|
|
return (bgp_zebra_num_connects() == 0
|
2021-04-22 19:04:47 +02:00
|
|
|
|| (bnc && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID)
|
|
|
|
&& bnc->nexthop_num > 0));
|
2016-10-11 17:30:55 +02:00
|
|
|
}
|
|
|
|
|
2022-08-17 11:52:51 +02:00
|
|
|
static int bgp_isvalid_nexthop_for_ebgp(struct bgp_nexthop_cache *bnc,
|
|
|
|
struct bgp_path_info *path)
|
|
|
|
{
|
|
|
|
struct interface *ifp = NULL;
|
|
|
|
struct nexthop *nexthop;
|
|
|
|
struct bgp_interface *iifp;
|
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
if (!path->extra || !path->extra->peer_orig)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
peer = path->extra->peer_orig;
|
|
|
|
|
|
|
|
/* only connected ebgp peers are valid */
|
|
|
|
if (peer->sort != BGP_PEER_EBGP || peer->ttl != BGP_DEFAULT_TTL ||
|
|
|
|
CHECK_FLAG(peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK) ||
|
|
|
|
CHECK_FLAG(peer->bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) {
|
|
|
|
if (nexthop->type == NEXTHOP_TYPE_IFINDEX ||
|
|
|
|
nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX ||
|
|
|
|
nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) {
|
|
|
|
ifp = if_lookup_by_index(
|
|
|
|
bnc->ifindex ? bnc->ifindex : nexthop->ifindex,
|
|
|
|
bnc->bgp->vrf_id);
|
|
|
|
}
|
|
|
|
if (!ifp)
|
|
|
|
continue;
|
|
|
|
iifp = ifp->info;
|
|
|
|
if (CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
static int bgp_isvalid_nexthop_for_mplsovergre(struct bgp_nexthop_cache *bnc,
|
|
|
|
struct bgp_path_info *path)
|
|
|
|
{
|
|
|
|
struct interface *ifp = NULL;
|
|
|
|
struct nexthop *nexthop;
|
|
|
|
|
|
|
|
for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) {
|
|
|
|
if (nexthop->type != NEXTHOP_TYPE_BLACKHOLE) {
|
|
|
|
ifp = if_lookup_by_index(
|
|
|
|
bnc->ifindex ? bnc->ifindex : nexthop->ifindex,
|
|
|
|
bnc->bgp->vrf_id);
|
|
|
|
if (ifp && (ifp->ll_type == ZEBRA_LLT_IPGRE ||
|
|
|
|
ifp->ll_type == ZEBRA_LLT_IP6GRE))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!ifp)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (CHECK_FLAG(path->attr->rmap_change_flags,
|
|
|
|
BATTR_RMAP_L3VPN_ACCEPT_GRE))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int bgp_isvalid_nexthop_for_mpls(struct bgp_nexthop_cache *bnc,
|
|
|
|
struct bgp_path_info *path)
|
2018-03-24 00:57:03 +01:00
|
|
|
{
|
2021-02-09 06:26:22 +01:00
|
|
|
/*
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
* - In the case of MPLS-VPN, the label is learned from LDP or other
|
2021-02-09 06:26:22 +01:00
|
|
|
* protocols, and nexthop tracking is enabled for the label.
|
|
|
|
* The value is recorded as BGP_NEXTHOP_LABELED_VALID.
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
* - In the case of SRv6-VPN, we need to track the reachability to the
|
2021-02-09 06:26:22 +01:00
|
|
|
* SID (in other words, IPv6 address). As in MPLS, we need to record
|
|
|
|
* the value as BGP_NEXTHOP_SID_VALID. However, this function is
|
|
|
|
* currently not implemented, and this function assumes that all
|
|
|
|
* Transit routes for SRv6-VPN are valid.
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
* - Otherwise check for mpls-gre acceptance
|
2021-02-09 06:26:22 +01:00
|
|
|
*/
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
return (bgp_zebra_num_connects() == 0 ||
|
|
|
|
(bnc && (bnc->nexthop_num > 0 &&
|
2022-09-18 21:18:13 +02:00
|
|
|
(CHECK_FLAG(path->flags, BGP_PATH_ACCEPT_OWN) ||
|
|
|
|
CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID) ||
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
bnc->bgp->srv6_enabled ||
|
2022-08-17 11:52:51 +02:00
|
|
|
bgp_isvalid_nexthop_for_ebgp(bnc, path) ||
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
bgp_isvalid_nexthop_for_mplsovergre(bnc, path)))));
|
2018-03-24 00:57:03 +01:00
|
|
|
}
|
|
|
|
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2018-08-10 15:00:17 +02:00
|
|
|
if (LIST_EMPTY(&(bnc->paths)) && !bnc->nht_info) {
|
2022-08-25 12:53:30 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT))
|
|
|
|
zlog_debug("%s: freeing bnc %pFX(%d)(%u)(%s)", __func__,
|
|
|
|
&bnc->prefix, bnc->ifindex, bnc->srte_color,
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc->bgp->name_pretty);
|
2020-06-26 18:37:30 +02:00
|
|
|
/* only unregister if this is the last nh for this prefix*/
|
|
|
|
if (!bnc_existing_for_prefix(bnc))
|
2022-03-12 20:48:18 +01:00
|
|
|
unregister_zebra_rnh(bnc);
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc_free(bnc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-02 22:41:30 +02:00
|
|
|
void bgp_unlink_nexthop(struct bgp_path_info *path)
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
{
|
|
|
|
struct bgp_nexthop_cache *bnc = path->nexthop;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
if (!bnc)
|
2017-07-17 14:03:14 +02:00
|
|
|
return;
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
|
2018-10-17 17:27:30 +02:00
|
|
|
path_nh_map(path, NULL, false);
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
|
|
|
|
bgp_unlink_nexthop_check(bnc);
|
|
|
|
}
|
|
|
|
|
2021-04-12 20:16:30 +02:00
|
|
|
void bgp_replace_nexthop_by_peer(struct peer *from, struct peer *to)
|
|
|
|
{
|
|
|
|
struct prefix pp;
|
|
|
|
struct prefix pt;
|
|
|
|
struct bgp_nexthop_cache *bncp, *bnct;
|
|
|
|
afi_t afi;
|
2022-07-21 21:42:51 +02:00
|
|
|
ifindex_t ifindex = 0;
|
2021-04-12 20:16:30 +02:00
|
|
|
|
|
|
|
if (!sockunion2hostprefix(&from->su, &pp))
|
|
|
|
return;
|
|
|
|
|
2022-07-21 21:42:51 +02:00
|
|
|
/*
|
|
|
|
* Gather the ifindex for if up/down events to be
|
|
|
|
* tagged into this fun
|
|
|
|
*/
|
|
|
|
if (from->conf_if && IN6_IS_ADDR_LINKLOCAL(&from->su.sin6.sin6_addr))
|
|
|
|
ifindex = from->su.sin6.sin6_scope_id;
|
|
|
|
|
2021-04-12 20:16:30 +02:00
|
|
|
afi = family2afi(pp.family);
|
2022-07-21 21:42:51 +02:00
|
|
|
bncp = bnc_find(&from->bgp->nexthop_cache_table[afi], &pp, 0, ifindex);
|
2021-04-12 20:16:30 +02:00
|
|
|
|
|
|
|
if (!sockunion2hostprefix(&to->su, &pt))
|
|
|
|
return;
|
|
|
|
|
2022-07-21 21:42:51 +02:00
|
|
|
/*
|
|
|
|
* Gather the ifindex for if up/down events to be
|
|
|
|
* tagged into this fun
|
|
|
|
*/
|
|
|
|
ifindex = 0;
|
|
|
|
if (to->conf_if && IN6_IS_ADDR_LINKLOCAL(&to->su.sin6.sin6_addr))
|
|
|
|
ifindex = to->su.sin6.sin6_scope_id;
|
|
|
|
bnct = bnc_find(&to->bgp->nexthop_cache_table[afi], &pt, 0, ifindex);
|
2021-04-12 20:16:30 +02:00
|
|
|
|
|
|
|
if (bnct != bncp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (bnct)
|
|
|
|
bnct->nht_info = to;
|
|
|
|
}
|
|
|
|
|
2022-10-08 02:07:46 +02:00
|
|
|
/*
|
|
|
|
* Returns the bnc whose bnc->nht_info matches the LL peer by
|
|
|
|
* looping through the IPv6 nexthop table
|
|
|
|
*/
|
|
|
|
static struct bgp_nexthop_cache *
|
|
|
|
bgp_find_ipv6_nexthop_matching_peer(struct peer *peer)
|
|
|
|
{
|
|
|
|
struct bgp_nexthop_cache *bnc;
|
|
|
|
|
|
|
|
frr_each (bgp_nexthop_cache, &peer->bgp->nexthop_cache_table[AFI_IP6],
|
|
|
|
bnc) {
|
|
|
|
if (bnc->nht_info == peer) {
|
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
|
|
|
zlog_debug(
|
|
|
|
"Found bnc: %pFX(%u)(%u)(%p) for peer: %s(%s) %p",
|
|
|
|
&bnc->prefix, bnc->ifindex,
|
|
|
|
bnc->srte_color, bnc, peer->host,
|
|
|
|
peer->bgp->name_pretty, peer);
|
|
|
|
}
|
|
|
|
return bnc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BGP_DEBUG(nht, NHT))
|
|
|
|
zlog_debug(
|
|
|
|
"Could not find bnc for peer %s(%s) %p in v6 nexthop table",
|
|
|
|
peer->host, peer->bgp->name_pretty, peer);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
void bgp_unlink_nexthop_by_peer(struct peer *peer)
|
|
|
|
{
|
|
|
|
struct prefix p;
|
|
|
|
struct bgp_nexthop_cache *bnc;
|
|
|
|
afi_t afi = family2afi(peer->su.sa.sa_family);
|
2022-07-21 21:42:51 +02:00
|
|
|
ifindex_t ifindex = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-10-08 02:07:46 +02:00
|
|
|
if (!sockunion2hostprefix(&peer->su, &p)) {
|
|
|
|
/*
|
|
|
|
* In scenarios where unnumbered BGP session is brought
|
|
|
|
* down by shutting down the interface before unconfiguring
|
|
|
|
* the BGP neighbor, neighbor information in peer->su.sa
|
|
|
|
* will be cleared when the interface is shutdown. So
|
|
|
|
* during the deletion of unnumbered bgp peer, above check
|
|
|
|
* will return true. Therefore, in this case,BGP needs to
|
|
|
|
* find the bnc whose bnc->nht_info matches the
|
|
|
|
* peer being deleted and free it.
|
|
|
|
*/
|
|
|
|
bnc = bgp_find_ipv6_nexthop_matching_peer(peer);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Gather the ifindex for if up/down events to be
|
|
|
|
* tagged into this fun
|
|
|
|
*/
|
|
|
|
if (afi == AFI_IP6 &&
|
|
|
|
IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
|
|
|
|
ifindex = peer->su.sin6.sin6_scope_id;
|
|
|
|
bnc = bnc_find(&peer->bgp->nexthop_cache_table[afi], &p, 0,
|
|
|
|
ifindex);
|
|
|
|
}
|
|
|
|
|
2018-09-19 14:20:37 +02:00
|
|
|
if (!bnc)
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
/* cleanup the peer reference */
|
|
|
|
bnc->nht_info = NULL;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: bgp_nexthop_cache not deleted with peers
* Fix mild leak, bgp_nexthop_caches were not deleted when their peer was.
Not a huge one, but makes valgrinding for other leaks noisier.
Credit to Lou Berger <lberger@labn.net> for doing the hard work of
debugging and pinning down the leak, and supplying an initial fix.
That one didn't quite get the refcounting right, it seemed, hence
this version.
This version also keeps bncs pinned so long as the peer is defined, where
Lou's tried to delete whenever the peer went through bgp_stop. That causes
lots of zebra traffic if down peers go Active->Connect->Active, etc., so
leaving bnc's in place until peer_delete seemed better.
* bgp_nht.c: (bgp_unlink_nexthop_by_peer) similar to bgp_unlink_nexthop, but
by peer.
* bgp_nht.c: (bgp_unlink_nexthop_check) helper to consolidate checking
if a bnc should be deleted.
(bgp_unlink_nexthop_by_peer) ensure the bnc->nht_info peer reference
is removed, and hence allow bncs to be removed by previous.
* bgpd.c: (peer_delete) cleanup the peer's bnc.
2016-09-06 18:23:48 +02:00
|
|
|
bgp_unlink_nexthop_check(bnc);
|
|
|
|
}
|
|
|
|
|
2018-03-24 00:57:03 +01:00
|
|
|
/*
|
|
|
|
* A route and its nexthop might belong to different VRFs. Therefore,
|
|
|
|
* we need both the bgp_route and bgp_nexthop pointers.
|
|
|
|
*/
|
|
|
|
int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
|
2021-01-21 15:16:26 +01:00
|
|
|
afi_t afi, safi_t safi, struct bgp_path_info *pi,
|
2021-06-30 10:52:29 +02:00
|
|
|
struct peer *peer, int connected,
|
|
|
|
const struct prefix *orig_prefix)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2020-04-09 20:56:11 +02:00
|
|
|
struct bgp_nexthop_cache_head *tree = NULL;
|
2015-05-20 02:40:34 +02:00
|
|
|
struct bgp_nexthop_cache *bnc;
|
2022-02-14 14:18:10 +01:00
|
|
|
struct bgp_path_info *bpi_ultimate;
|
2015-05-20 02:40:34 +02:00
|
|
|
struct prefix p;
|
2020-08-26 19:39:33 +02:00
|
|
|
uint32_t srte_color = 0;
|
2015-05-20 03:04:20 +02:00
|
|
|
int is_bgp_static_route = 0;
|
2020-12-17 15:46:30 +01:00
|
|
|
ifindex_t ifindex = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-03 02:43:07 +02:00
|
|
|
if (pi) {
|
|
|
|
is_bgp_static_route = ((pi->type == ZEBRA_ROUTE_BGP)
|
|
|
|
&& (pi->sub_type == BGP_ROUTE_STATIC))
|
2015-06-11 18:19:12 +02:00
|
|
|
? 1
|
|
|
|
: 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-11 18:19:12 +02:00
|
|
|
/* Since Extended Next-hop Encoding (RFC5549) support, we want
|
|
|
|
to derive
|
|
|
|
address-family from the next-hop. */
|
|
|
|
if (!is_bgp_static_route)
|
2022-07-27 00:04:14 +02:00
|
|
|
afi = BGP_ATTR_MP_NEXTHOP_LEN_IP6(pi->attr) ? AFI_IP6
|
|
|
|
: AFI_IP;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-07-29 17:48:57 +02:00
|
|
|
/* Validation for the ipv4 mapped ipv6 nexthop. */
|
|
|
|
if (IS_MAPPED_IPV6(&pi->attr->mp_nexthop_global)) {
|
|
|
|
afi = AFI_IP;
|
|
|
|
}
|
|
|
|
|
2019-07-01 19:26:05 +02:00
|
|
|
/* This will return true if the global IPv6 NH is a link local
|
2015-05-20 03:04:03 +02:00
|
|
|
* addr */
|
2018-10-03 02:43:07 +02:00
|
|
|
if (make_prefix(afi, pi, &p) < 0)
|
2015-05-20 02:47:21 +02:00
|
|
|
return 1;
|
2020-08-26 19:39:33 +02:00
|
|
|
|
2021-06-30 10:52:29 +02:00
|
|
|
if (!is_bgp_static_route && orig_prefix
|
|
|
|
&& prefix_same(&p, orig_prefix)) {
|
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
|
|
|
zlog_debug(
|
|
|
|
"%s(%pFX): prefix loops through itself",
|
|
|
|
__func__, &p);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
srte_color = pi->attr->srte_color;
|
2015-05-20 02:47:21 +02:00
|
|
|
} else if (peer) {
|
2020-12-17 15:46:30 +01:00
|
|
|
/*
|
|
|
|
* Gather the ifindex for if up/down events to be
|
|
|
|
* tagged into this fun
|
|
|
|
*/
|
2022-07-21 21:42:51 +02:00
|
|
|
if (afi == AFI_IP6 &&
|
|
|
|
IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr)) {
|
2020-12-17 15:46:30 +01:00
|
|
|
ifindex = peer->su.sin6.sin6_scope_id;
|
2022-07-21 21:42:51 +02:00
|
|
|
if (ifindex == 0) {
|
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Unable to locate ifindex, waiting till we have one",
|
|
|
|
peer->conf_if);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2020-12-17 15:46:30 +01:00
|
|
|
|
2016-11-02 03:29:46 +01:00
|
|
|
if (!sockunion2hostprefix(&peer->su, &p)) {
|
2015-05-20 03:04:03 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
2015-05-20 03:04:20 +02:00
|
|
|
zlog_debug(
|
2015-05-20 03:04:03 +02:00
|
|
|
"%s: Attempting to register with unknown AFI %d (not %d or %d)",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, afi, AFI_IP, AFI_IP6);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2015-05-20 03:04:03 +02:00
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
} else
|
2015-05-20 03:04:03 +02:00
|
|
|
return 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:03 +02:00
|
|
|
if (is_bgp_static_route)
|
2020-04-09 20:56:11 +02:00
|
|
|
tree = &bgp_nexthop->import_check_table[afi];
|
2017-07-17 14:03:14 +02:00
|
|
|
else
|
2020-04-09 20:56:11 +02:00
|
|
|
tree = &bgp_nexthop->nexthop_cache_table[afi];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc = bnc_find(tree, &p, srte_color, ifindex);
|
2018-09-19 14:20:37 +02:00
|
|
|
if (!bnc) {
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc = bnc_new(tree, &p, srte_color, ifindex);
|
2018-03-24 00:57:03 +01:00
|
|
|
bnc->bgp = bgp_nexthop;
|
2022-08-25 12:53:30 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT))
|
|
|
|
zlog_debug("Allocated bnc %pFX(%d)(%u)(%s) peer %p",
|
|
|
|
&bnc->prefix, bnc->ifindex, bnc->srte_color,
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc->bgp->name_pretty, peer);
|
2021-03-21 21:39:51 +01:00
|
|
|
} else {
|
2022-08-25 12:53:30 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT))
|
2021-03-21 21:39:51 +01:00
|
|
|
zlog_debug(
|
2022-08-25 12:53:30 +02:00
|
|
|
"Found existing bnc %pFX(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
|
|
|
|
&bnc->prefix, bnc->ifindex,
|
|
|
|
bnc->bgp->name_pretty, bnc->flags, bnc->ifindex,
|
|
|
|
bnc->path_count, bnc->nht_info);
|
2015-05-20 02:47:21 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
if (pi && is_route_parent_evpn(pi))
|
|
|
|
bnc->is_evpn_gwip_nexthop = true;
|
|
|
|
|
2023-01-11 16:14:11 +01:00
|
|
|
if (is_bgp_static_route) {
|
2016-02-02 13:36:20 +01:00
|
|
|
SET_FLAG(bnc->flags, BGP_STATIC_ROUTE);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-02-02 13:36:20 +01:00
|
|
|
/* If we're toggling the type, re-register */
|
2020-02-06 15:37:20 +01:00
|
|
|
if ((CHECK_FLAG(bgp_route->flags, BGP_FLAG_IMPORT_CHECK))
|
2015-05-20 03:04:20 +02:00
|
|
|
&& !CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH)) {
|
|
|
|
SET_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
2020-02-06 15:37:20 +01:00
|
|
|
} else if ((!CHECK_FLAG(bgp_route->flags,
|
|
|
|
BGP_FLAG_IMPORT_CHECK))
|
2015-05-20 03:04:20 +02:00
|
|
|
&& CHECK_FLAG(bnc->flags,
|
|
|
|
BGP_STATIC_ROUTE_EXACT_MATCH)) {
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2015-05-20 03:04:20 +02:00
|
|
|
}
|
2015-11-15 19:21:12 +01:00
|
|
|
/* When nexthop is already known, but now requires 'connected'
|
|
|
|
* resolution,
|
|
|
|
* re-register it. The reverse scenario where the nexthop currently
|
|
|
|
* requires
|
|
|
|
* 'connected' resolution does not need a re-register (i.e., we treat
|
|
|
|
* 'connected-required' as an override) except in the scenario where
|
2017-07-17 14:03:14 +02:00
|
|
|
* this
|
2015-10-20 23:57:09 +02:00
|
|
|
* is actually a case of tracking a peer for connectivity (e.g., after
|
|
|
|
* disable connected-check).
|
2015-11-15 19:21:12 +01:00
|
|
|
* NOTE: We don't track the number of paths separately for 'connected-
|
|
|
|
* required' vs 'connected-not-required' as this change is not a common
|
|
|
|
* scenario.
|
2017-07-17 14:03:14 +02:00
|
|
|
*/
|
2015-10-20 23:57:09 +02:00
|
|
|
else if (connected && !CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) {
|
2015-05-20 03:04:20 +02:00
|
|
|
SET_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
2023-01-11 16:14:11 +01:00
|
|
|
} else if (peer && !connected
|
|
|
|
&& CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED)) {
|
2015-05-20 03:04:20 +02:00
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
|
|
|
}
|
2021-03-21 21:39:51 +01:00
|
|
|
if (peer && (bnc->ifindex != ifindex)) {
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
|
|
|
bnc->ifindex = ifindex;
|
|
|
|
}
|
2018-03-24 00:57:03 +01:00
|
|
|
if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW) {
|
2018-10-05 17:31:29 +02:00
|
|
|
SET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
2020-03-22 05:02:18 +01:00
|
|
|
} else if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED)
|
2020-04-09 20:56:11 +02:00
|
|
|
&& !is_default_host_route(&bnc->prefix))
|
2022-03-12 20:48:18 +01:00
|
|
|
register_zebra_rnh(bnc);
|
2019-04-03 03:47:46 +02:00
|
|
|
|
2018-10-03 02:43:07 +02:00
|
|
|
if (pi && pi->nexthop != bnc) {
|
2015-05-20 03:04:20 +02:00
|
|
|
/* Unlink from existing nexthop cache, if any. This will also
|
|
|
|
* free
|
|
|
|
* the nexthop cache entry, if appropriate.
|
2017-07-17 14:03:14 +02:00
|
|
|
*/
|
2018-10-03 02:43:07 +02:00
|
|
|
bgp_unlink_nexthop(pi);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-17 17:27:30 +02:00
|
|
|
/* updates NHT pi list reference */
|
|
|
|
path_nh_map(pi, bnc, true);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-02-14 14:18:10 +01:00
|
|
|
bpi_ultimate = bgp_get_imported_bpi_ultimate(pi);
|
2015-11-15 19:21:12 +01:00
|
|
|
if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID) && bnc->metric)
|
2022-02-14 14:18:10 +01:00
|
|
|
(bgp_path_info_extra_get(bpi_ultimate))->igpmetric =
|
|
|
|
bnc->metric;
|
|
|
|
else if (bpi_ultimate->extra)
|
|
|
|
bpi_ultimate->extra->igpmetric = 0;
|
2021-04-12 20:16:30 +02:00
|
|
|
} else if (peer) {
|
|
|
|
/*
|
2022-04-19 14:31:30 +02:00
|
|
|
* Let's not accidentally save the peer data for a peer
|
2021-04-12 20:16:30 +02:00
|
|
|
* we are going to throw away in a second or so.
|
|
|
|
* When we come back around we'll fix up this
|
|
|
|
* data properly in replace_nexthop_by_peer
|
|
|
|
*/
|
|
|
|
if (CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
|
|
|
|
bnc->nht_info = (void *)peer; /* NHT peer reference */
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-02-01 16:31:40 +01:00
|
|
|
/*
|
|
|
|
* We are cheating here. Views have no associated underlying
|
|
|
|
* ability to detect nexthops. So when we have a view
|
|
|
|
* just tell everyone the nexthop is valid
|
|
|
|
*/
|
2018-03-24 00:57:03 +01:00
|
|
|
if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW)
|
2017-02-01 16:31:40 +01:00
|
|
|
return 1;
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
else if (safi == SAFI_UNICAST && pi &&
|
|
|
|
pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra &&
|
|
|
|
pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop)
|
|
|
|
return bgp_isvalid_nexthop_for_mpls(bnc, pi);
|
|
|
|
else
|
2017-02-01 16:31:40 +01:00
|
|
|
return (bgp_isvalid_nexthop(bnc));
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
2015-05-20 03:03:54 +02:00
|
|
|
/*
 * Tear down the 'connected' reachability tracking of a peer's own
 * address: clear the peer back-reference on the matching nexthop-cache
 * entry, and free the entry once no paths reference it any longer.
 */
void bgp_delete_connected_nexthop(afi_t afi, struct peer *peer)
{
	struct bgp_nexthop_cache *bnc;
	struct prefix p;
	ifindex_t ifindex = 0;

	if (!peer)
		return;

	/*
	 * In case the check below evaluates true and the bnc has not been
	 * freed at this point, then we might have to do something similar
	 * to what's done in bgp_unlink_nexthop_by_peer().  Since
	 * bgp_unlink_nexthop_by_peer() loops through the nodes of the V6
	 * nexthop cache to find the bnc, it is deliberately not being
	 * called here.
	 */
	if (!sockunion2hostprefix(&peer->su, &p))
		return;

	/*
	 * Gather the ifindex for if up/down events to be
	 * tagged into this fun
	 */
	if (afi == AFI_IP6 && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
		ifindex = peer->su.sin6.sin6_scope_id;

	bnc = bnc_find(&peer->bgp->nexthop_cache_table[family2afi(p.family)],
		       &p, 0, ifindex);
	if (!bnc) {
		if (BGP_DEBUG(nht, NHT))
			zlog_debug(
				"Cannot find connected NHT node for peer %s(%s)",
				peer->host, peer->bgp->name_pretty);
		return;
	}

	if (bnc->nht_info != peer) {
		/* Some other peer owns this entry; leave it alone */
		if (BGP_DEBUG(nht, NHT))
			zlog_debug(
				"Connected NHT %p node for peer %s(%s) points to %p",
				bnc, peer->host, bnc->bgp->name_pretty,
				bnc->nht_info);
		return;
	}

	bnc->nht_info = NULL;

	if (LIST_EMPTY(&(bnc->paths))) {
		/* No paths reference the entry anymore: release it */
		if (BGP_DEBUG(nht, NHT))
			zlog_debug(
				"Freeing connected NHT node %p for peer %s(%s)",
				bnc, peer->host, bnc->bgp->name_pretty);
		unregister_zebra_rnh(bnc);
		bnc_free(bnc);
	}
}
|
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
|
2022-04-24 22:52:46 +02:00
|
|
|
struct zapi_route *nhr,
|
|
|
|
bool import_check)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
struct nexthop *nexthop;
|
|
|
|
struct nexthop *oldnh;
|
|
|
|
struct nexthop *nhlist_head = NULL;
|
|
|
|
struct nexthop *nhlist_tail = NULL;
|
|
|
|
int i;
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup efficiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain a list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
bool evpn_resolved = false;
|
2018-09-19 14:20:37 +02:00
|
|
|
|
2022-08-18 00:27:54 +02:00
|
|
|
bnc->last_update = monotime(NULL);
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc->change_flags = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
/* debug print the input */
|
2021-01-28 01:56:13 +01:00
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
|
|
|
char bnc_buf[BNC_FLAG_DUMP_SIZE];
|
|
|
|
|
2017-03-09 15:54:20 +01:00
|
|
|
zlog_debug(
|
2022-07-21 21:42:51 +02:00
|
|
|
"%s(%u): Rcvd NH update %pFX(%u)%u) - metric %d/%d #nhops %d/%d flags %s",
|
2020-10-18 13:33:54 +02:00
|
|
|
bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix,
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc->ifindex, bnc->srte_color, nhr->metric, bnc->metric,
|
2021-01-28 01:56:13 +01:00
|
|
|
nhr->nexthop_num, bnc->nexthop_num,
|
|
|
|
bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
|
|
|
|
sizeof(bnc_buf)));
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
if (nhr->metric != bnc->metric)
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc->change_flags |= BGP_NEXTHOP_METRIC_CHANGED;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
if (nhr->nexthop_num != bnc->nexthop_num)
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc->change_flags |= BGP_NEXTHOP_CHANGED;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-04-24 22:52:46 +02:00
|
|
|
if (import_check && (nhr->type == ZEBRA_ROUTE_BGP ||
|
|
|
|
!prefix_same(&bnc->prefix, &nhr->prefix))) {
|
|
|
|
SET_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE);
|
|
|
|
|
|
|
|
bnc_nexthop_free(bnc);
|
|
|
|
bnc->nexthop = NULL;
|
|
|
|
|
|
|
|
if (BGP_DEBUG(nht, NHT))
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Import Check does not resolve to the same prefix for %pFX received %pFX or matching route is BGP",
|
|
|
|
__func__, &bnc->prefix, &nhr->prefix);
|
|
|
|
} else if (nhr->nexthop_num) {
|
2018-09-06 15:16:30 +02:00
|
|
|
struct peer *peer = bnc->nht_info;
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
/* notify bgp fsm if nbr ip goes from invalid->valid */
|
2015-05-20 03:04:09 +02:00
|
|
|
if (!bnc->nexthop_num)
|
2015-05-20 02:40:34 +02:00
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
if (!bnc->is_evpn_gwip_nexthop)
|
|
|
|
bnc->flags |= BGP_NEXTHOP_VALID;
|
2020-08-26 19:39:33 +02:00
|
|
|
bnc->metric = nhr->metric;
|
|
|
|
bnc->nexthop_num = nhr->nexthop_num;
|
2018-02-05 09:44:29 +01:00
|
|
|
|
2018-03-24 00:57:03 +01:00
|
|
|
bnc->flags &= ~BGP_NEXTHOP_LABELED_VALID; /* check below */
|
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
for (i = 0; i < nhr->nexthop_num; i++) {
|
2018-03-24 00:57:03 +01:00
|
|
|
int num_labels = 0;
|
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-09-06 15:16:30 +02:00
|
|
|
/*
|
|
|
|
* Turn on RA for the v6 nexthops
|
|
|
|
* we receive from bgp. This is to allow us
|
|
|
|
* to work with v4 routing over v6 nexthops
|
|
|
|
*/
|
2018-09-25 15:22:22 +02:00
|
|
|
if (peer && !peer->ifp
|
|
|
|
&& CHECK_FLAG(peer->flags,
|
|
|
|
PEER_FLAG_CAPABILITY_ENHE)
|
2020-08-26 19:39:33 +02:00
|
|
|
&& nhr->prefix.family == AF_INET6
|
2019-12-03 21:08:01 +01:00
|
|
|
&& nexthop->type != NEXTHOP_TYPE_BLACKHOLE) {
|
2018-09-06 15:16:30 +02:00
|
|
|
struct interface *ifp;
|
|
|
|
|
|
|
|
ifp = if_lookup_by_index(nexthop->ifindex,
|
|
|
|
nexthop->vrf_id);
|
2020-02-14 21:15:26 +01:00
|
|
|
if (ifp)
|
|
|
|
zclient_send_interface_radv_req(
|
|
|
|
zclient, nexthop->vrf_id, ifp,
|
|
|
|
true,
|
|
|
|
BGP_UNNUM_DEFAULT_RA_INTERVAL);
|
2018-09-06 15:16:30 +02:00
|
|
|
}
|
2018-03-24 00:57:03 +01:00
|
|
|
/* There is at least one label-switched path */
|
|
|
|
if (nexthop->nh_label &&
|
|
|
|
nexthop->nh_label->num_labels) {
|
|
|
|
|
|
|
|
bnc->flags |= BGP_NEXTHOP_LABELED_VALID;
|
|
|
|
num_labels = nexthop->nh_label->num_labels;
|
|
|
|
}
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
2016-09-08 18:38:53 +02:00
|
|
|
char buf[NEXTHOP_STRLEN];
|
|
|
|
zlog_debug(
|
2018-03-24 00:57:03 +01:00
|
|
|
" nhop via %s (%d labels)",
|
|
|
|
nexthop2str(nexthop, buf, sizeof(buf)),
|
|
|
|
num_labels);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
if (nhlist_tail) {
|
|
|
|
nhlist_tail->next = nexthop;
|
|
|
|
nhlist_tail = nexthop;
|
2017-07-17 14:03:14 +02:00
|
|
|
} else {
|
2015-05-20 02:40:34 +02:00
|
|
|
nhlist_tail = nexthop;
|
|
|
|
nhlist_head = nexthop;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
/* No need to evaluate the nexthop if we have already
|
|
|
|
* determined
|
|
|
|
* that there has been a change.
|
2017-07-17 14:03:14 +02:00
|
|
|
*/
|
2015-11-15 18:57:34 +01:00
|
|
|
if (bnc->change_flags & BGP_NEXTHOP_CHANGED)
|
2015-05-20 02:40:34 +02:00
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
for (oldnh = bnc->nexthop; oldnh; oldnh = oldnh->next)
|
2019-05-14 19:48:26 +02:00
|
|
|
if (nexthop_same(oldnh, nexthop))
|
2017-07-17 14:03:14 +02:00
|
|
|
break;
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
if (!oldnh)
|
|
|
|
bnc->change_flags |= BGP_NEXTHOP_CHANGED;
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2016-10-11 17:30:55 +02:00
|
|
|
bnc_nexthop_free(bnc);
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc->nexthop = nhlist_head;
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Gateway IP nexthop is L3 reachable. Mark it as
|
|
|
|
* BGP_NEXTHOP_VALID only if it is recursively resolved with a
|
|
|
|
* remote EVPN RT-2.
|
|
|
|
* Else, mark it as BGP_NEXTHOP_EVPN_INCOMPLETE.
|
|
|
|
* When its mapping with EVPN RT-2 is established, unset
|
|
|
|
* BGP_NEXTHOP_EVPN_INCOMPLETE and set BGP_NEXTHOP_VALID.
|
|
|
|
*/
|
|
|
|
if (bnc->is_evpn_gwip_nexthop) {
|
|
|
|
evpn_resolved = bgp_evpn_is_gateway_ip_resolved(bnc);
|
|
|
|
|
2022-08-25 12:46:58 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT))
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
zlog_debug(
|
2022-08-25 12:46:58 +02:00
|
|
|
"EVPN gateway IP %pFX recursive MAC/IP lookup %s",
|
|
|
|
&bnc->prefix,
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
(evpn_resolved ? "successful"
|
|
|
|
: "failed"));
|
|
|
|
|
|
|
|
if (evpn_resolved) {
|
|
|
|
bnc->flags |= BGP_NEXTHOP_VALID;
|
|
|
|
bnc->flags &= ~BGP_NEXTHOP_EVPN_INCOMPLETE;
|
|
|
|
bnc->change_flags |= BGP_NEXTHOP_MACIP_CHANGED;
|
|
|
|
} else {
|
|
|
|
bnc->flags |= BGP_NEXTHOP_EVPN_INCOMPLETE;
|
|
|
|
bnc->flags &= ~BGP_NEXTHOP_VALID;
|
|
|
|
}
|
|
|
|
}
|
2015-05-20 02:40:34 +02:00
|
|
|
} else {
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
bnc->flags &= ~BGP_NEXTHOP_EVPN_INCOMPLETE;
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc->flags &= ~BGP_NEXTHOP_VALID;
|
2021-04-22 19:04:47 +02:00
|
|
|
bnc->flags &= ~BGP_NEXTHOP_LABELED_VALID;
|
2020-08-26 19:39:33 +02:00
|
|
|
bnc->nexthop_num = nhr->nexthop_num;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:09 +02:00
|
|
|
/* notify bgp fsm if nbr ip goes from valid->invalid */
|
2015-05-20 02:47:21 +02:00
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
bnc_nexthop_free(bnc);
|
|
|
|
bnc->nexthop = NULL;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
evaluate_paths(bnc);
|
|
|
|
}
|
|
|
|
|
2020-12-17 15:46:30 +01:00
|
|
|
/*
 * Walk one nexthop-cache (or import-check) table and refresh every
 * entry whose tracked interface matches @ifp: stamp it, flip its
 * validity according to @up, and re-evaluate the paths depending on it.
 */
static void bgp_nht_ifp_table_handle(struct bgp *bgp,
				     struct bgp_nexthop_cache_head *table,
				     struct interface *ifp, bool up)
{
	struct bgp_nexthop_cache *cache;

	frr_each (bgp_nexthop_cache, table, cache) {
		if (cache->ifindex == ifp->ifindex) {
			cache->last_update = monotime(NULL);
			cache->change_flags = 0;

			/*
			 * For interface based routes ( ala the v6 LL routes
			 * that this was written for ) the metric received
			 * for the connected route is 0 not 1.
			 */
			cache->metric = 0;
			if (up) {
				SET_FLAG(cache->flags, BGP_NEXTHOP_VALID);
				SET_FLAG(cache->change_flags,
					 BGP_NEXTHOP_CHANGED);
				cache->nexthop_num = 1;
			} else {
				UNSET_FLAG(cache->flags,
					   BGP_NEXTHOP_PEER_NOTIFIED);
				UNSET_FLAG(cache->flags, BGP_NEXTHOP_VALID);
				SET_FLAG(cache->change_flags,
					 BGP_NEXTHOP_CHANGED);
				cache->nexthop_num = 0;
			}

			evaluate_paths(cache);
		}
	}
}
|
|
|
|
/*
 * Fan an interface up/down event out to all four tracking tables
 * (v4/v6 nexthop cache and v4/v6 import check) of the BGP instance
 * owning the interface's VRF, in that fixed order.
 */
static void bgp_nht_ifp_handle(struct interface *ifp, bool up)
{
	struct bgp *bgp = ifp->vrf->info;

	if (!bgp)
		return;

	struct bgp_nexthop_cache_head *tables[] = {
		&bgp->nexthop_cache_table[AFI_IP],
		&bgp->import_check_table[AFI_IP],
		&bgp->nexthop_cache_table[AFI_IP6],
		&bgp->import_check_table[AFI_IP6],
	};

	for (size_t i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
		bgp_nht_ifp_table_handle(bgp, tables[i], ifp, up);
}
|
|
|
|
|
|
|
|
void bgp_nht_ifp_up(struct interface *ifp)
|
|
|
|
{
|
|
|
|
bgp_nht_ifp_handle(ifp, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
void bgp_nht_ifp_down(struct interface *ifp)
|
|
|
|
{
|
|
|
|
bgp_nht_ifp_handle(ifp, false);
|
|
|
|
}
|
|
|
|
|
2022-02-23 01:04:25 +01:00
|
|
|
static void bgp_nht_ifp_initial(struct thread *thread)
|
2020-12-17 15:46:30 +01:00
|
|
|
{
|
|
|
|
ifindex_t ifindex = THREAD_VAL(thread);
|
2021-10-14 17:58:49 +02:00
|
|
|
struct bgp *bgp = THREAD_ARG(thread);
|
|
|
|
struct interface *ifp = if_lookup_by_index(ifindex, bgp->vrf_id);
|
2020-12-17 15:46:30 +01:00
|
|
|
|
|
|
|
if (!ifp)
|
2022-02-23 01:04:25 +01:00
|
|
|
return;
|
2020-12-17 15:46:30 +01:00
|
|
|
|
2021-03-21 21:39:51 +01:00
|
|
|
if (BGP_DEBUG(nht, NHT))
|
|
|
|
zlog_debug(
|
|
|
|
"Handle NHT initial update for Intf %s(%d) status %s",
|
|
|
|
ifp->name, ifp->ifindex, if_is_up(ifp) ? "up" : "down");
|
|
|
|
|
2020-12-17 15:46:30 +01:00
|
|
|
if (if_is_up(ifp))
|
|
|
|
bgp_nht_ifp_up(ifp);
|
|
|
|
else
|
|
|
|
bgp_nht_ifp_down(ifp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * So the bnc code has the ability to handle interface up/down
 * events to properly handle v6 LL peering.
 * What is happening here:
 * The event system for peering expects the nht code to
 * report on the tracking events after we move to active
 * So let's give the system a chance to report on that event
 * in a manner that is expected.
 */
void bgp_nht_interface_events(struct peer *peer)
{
	struct bgp *bgp = peer->bgp;
	struct bgp_nexthop_cache_head *table;
	struct bgp_nexthop_cache *bnc;
	struct prefix p;
	ifindex_t ifindex = 0;

	/* Only v6 link-local peerings are interface-scoped; any other
	 * peer address has nothing to replay here. */
	if (!IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
		return;

	if (!sockunion2hostprefix(&peer->su, &p))
		return;
	/*
	 * Gather the ifindex for if up/down events to be
	 * tagged into this fun
	 */
	/* NOTE(review): the IN6_IS_ADDR_LINKLOCAL re-check below is
	 * redundant — the guard above already returned for non-LL
	 * addresses. Left as-is; only peer->conf_if gates the scope id. */
	if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
		ifindex = peer->su.sin6.sin6_scope_id;

	/* LL nexthops live in the v6 nexthop cache, keyed by prefix,
	 * SR-TE color 0 and the gathered ifindex. */
	table = &bgp->nexthop_cache_table[AFI_IP6];
	bnc = bnc_find(table, &p, 0, ifindex);
	if (!bnc)
		return;

	/* Defer the replay to an event so the FSM observes the tracking
	 * notification after the transition to active. */
	if (bnc->ifindex)
		thread_add_event(bm->master, bgp_nht_ifp_initial, bnc->bgp,
				 bnc->ifindex, NULL);
}
|
|
|
|
|
2020-08-26 19:39:33 +02:00
|
|
|
/*
 * zclient callback: decode a nexthop-tracking update from zebra and
 * apply it to both the nexthop cache and the import-check table of the
 * matching BGP instance.
 *
 * Note: the `command` parameter is unused here — presumably kept for
 * the zclient callback signature; TODO confirm against the caller.
 */
void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
{
	struct bgp_nexthop_cache_head *tree = NULL;
	struct bgp_nexthop_cache *bnc_nhc, *bnc_import;
	struct bgp *bgp;
	struct prefix match;
	struct zapi_route nhr;
	afi_t afi;

	bgp = bgp_lookup_by_vrf_id(vrf_id);
	if (!bgp) {
		flog_err(
			EC_BGP_NH_UPD,
			"parse nexthop update: instance not found for vrf_id %u",
			vrf_id);
		return;
	}

	if (!zapi_nexthop_update_decode(zclient->ibuf, &match, &nhr)) {
		zlog_err("%s[%s]: Failure to decode nexthop update", __func__,
			 bgp->name_pretty);
		return;
	}

	/* The registered prefix (match) selects the AFI and the cache
	 * entry; lookups use SR-TE color from the update and ifindex 0. */
	afi = family2afi(match.family);
	tree = &bgp->nexthop_cache_table[afi];

	bnc_nhc = bnc_find(tree, &match, nhr.srte_color, 0);
	if (!bnc_nhc) {
		if (BGP_DEBUG(nht, NHT))
			zlog_debug(
				"parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache",
				&nhr.prefix, nhr.srte_color, bgp->name_pretty);
	} else
		bgp_process_nexthop_update(bnc_nhc, &nhr, false);

	/* Same update may also satisfy an import-check registration. */
	tree = &bgp->import_check_table[afi];

	bnc_import = bnc_find(tree, &match, nhr.srte_color, 0);
	if (!bnc_import) {
		if (BGP_DEBUG(nht, NHT))
			zlog_debug(
				"parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check",
				&nhr.prefix, nhr.srte_color, bgp->name_pretty);
	} else
		bgp_process_nexthop_update(bnc_import, &nhr, true);

	/*
	 * HACK: if any BGP route is dependant on an SR-policy that doesn't
	 * exist, zebra will never send NH updates relative to that policy. In
	 * that case, whenever we receive an update about a colorless NH, update
	 * the corresponding colorful NHs that share the same endpoint but that
	 * are inactive. This ugly hack should work around the problem at the
	 * cost of a performance pernalty. Long term, what should be done is to
	 * make zebra's RNH subsystem aware of SR-TE colors (like bgpd is),
	 * which should provide a better infrastructure to solve this issue in
	 * a more efficient and elegant way.
	 */
	if (nhr.srte_color == 0 && bnc_nhc) {
		struct bgp_nexthop_cache *bnc_iter;

		frr_each (bgp_nexthop_cache, &bgp->nexthop_cache_table[afi],
			  bnc_iter) {
			/* Only same-endpoint, colored, currently-invalid
			 * entries get the piggybacked update. */
			if (!prefix_same(&bnc_nhc->prefix, &bnc_iter->prefix) ||
			    bnc_iter->srte_color == 0 ||
			    CHECK_FLAG(bnc_iter->flags, BGP_NEXTHOP_VALID))
				continue;

			bgp_process_nexthop_update(bnc_iter, &nhr, false);
		}
	}
}
|
|
|
|
|
2017-08-10 02:32:19 +02:00
|
|
|
/*
|
|
|
|
* Cleanup nexthop registration and status information for BGP nexthops
|
|
|
|
* pertaining to this VRF. This is invoked upon VRF deletion.
|
|
|
|
*/
|
|
|
|
void bgp_cleanup_nexthops(struct bgp *bgp)
|
|
|
|
{
|
2020-04-09 20:56:11 +02:00
|
|
|
for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) {
|
|
|
|
struct bgp_nexthop_cache *bnc;
|
2017-08-10 02:32:19 +02:00
|
|
|
|
2020-04-09 20:56:11 +02:00
|
|
|
frr_each (bgp_nexthop_cache, &bgp->nexthop_cache_table[afi],
|
|
|
|
bnc) {
|
2017-08-10 02:32:19 +02:00
|
|
|
/* Clear relevant flags. */
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE);
|
2017-08-10 02:32:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
/**
|
|
|
|
* make_prefix - make a prefix structure from the path (essentially
|
|
|
|
* path's node.
|
|
|
|
*/
|
2018-10-03 02:43:07 +02:00
|
|
|
static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2015-05-20 03:04:20 +02:00
|
|
|
|
2018-10-03 02:43:07 +02:00
|
|
|
int is_bgp_static = ((pi->type == ZEBRA_ROUTE_BGP)
|
|
|
|
&& (pi->sub_type == BGP_ROUTE_STATIC))
|
2015-05-20 03:04:20 +02:00
|
|
|
? 1
|
|
|
|
: 0;
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *net = pi->net;
|
|
|
|
const struct prefix *p_orig = bgp_dest_get_prefix(net);
|
2020-07-29 17:48:57 +02:00
|
|
|
struct in_addr ipv4;
|
2018-12-04 09:33:21 +01:00
|
|
|
|
|
|
|
if (p_orig->family == AF_FLOWSPEC) {
|
|
|
|
if (!pi->peer)
|
|
|
|
return -1;
|
|
|
|
return bgp_flowspec_get_first_nh(pi->peer->bgp,
|
2019-10-14 18:02:22 +02:00
|
|
|
pi, p, afi);
|
2018-12-04 09:33:21 +01:00
|
|
|
}
|
2015-05-20 02:40:34 +02:00
|
|
|
memset(p, 0, sizeof(struct prefix));
|
|
|
|
switch (afi) {
|
|
|
|
case AFI_IP:
|
|
|
|
p->family = AF_INET;
|
2015-05-20 03:04:20 +02:00
|
|
|
if (is_bgp_static) {
|
2020-03-22 05:02:18 +01:00
|
|
|
p->u.prefix4 = p_orig->u.prefix4;
|
|
|
|
p->prefixlen = p_orig->prefixlen;
|
2015-05-20 03:04:20 +02:00
|
|
|
} else {
|
2020-07-29 17:48:57 +02:00
|
|
|
if (IS_MAPPED_IPV6(&pi->attr->mp_nexthop_global)) {
|
|
|
|
ipv4_mapped_ipv6_to_ipv4(
|
|
|
|
&pi->attr->mp_nexthop_global, &ipv4);
|
|
|
|
p->u.prefix4 = ipv4;
|
|
|
|
p->prefixlen = IPV4_MAX_BITLEN;
|
|
|
|
} else {
|
2022-07-27 00:04:14 +02:00
|
|
|
if (p_orig->family == AF_EVPN)
|
|
|
|
p->u.prefix4 =
|
|
|
|
pi->attr->mp_nexthop_global_in;
|
|
|
|
else
|
|
|
|
p->u.prefix4 = pi->attr->nexthop;
|
2020-07-29 17:48:57 +02:00
|
|
|
p->prefixlen = IPV4_MAX_BITLEN;
|
|
|
|
}
|
2015-05-20 03:04:20 +02:00
|
|
|
}
|
2015-05-20 02:40:34 +02:00
|
|
|
break;
|
|
|
|
case AFI_IP6:
|
|
|
|
p->family = AF_INET6;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 03:04:20 +02:00
|
|
|
if (is_bgp_static) {
|
2020-03-22 05:02:18 +01:00
|
|
|
p->u.prefix6 = p_orig->u.prefix6;
|
|
|
|
p->prefixlen = p_orig->prefixlen;
|
2015-05-20 03:04:20 +02:00
|
|
|
} else {
|
2020-05-25 16:22:37 +02:00
|
|
|
/* If we receive MP_REACH nexthop with ::(LL)
|
|
|
|
* or LL(LL), use LL address as nexthop cache.
|
|
|
|
*/
|
|
|
|
if (pi->attr->mp_nexthop_len
|
|
|
|
== BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL
|
|
|
|
&& (IN6_IS_ADDR_UNSPECIFIED(
|
|
|
|
&pi->attr->mp_nexthop_global)
|
|
|
|
|| IN6_IS_ADDR_LINKLOCAL(
|
|
|
|
&pi->attr->mp_nexthop_global)))
|
|
|
|
p->u.prefix6 = pi->attr->mp_nexthop_local;
|
2021-06-30 14:07:52 +02:00
|
|
|
/* If we receive MR_REACH with (GA)::(LL)
|
|
|
|
* then check for route-map to choose GA or LL
|
|
|
|
*/
|
|
|
|
else if (pi->attr->mp_nexthop_len
|
|
|
|
== BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
|
2023-01-11 16:14:11 +01:00
|
|
|
if (pi->attr->mp_nexthop_prefer_global)
|
2021-06-30 14:07:52 +02:00
|
|
|
p->u.prefix6 =
|
|
|
|
pi->attr->mp_nexthop_global;
|
|
|
|
else
|
|
|
|
p->u.prefix6 =
|
|
|
|
pi->attr->mp_nexthop_local;
|
|
|
|
} else
|
2020-05-25 16:22:37 +02:00
|
|
|
p->u.prefix6 = pi->attr->mp_nexthop_global;
|
2015-05-20 03:04:20 +02:00
|
|
|
p->prefixlen = IPV6_MAX_BITLEN;
|
|
|
|
}
|
2015-05-20 02:40:34 +02:00
|
|
|
break;
|
|
|
|
default:
|
2015-05-20 03:04:03 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Attempting to make prefix with unknown AFI %d (not %d or %d)",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, afi, AFI_IP, AFI_IP6);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
|
|
|
break;
|
2015-05-20 03:04:03 +02:00
|
|
|
}
|
2015-05-20 02:40:34 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2015-05-20 03:04:20 +02:00
|
|
|
* sendmsg_zebra_rnh -- Format and send a nexthop register/Unregister
|
2015-05-20 02:40:34 +02:00
|
|
|
* command to Zebra.
|
|
|
|
* ARGUMENTS:
|
|
|
|
* struct bgp_nexthop_cache *bnc -- the nexthop structure.
|
2015-05-20 03:04:20 +02:00
|
|
|
* int command -- command to send to zebra
|
2015-05-20 02:40:34 +02:00
|
|
|
* RETURNS:
|
|
|
|
* void.
|
|
|
|
*/
|
2015-05-20 03:04:20 +02:00
|
|
|
static void sendmsg_zebra_rnh(struct bgp_nexthop_cache *bnc, int command)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2018-01-23 00:36:03 +01:00
|
|
|
bool exact_match = false;
|
2021-09-24 21:51:18 +02:00
|
|
|
bool resolve_via_default = false;
|
2015-05-20 02:40:34 +02:00
|
|
|
int ret;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-01-23 00:36:03 +01:00
|
|
|
if (!zclient)
|
2016-02-12 21:18:28 +01:00
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-02-12 21:18:28 +01:00
|
|
|
/* Don't try to register if Zebra doesn't know of this instance. */
|
2018-10-05 15:43:28 +02:00
|
|
|
if (!IS_BGP_INST_KNOWN_TO_ZEBRA(bnc->bgp)) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%s: No zebra instance to talk to, not installing NHT entry",
|
|
|
|
__func__);
|
2016-02-12 21:18:28 +01:00
|
|
|
return;
|
2018-10-05 15:43:28 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-10-05 17:31:29 +02:00
|
|
|
if (!bgp_zebra_num_connects()) {
|
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug(
|
|
|
|
"%s: We have not connected yet, cannot send nexthops",
|
|
|
|
__func__);
|
2018-10-05 17:31:29 +02:00
|
|
|
}
|
2021-09-24 21:51:18 +02:00
|
|
|
if (command == ZEBRA_NEXTHOP_REGISTER) {
|
|
|
|
if (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED))
|
|
|
|
exact_match = true;
|
|
|
|
if (CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH))
|
|
|
|
resolve_via_default = true;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-04-09 20:56:11 +02:00
|
|
|
if (BGP_DEBUG(zebra, ZEBRA))
|
|
|
|
zlog_debug("%s: sending cmd %s for %pFX (vrf %s)", __func__,
|
|
|
|
zserv_command_string(command), &bnc->prefix,
|
|
|
|
bnc->bgp->name_pretty);
|
2018-03-24 00:57:03 +01:00
|
|
|
|
2021-10-04 12:21:45 +02:00
|
|
|
ret = zclient_send_rnh(zclient, command, &bnc->prefix, SAFI_UNICAST,
|
|
|
|
exact_match, resolve_via_default,
|
|
|
|
bnc->bgp->vrf_id);
|
2021-11-12 07:39:51 +01:00
|
|
|
if (ret == ZCLIENT_SEND_FAILURE) {
|
2018-09-13 20:23:42 +02:00
|
|
|
flog_warn(EC_BGP_ZEBRA_SEND,
|
2018-08-16 15:15:43 +02:00
|
|
|
"sendmsg_nexthop: zclient_send_message() failed");
|
2021-11-12 07:39:51 +01:00
|
|
|
return;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-09-24 16:32:30 +02:00
|
|
|
if (command == ZEBRA_NEXTHOP_REGISTER)
|
2015-05-20 02:47:21 +02:00
|
|
|
SET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
2021-09-24 16:32:30 +02:00
|
|
|
else if (command == ZEBRA_NEXTHOP_UNREGISTER)
|
2015-05-20 02:47:21 +02:00
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
2015-05-20 02:40:34 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2015-05-20 03:04:20 +02:00
|
|
|
* register_zebra_rnh - register a NH/route with Zebra for notification
|
|
|
|
* when the route or the route to the nexthop changes.
|
2015-05-20 02:40:34 +02:00
|
|
|
* ARGUMENTS:
|
2015-05-20 03:04:20 +02:00
|
|
|
* struct bgp_nexthop_cache *bnc
|
2015-05-20 02:40:34 +02:00
|
|
|
* RETURNS:
|
|
|
|
* void.
|
|
|
|
*/
|
2022-03-12 20:48:18 +01:00
|
|
|
static void register_zebra_rnh(struct bgp_nexthop_cache *bnc)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
/* Check if we have already registered */
|
|
|
|
if (bnc->flags & BGP_NEXTHOP_REGISTERED)
|
|
|
|
return;
|
2020-12-17 15:46:30 +01:00
|
|
|
|
|
|
|
if (bnc->ifindex) {
|
|
|
|
SET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-09-24 16:32:30 +02:00
|
|
|
sendmsg_zebra_rnh(bnc, ZEBRA_NEXTHOP_REGISTER);
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2015-05-20 03:04:20 +02:00
|
|
|
* unregister_zebra_rnh -- Unregister the route/nexthop from Zebra.
|
2015-05-20 02:40:34 +02:00
|
|
|
* ARGUMENTS:
|
2015-05-20 03:04:20 +02:00
|
|
|
* struct bgp_nexthop_cache *bnc
|
2015-05-20 02:40:34 +02:00
|
|
|
* RETURNS:
|
|
|
|
* void.
|
|
|
|
*/
|
2022-03-12 20:48:18 +01:00
|
|
|
static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
/* Check if we have already registered */
|
|
|
|
if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED))
|
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-12-17 15:46:30 +01:00
|
|
|
if (bnc->ifindex) {
|
|
|
|
UNSET_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-09-24 16:32:30 +02:00
|
|
|
sendmsg_zebra_rnh(bnc, ZEBRA_NEXTHOP_UNREGISTER);
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* evaluate_paths - Evaluate the paths/nets associated with a nexthop.
|
|
|
|
* ARGUMENTS:
|
|
|
|
* struct bgp_nexthop_cache *bnc -- the nexthop structure.
|
|
|
|
* RETURNS:
|
|
|
|
* void.
|
|
|
|
*/
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup efficiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
void evaluate_paths(struct bgp_nexthop_cache *bnc)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
2020-03-27 00:11:58 +01:00
|
|
|
struct bgp_dest *dest;
|
2018-10-02 22:41:30 +02:00
|
|
|
struct bgp_path_info *path;
|
2022-02-14 14:18:10 +01:00
|
|
|
struct bgp_path_info *bpi_ultimate;
|
2015-05-20 02:40:34 +02:00
|
|
|
int afi;
|
2015-05-20 02:47:21 +02:00
|
|
|
struct peer *peer = (struct peer *)bnc->nht_info;
|
2017-03-09 15:54:20 +01:00
|
|
|
struct bgp_table *table;
|
|
|
|
safi_t safi;
|
2018-03-24 00:57:03 +01:00
|
|
|
struct bgp *bgp_path;
|
2020-03-22 05:02:18 +01:00
|
|
|
const struct prefix *p;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
2021-01-28 01:56:13 +01:00
|
|
|
char bnc_buf[BNC_FLAG_DUMP_SIZE];
|
2021-01-28 02:03:03 +01:00
|
|
|
char chg_buf[BNC_FLAG_DUMP_SIZE];
|
2021-01-28 01:56:13 +01:00
|
|
|
|
2016-09-08 18:38:53 +02:00
|
|
|
zlog_debug(
|
2022-08-25 12:53:30 +02:00
|
|
|
"NH update for %pFX(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
|
|
|
|
&bnc->prefix, bnc->ifindex, bnc->srte_color,
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc->bgp->name_pretty,
|
2021-01-28 02:03:03 +01:00
|
|
|
bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
|
|
|
|
sizeof(bnc_buf)),
|
|
|
|
bgp_nexthop_dump_bnc_change_flags(bnc, chg_buf,
|
|
|
|
sizeof(bnc_buf)));
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
LIST_FOREACH (path, &(bnc->paths), nh_thread) {
|
2015-05-20 02:40:34 +02:00
|
|
|
if (!(path->type == ZEBRA_ROUTE_BGP
|
2015-05-20 03:12:17 +02:00
|
|
|
&& ((path->sub_type == BGP_ROUTE_NORMAL)
|
2018-03-24 00:57:03 +01:00
|
|
|
|| (path->sub_type == BGP_ROUTE_STATIC)
|
|
|
|
|| (path->sub_type == BGP_ROUTE_IMPORTED))))
|
2015-05-20 02:40:34 +02:00
|
|
|
continue;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
dest = path->net;
|
|
|
|
assert(dest && bgp_dest_table(dest));
|
|
|
|
p = bgp_dest_get_prefix(dest);
|
2020-03-22 05:02:18 +01:00
|
|
|
afi = family2afi(p->family);
|
2020-03-27 00:11:58 +01:00
|
|
|
table = bgp_dest_table(dest);
|
2017-03-09 15:54:20 +01:00
|
|
|
safi = table->safi;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-03-24 00:57:03 +01:00
|
|
|
/*
|
|
|
|
* handle routes from other VRFs (they can have a
|
|
|
|
* nexthop in THIS VRF). bgp_path is the bgp instance
|
|
|
|
* that owns the route referencing this nexthop.
|
|
|
|
*/
|
|
|
|
bgp_path = table->bgp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Path becomes valid/invalid depending on whether the nexthop
|
2015-05-20 02:40:34 +02:00
|
|
|
* reachable/unreachable.
|
2018-03-24 00:57:03 +01:00
|
|
|
*
|
|
|
|
* In case of unicast routes that were imported from vpn
|
|
|
|
* and that have labels, they are valid only if there are
|
|
|
|
* nexthops with labels
|
2021-01-11 07:32:41 +01:00
|
|
|
*
|
|
|
|
* If the nexthop is EVPN gateway-IP,
|
|
|
|
* do not check for a valid label.
|
2015-05-20 02:40:34 +02:00
|
|
|
*/
|
2018-03-24 00:57:03 +01:00
|
|
|
|
2020-05-25 23:15:37 +02:00
|
|
|
bool bnc_is_valid_nexthop = false;
|
|
|
|
bool path_valid = false;
|
2018-03-24 00:57:03 +01:00
|
|
|
|
bgpd: EVPN route type-5 to type-2 recursive resolution using gateway IP
When EVPN prefix route with a gateway IP overlay index is imported into the IP
vrf at the ingress PE, BGP nexthop of this route is set to the gateway IP.
For this vrf route to be valid, following conditions must be met.
- Gateway IP nexthop of this route should be L3 reachable, i.e., this route
should be resolved in RIB.
- A remote MAC/IP route should be present for the gateway IP address in the
EVI(L2VPN table).
To check for the first condition, gateway IP is registered with nht (nexthop
tracking) to receive the reachability notifications for this IP from zebra RIB.
If the gateway IP is reachable, zebra sends the reachability information (i.e.,
nexthop interface) for the gateway IP.
This nexthop interface should be the SVI interface.
Now, to find out type-2 route corresponding to the gateway IP, we need to fetch
the VNI for the above SVI.
To do this VNI lookup effitiently, define a hashtable of struct bgpevpn with
svi_ifindex as key.
struct hash *vni_svi_hash;
An EVI instance is added to vni_svi_hash if its svi_ifindex is nonzero.
Using this hash, we obtain struct bgpevpn corresponding to the gateway IP.
For gateway IP overlay index recursive lookup, once we find the correct EVI, we
have to lookup its route table for a MAC/IP prefix. As we have to iterate the
entire route table for every lookup, this lookup is expensive. We can optimize
this lookup by adding all the remote IP addresses in a hash table.
Following hash table is defined for this purpose in struct bgpevpn
Struct hash *remote_ip_hash;
When a MAC/IP route is installed in the EVI table, it is also added to
remote_ip_hash.
It is possible to have multiple MAC/IP routes with the same IP address because
of host move scenarios. Thus, for every address addr in remote_ip_hash, we
maintain list of all the MAC/IP routes having addr as their IP address.
Following structure defines an address in remote_ip_hash.
struct evpn_remote_ip {
struct ipaddr addr;
struct list *macip_path_list;
};
A Boolean field is added to struct bgp_nexthop_cache to indicate that the
nexthop is EVPN gateway IP overlay index.
bool is_evpn_gwip_nexthop;
A flag BGP_NEXTHOP_EVPN_INCOMPLETE is added to struct bgp_nexthop_cache.
This flag is set when the gateway IP is L3 reachable but not yet resolved by a
MAC/IP route.
Following table explains the combination of L3 and L2 reachability w.r.t.
BGP_NEXTHOP_VALID and BGP_NEXTHOP_EVPN_INCOMPLETE flags
* | MACIP resolved | MACIP unresolved
*----------------|----------------|------------------
* L3 reachable | VALID = 1 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 1
* ---------------|----------------|--------------------
* L3 unreachable | VALID = 0 | VALID = 0
* | INCOMPLETE = 0 | INCOMPLETE = 0
Procedure that we use to check if the gateway IP is resolvable by a MAC/IP
route:
- Find the EVI/L2VRF that belongs to the nexthop SVI using vni_svi_hash.
- Check if the gateway IP is present in remote_ip_hash in this EVI.
When the gateway IP is L3 reachable and it is also resolved by a MAC/IP route,
unset BGP_NEXTHOP_EVPN_INCOMPLETE flag and set BGP_NEXTHOP_VALID flag.
Signed-off-by: Ameya Dharkar <adharkar@vmware.com>
2021-01-11 12:51:56 +01:00
|
|
|
if (safi == SAFI_UNICAST && path->sub_type == BGP_ROUTE_IMPORTED
|
|
|
|
&& path->extra && path->extra->num_labels
|
|
|
|
&& (path->attr->evpn_overlay.type
|
|
|
|
!= OVERLAY_INDEX_GATEWAY_IP)) {
|
2018-03-24 00:57:03 +01:00
|
|
|
bnc_is_valid_nexthop =
|
bgpd: add resolution for l3vpn traffic over gre interfaces
When a route imported from l3vpn is analysed, the nexthop from default
VRF is looked up against a valid MPLS path. Generally, this is done on
backbones with a MPLS signalisation transport layer like LDP. Generally,
the BGP connection is multiple hops away. That scenario is already
working.
There is case where it is possible to run L3VPN over GRE interfaces, and
where there is no LSP path over that GRE interface: GRE is just here to
tunnel MPLS traffic. On that case, the nexthop given in the path does not
have MPLS path, but should be authorized to convey MPLS traffic provided
that the user permits it via a configuration command.
That commit introduces a new command that can be activated in route-map:
> set l3vpn next-hop encapsulation gre
That command authorizes the nexthop tracking engine to accept paths that
o have a GRE interface as output, independently of the presence of an LSP
path or not.
A configuration example is given below. When bgp incoming vpnv4 updates
are received, the nexthop of NLRI is 192.168.0.2. Based on nexthop
tracking service from zebra, BGP knows that the output interface to reach
192.168.0.2 is r1-gre0. Because that interface is not MPLS based, but is
a GRE tunnel, then the update will be using that nexthop to be installed.
interface r1-gre0
ip address 192.168.0.1/24
exit
router bgp 65500
bgp router-id 1.1.1.1
neighbor 192.168.0.2 remote-as 65500
!
address-family ipv4 unicast
no neighbor 192.168.0.2 activate
exit-address-family
!
address-family ipv4 vpn
neighbor 192.168.0.2 activate
neighbor 192.168.0.2 route-map rmap in
exit-address-family
exit
!
router bgp 65500 vrf vrf1
bgp router-id 1.1.1.1
no bgp network import-check
!
address-family ipv4 unicast
network 10.201.0.0/24
redistribute connected
label vpn export 101
rd vpn export 444:1
rt vpn both 52:100
export vpn
import vpn
exit-address-family
exit
!
route-map rmap permit 1
set l3vpn next-hop encapsulation gre
exit
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2021-09-20 11:50:52 +02:00
|
|
|
bgp_isvalid_nexthop_for_mpls(bnc, path) ? true
|
|
|
|
: false;
|
2018-03-24 00:57:03 +01:00
|
|
|
} else {
|
bgpd: Force self-next-hop check in next-hop update.
Problem Description:
=====================
+--+ +--+
|R1|-(192.201.202.1)----iBGP----(192.201.202.2)-|R2|
+--+ +--+
Routes on R2:
=============
S>* 202.202.202.202/32 [1/0] via 192.201.78.1, ens256, 00:40:48
Where, the next-hop network, 192.201.78.0/24, is a directly connected network address.
C>* 192.201.78.0/24 is directly connected, ens256, 00:40:48
Configurations on R1:
=====================
!
router bgp 201
bgp router-id 192.168.0.1
neighbor 192.201.202.2 remote-as 201
!
Configurations on R2:
=====================
!
ip route 202.202.202.202/32 192.201.78.1
!
router bgp 201
bgp router-id 192.168.0.2
neighbor 192.201.202.1 remote-as 201
!
address-family ipv4 unicast
redistribute static
exit-address-family
!
Step-1:
=======
R1 receives the route 202.202.202.202/32 from R2.
R1 installs the route in its BGP RIB.
Step-2:
=======
On R1, a connected interface address is added.
The address is the same as the next-hop of the BGP route received from R2 (192.201.78.1).
Point of Failure:
=================
R1 resolves the BGP route even though the route's next-hop is its own connected address.
Even though this appears to be a misconfiguration it would still be better to safeguard the code against it.
Fix:
====
When BGP receives a connected route from Zebra, it processes the
routes for the next-hop update.
While doing so, BGP must ignore routes whose next-hop address matches
the address of the connected route for which Zebra sent the next-hop update
message.
Signed-off-by: NaveenThanikachalam <nthanikachal@vmware.com>
2020-04-09 09:27:54 +02:00
|
|
|
if (bgp_update_martian_nexthop(
|
|
|
|
bnc->bgp, afi, safi, path->type,
|
2020-03-27 00:11:58 +01:00
|
|
|
path->sub_type, path->attr, dest)) {
|
bgpd: Force self-next-hop check in next-hop update.
Problem Description:
=====================
+--+ +--+
|R1|-(192.201.202.1)----iBGP----(192.201.202.2)-|R2|
+--+ +--+
Routes on R2:
=============
S>* 202.202.202.202/32 [1/0] via 192.201.78.1, ens256, 00:40:48
Where, the next-hop network, 192.201.78.0/24, is a directly connected network address.
C>* 192.201.78.0/24 is directly connected, ens256, 00:40:48
Configurations on R1:
=====================
!
router bgp 201
bgp router-id 192.168.0.1
neighbor 192.201.202.2 remote-as 201
!
Configurations on R2:
=====================
!
ip route 202.202.202.202/32 192.201.78.1
!
router bgp 201
bgp router-id 192.168.0.2
neighbor 192.201.202.1 remote-as 201
!
address-family ipv4 unicast
redistribute static
exit-address-family
!
Step-1:
=======
R1 receives the route 202.202.202.202/32 from R2.
R1 installs the route in its BGP RIB.
Step-2:
=======
On R1, a connected interface address is added.
The address is the same as the next-hop of the BGP route received from R2 (192.201.78.1).
Point of Failure:
=================
R1 resolves the BGP route even though the route's next-hop is its own connected address.
Even though this appears to be a misconfiguration it would still be better to safeguard the code against it.
Fix:
====
When BGP receives a connected route from Zebra, it processes the
routes for the next-hop update.
While doing so, BGP must ignore routes whose next-hop address matches
the address of the connected route for which Zebra sent the next-hop update
message.
Signed-off-by: NaveenThanikachalam <nthanikachal@vmware.com>
2020-04-09 09:27:54 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT))
|
|
|
|
zlog_debug(
|
2020-06-23 16:00:41 +02:00
|
|
|
"%s: prefix %pBD (vrf %s), ignoring path due to martian or self-next-hop",
|
2020-03-27 00:11:58 +01:00
|
|
|
__func__, dest, bgp_path->name);
|
bgpd: Force self-next-hop check in next-hop update.
Problem Description:
=====================
+--+ +--+
|R1|-(192.201.202.1)----iBGP----(192.201.202.2)-|R2|
+--+ +--+
Routes on R2:
=============
S>* 202.202.202.202/32 [1/0] via 192.201.78.1, ens256, 00:40:48
Where, the next-hop network, 192.201.78.0/24, is a directly connected network address.
C>* 192.201.78.0/24 is directly connected, ens256, 00:40:48
Configurations on R1:
=====================
!
router bgp 201
bgp router-id 192.168.0.1
neighbor 192.201.202.2 remote-as 201
!
Configurations on R2:
=====================
!
ip route 202.202.202.202/32 192.201.78.1
!
router bgp 201
bgp router-id 192.168.0.2
neighbor 192.201.202.1 remote-as 201
!
address-family ipv4 unicast
redistribute static
exit-address-family
!
Step-1:
=======
R1 receives the route 202.202.202.202/32 from R2.
R1 installs the route in its BGP RIB.
Step-2:
=======
On R1, a connected interface address is added.
The address is the same as the next-hop of the BGP route received from R2 (192.201.78.1).
Point of Failure:
=================
R1 resolves the BGP route even though the route's next-hop is its own connected address.
Even though this appears to be a misconfiguration it would still be better to safeguard the code against it.
Fix:
====
When BGP receives a connected route from Zebra, it processes the
routes for the next-hop update.
While doing so, BGP must ignore routes whose next-hop address matches
the address of the connected route for which Zebra sent the next-hop update
message.
Signed-off-by: NaveenThanikachalam <nthanikachal@vmware.com>
2020-04-09 09:27:54 +02:00
|
|
|
} else
|
|
|
|
bnc_is_valid_nexthop =
|
2020-05-25 23:15:37 +02:00
|
|
|
bgp_isvalid_nexthop(bnc) ? true : false;
|
2018-03-24 00:57:03 +01:00
|
|
|
}
|
|
|
|
|
2020-05-25 23:10:12 +02:00
|
|
|
if (BGP_DEBUG(nht, NHT)) {
|
2022-09-22 10:17:49 +02:00
|
|
|
if (dest->pdest)
|
2020-05-25 23:10:12 +02:00
|
|
|
zlog_debug(
|
2022-09-22 10:17:49 +02:00
|
|
|
"... eval path %d/%d %pBD RD %pRD %s flags 0x%x",
|
|
|
|
afi, safi, dest,
|
|
|
|
(struct prefix_rd *)bgp_dest_get_prefix(
|
|
|
|
dest->pdest),
|
2020-05-25 23:10:12 +02:00
|
|
|
bgp_path->name_pretty, path->flags);
|
2022-09-22 10:17:49 +02:00
|
|
|
else
|
2020-05-25 23:10:12 +02:00
|
|
|
zlog_debug(
|
2020-06-23 16:00:41 +02:00
|
|
|
"... eval path %d/%d %pBD %s flags 0x%x",
|
2020-03-27 00:11:58 +01:00
|
|
|
afi, safi, dest, bgp_path->name_pretty,
|
2020-05-25 23:10:12 +02:00
|
|
|
path->flags);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-05-25 23:17:12 +02:00
|
|
|
/* Skip paths marked for removal or as history. */
|
|
|
|
if (CHECK_FLAG(path->flags, BGP_PATH_REMOVED)
|
|
|
|
|| CHECK_FLAG(path->flags, BGP_PATH_HISTORY))
|
|
|
|
continue;
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
/* Copy the metric to the path. Will be used for bestpath
|
|
|
|
* computation */
|
2022-02-14 14:18:10 +01:00
|
|
|
bpi_ultimate = bgp_get_imported_bpi_ultimate(path);
|
2016-10-11 17:30:55 +02:00
|
|
|
if (bgp_isvalid_nexthop(bnc) && bnc->metric)
|
2022-02-14 14:18:10 +01:00
|
|
|
(bgp_path_info_extra_get(bpi_ultimate))->igpmetric =
|
2018-10-03 00:15:34 +02:00
|
|
|
bnc->metric;
|
2022-02-14 14:18:10 +01:00
|
|
|
else if (bpi_ultimate->extra)
|
|
|
|
bpi_ultimate->extra->igpmetric = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-11-15 18:57:34 +01:00
|
|
|
if (CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_METRIC_CHANGED)
|
2020-08-26 19:39:33 +02:00
|
|
|
|| CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED)
|
|
|
|
|| path->attr->srte_color != 0)
|
2018-09-14 02:34:42 +02:00
|
|
|
SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2021-06-29 13:47:31 +02:00
|
|
|
path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
|
2020-05-25 23:15:37 +02:00
|
|
|
if (path_valid != bnc_is_valid_nexthop) {
|
|
|
|
if (path_valid) {
|
|
|
|
/* No longer valid, clear flag; also for EVPN
|
|
|
|
* routes, unimport from VRFs if needed.
|
|
|
|
*/
|
|
|
|
bgp_aggregate_decrement(bgp_path, p, path, afi,
|
|
|
|
safi);
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_path_info_unset_flag(dest, path,
|
2020-05-25 23:15:37 +02:00
|
|
|
BGP_PATH_VALID);
|
|
|
|
if (safi == SAFI_EVPN &&
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
|
2020-05-25 23:15:37 +02:00
|
|
|
bgp_evpn_unimport_route(bgp_path,
|
2020-03-27 00:11:58 +01:00
|
|
|
afi, safi, bgp_dest_get_prefix(dest), path);
|
2020-05-25 23:15:37 +02:00
|
|
|
} else {
|
|
|
|
/* Path becomes valid, set flag; also for EVPN
|
|
|
|
* routes, import from VRFs if needed.
|
|
|
|
*/
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_path_info_set_flag(dest, path,
|
2020-05-25 23:15:37 +02:00
|
|
|
BGP_PATH_VALID);
|
|
|
|
bgp_aggregate_increment(bgp_path, p, path, afi,
|
|
|
|
safi);
|
|
|
|
if (safi == SAFI_EVPN &&
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_evpn_is_prefix_nht_supported(bgp_dest_get_prefix(dest)))
|
2020-05-25 23:15:37 +02:00
|
|
|
bgp_evpn_import_route(bgp_path,
|
2020-03-27 00:11:58 +01:00
|
|
|
afi, safi, bgp_dest_get_prefix(dest), path);
|
2020-05-25 23:15:37 +02:00
|
|
|
}
|
2019-11-14 01:46:56 +01:00
|
|
|
}
|
|
|
|
|
2020-03-27 00:11:58 +01:00
|
|
|
bgp_process(bgp_path, dest, afi, safi);
|
2015-05-20 02:40:34 +02:00
|
|
|
}
|
2015-05-20 02:47:21 +02:00
|
|
|
|
2020-02-11 01:25:52 +01:00
|
|
|
if (peer) {
|
|
|
|
int valid_nexthops = bgp_isvalid_nexthop(bnc);
|
|
|
|
|
bgpd: Blackhole nexthops are not reachable
When bgp registers for a nexthop that is not reachable due
to the nexthop pointing to a blackhole, bgp is never going
to be able to reach it when attempting to open a connection.
Broken behavior:
<show bgp nexthop>
192.168.161.204 valid [IGP metric 0], #paths 0, peer 192.168.161.204
blackhole
Last update: Thu Feb 11 09:46:10 2021
eva# show bgp ipv4 uni summ fail
BGP router identifier 10.10.3.11, local AS number 3235 vrf-id 0
BGP table version 40
RIB entries 78, using 14 KiB of memory
Peers 2, using 54 KiB of memory
Neighbor EstdCnt DropCnt ResetTime Reason
192.168.161.204 0 0 never Waiting for peer OPEN
The log file fills up with this type of message:
2021-02-09T18:53:11.653433+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:21.654005+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:31.654381+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:41.654729+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:51.655147+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
As that the connect to a blackhole is correctly rejected by the kernel
Fixed behavior:
eva# show bgp ipv4 uni summ
BGP router identifier 10.10.3.11, local AS number 3235 vrf-id 0
BGP table version 40
RIB entries 78, using 14 KiB of memory
Peers 2, using 54 KiB of memory
Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc
annie(192.168.161.2) 4 64539 126264 39 0 0 0 00:01:36 38 40 N/A
192.168.161.178 4 0 0 0 0 0 0 never Active 0 N/A
Total number of neighbors 2
eva# show bgp ipv4 uni summ fail
BGP router identifier 10.10.3.11, local AS number 3235 vrf-id 0
BGP table version 40
RIB entries 78, using 14 KiB of memory
Peers 2, using 54 KiB of memory
Neighbor EstdCnt DropCnt ResetTime Reason
192.168.161.178 0 0 never Waiting for NHT
Total number of neighbors 2
eva# show bgp nexthop
Current BGP nexthop cache:
192.168.161.2 valid [IGP metric 0], #paths 38, peer 192.168.161.2
if enp39s0
Last update: Thu Feb 11 09:52:05 2021
192.168.161.131 valid [IGP metric 0], #paths 0, peer 192.168.161.131
if enp39s0
Last update: Thu Feb 11 09:52:05 2021
192.168.161.178 invalid, #paths 0, peer 192.168.161.178
Must be Connected
Last update: Thu Feb 11 09:53:37 2021
eva#
Signed-off-by: Donald Sharp <sharpd@nvidia.com>
2021-02-11 15:54:34 +01:00
|
|
|
if (valid_nexthops) {
|
|
|
|
/*
|
|
|
|
* Peering cannot occur across a blackhole nexthop
|
|
|
|
*/
|
2021-02-18 12:55:29 +01:00
|
|
|
if (bnc->nexthop_num == 1 && bnc->nexthop
|
bgpd: Blackhole nexthops are not reachable
When bgp registers for a nexthop that is not reachable due
to the nexthop pointing to a blackhole, bgp is never going
to be able to reach it when attempting to open a connection.
Broken behavior:
<show bgp nexthop>
192.168.161.204 valid [IGP metric 0], #paths 0, peer 192.168.161.204
blackhole
Last update: Thu Feb 11 09:46:10 2021
eva# show bgp ipv4 uni summ fail
BGP router identifier 10.10.3.11, local AS number 3235 vrf-id 0
BGP table version 40
RIB entries 78, using 14 KiB of memory
Peers 2, using 54 KiB of memory
Neighbor EstdCnt DropCnt ResetTime Reason
192.168.161.204 0 0 never Waiting for peer OPEN
The log file fills up with this type of message:
2021-02-09T18:53:11.653433+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:21.654005+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:31.654381+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:41.654729+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
2021-02-09T18:53:51.655147+00:00 nq-sjc6c-cor-01 bgpd[6548]: can't connect to 24.51.27.241 fd 26 : Invalid argument
As that the connect to a blackhole is correctly rejected by the kernel
Fixed behavior:
eva# show bgp ipv4 uni summ
BGP router identifier 10.10.3.11, local AS number 3235 vrf-id 0
BGP table version 40
RIB entries 78, using 14 KiB of memory
Peers 2, using 54 KiB of memory
Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc
annie(192.168.161.2) 4 64539 126264 39 0 0 0 00:01:36 38 40 N/A
192.168.161.178 4 0 0 0 0 0 0 never Active 0 N/A
Total number of neighbors 2
eva# show bgp ipv4 uni summ fail
BGP router identifier 10.10.3.11, local AS number 3235 vrf-id 0
BGP table version 40
RIB entries 78, using 14 KiB of memory
Peers 2, using 54 KiB of memory
Neighbor EstdCnt DropCnt ResetTime Reason
192.168.161.178 0 0 never Waiting for NHT
Total number of neighbors 2
eva# show bgp nexthop
Current BGP nexthop cache:
192.168.161.2 valid [IGP metric 0], #paths 38, peer 192.168.161.2
if enp39s0
Last update: Thu Feb 11 09:52:05 2021
192.168.161.131 valid [IGP metric 0], #paths 0, peer 192.168.161.131
if enp39s0
Last update: Thu Feb 11 09:52:05 2021
192.168.161.178 invalid, #paths 0, peer 192.168.161.178
Must be Connected
Last update: Thu Feb 11 09:53:37 2021
eva#
Signed-off-by: Donald Sharp <sharpd@nvidia.com>
2021-02-11 15:54:34 +01:00
|
|
|
&& bnc->nexthop->type == NEXTHOP_TYPE_BLACKHOLE) {
|
|
|
|
peer->last_reset = PEER_DOWN_WAITING_NHT;
|
|
|
|
valid_nexthops = 0;
|
|
|
|
} else
|
|
|
|
peer->last_reset = PEER_DOWN_WAITING_OPEN;
|
|
|
|
} else
|
2020-02-11 01:25:52 +01:00
|
|
|
peer->last_reset = PEER_DOWN_WAITING_NHT;
|
|
|
|
|
|
|
|
if (!CHECK_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED)) {
|
|
|
|
if (BGP_DEBUG(nht, NHT))
|
2020-03-05 19:17:54 +01:00
|
|
|
zlog_debug(
|
2020-12-17 15:46:30 +01:00
|
|
|
"%s: Updating peer (%s(%s)) status with NHT nexthops %d",
|
2020-03-05 19:17:54 +01:00
|
|
|
__func__, peer->host,
|
2020-12-17 15:46:30 +01:00
|
|
|
peer->bgp->name_pretty,
|
|
|
|
!!valid_nexthops);
|
2020-09-17 18:38:12 +02:00
|
|
|
bgp_fsm_nht_update(peer, !!valid_nexthops);
|
2020-02-11 01:25:52 +01:00
|
|
|
SET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
|
|
|
|
}
|
2015-05-20 02:47:21 +02:00
|
|
|
}
|
|
|
|
|
2015-05-20 02:40:34 +02:00
|
|
|
RESET_FLAG(bnc->change_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* path_nh_map - make or break path-to-nexthop association.
|
|
|
|
* ARGUMENTS:
|
|
|
|
* path - pointer to the path structure
|
|
|
|
* bnc - pointer to the nexthop structure
|
|
|
|
* make - if set, make the association. if unset, just break the existing
|
|
|
|
* association.
|
|
|
|
*/
|
2018-10-17 17:27:30 +02:00
|
|
|
void path_nh_map(struct bgp_path_info *path, struct bgp_nexthop_cache *bnc,
|
|
|
|
bool make)
|
2015-05-20 02:40:34 +02:00
|
|
|
{
|
|
|
|
if (path->nexthop) {
|
|
|
|
LIST_REMOVE(path, nh_thread);
|
|
|
|
path->nexthop->path_count--;
|
|
|
|
path->nexthop = NULL;
|
|
|
|
}
|
|
|
|
if (make) {
|
|
|
|
LIST_INSERT_HEAD(&(bnc->paths), path, nh_thread);
|
|
|
|
path->nexthop = bnc;
|
|
|
|
path->nexthop->path_count++;
|
|
|
|
}
|
|
|
|
}
|
2018-10-05 17:31:29 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This function is called to register nexthops to zebra
|
|
|
|
* as that we may have tried to install the nexthops
|
|
|
|
* before we actually have a zebra connection
|
|
|
|
*/
|
|
|
|
void bgp_nht_register_nexthops(struct bgp *bgp)
|
|
|
|
{
|
2020-04-09 20:56:11 +02:00
|
|
|
for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) {
|
|
|
|
struct bgp_nexthop_cache *bnc;
|
2018-10-05 17:31:29 +02:00
|
|
|
|
2020-04-09 20:56:11 +02:00
|
|
|
frr_each (bgp_nexthop_cache, &bgp->nexthop_cache_table[afi],
|
|
|
|
bnc) {
|
2022-03-12 20:48:18 +01:00
|
|
|
register_zebra_rnh(bnc);
|
2018-10-05 17:31:29 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-11-06 21:55:36 +01:00
|
|
|
|
2020-04-21 21:01:35 +02:00
|
|
|
void bgp_nht_reg_enhe_cap_intfs(struct peer *peer)
|
2018-11-06 21:55:36 +01:00
|
|
|
{
|
|
|
|
struct bgp *bgp;
|
|
|
|
struct bgp_nexthop_cache *bnc;
|
|
|
|
struct nexthop *nhop;
|
|
|
|
struct interface *ifp;
|
|
|
|
struct prefix p;
|
2022-07-21 21:42:51 +02:00
|
|
|
ifindex_t ifindex = 0;
|
2018-11-06 21:55:36 +01:00
|
|
|
|
|
|
|
if (peer->ifp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
bgp = peer->bgp;
|
|
|
|
if (!sockunion2hostprefix(&peer->su, &p)) {
|
2020-04-21 21:01:35 +02:00
|
|
|
zlog_warn("%s: Unable to convert sockunion to prefix for %s",
|
|
|
|
__func__, peer->host);
|
2018-11-06 21:55:36 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (p.family != AF_INET6)
|
|
|
|
return;
|
2022-07-21 21:42:51 +02:00
|
|
|
/*
|
|
|
|
* Gather the ifindex for if up/down events to be
|
|
|
|
* tagged into this fun
|
|
|
|
*/
|
|
|
|
if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
|
|
|
|
ifindex = peer->su.sin6.sin6_scope_id;
|
2018-11-06 21:55:36 +01:00
|
|
|
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0, ifindex);
|
2018-11-06 21:55:36 +01:00
|
|
|
if (!bnc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (peer != bnc->nht_info)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (nhop = bnc->nexthop; nhop; nhop = nhop->next) {
|
2020-02-14 21:15:26 +01:00
|
|
|
ifp = if_lookup_by_index(nhop->ifindex, nhop->vrf_id);
|
|
|
|
|
|
|
|
if (!ifp)
|
|
|
|
continue;
|
|
|
|
|
2018-11-06 21:55:36 +01:00
|
|
|
zclient_send_interface_radv_req(zclient,
|
|
|
|
nhop->vrf_id,
|
|
|
|
ifp, true,
|
|
|
|
BGP_UNNUM_DEFAULT_RA_INTERVAL);
|
|
|
|
}
|
|
|
|
}
|
2020-04-21 21:01:35 +02:00
|
|
|
|
|
|
|
void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
|
|
|
|
{
|
|
|
|
struct bgp *bgp;
|
|
|
|
struct bgp_nexthop_cache *bnc;
|
|
|
|
struct nexthop *nhop;
|
|
|
|
struct interface *ifp;
|
|
|
|
struct prefix p;
|
2022-07-21 21:42:51 +02:00
|
|
|
ifindex_t ifindex = 0;
|
2020-04-21 21:01:35 +02:00
|
|
|
|
|
|
|
if (peer->ifp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
bgp = peer->bgp;
|
|
|
|
|
|
|
|
if (!sockunion2hostprefix(&peer->su, &p)) {
|
|
|
|
zlog_warn("%s: Unable to convert sockunion to prefix for %s",
|
|
|
|
__func__, peer->host);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (p.family != AF_INET6)
|
|
|
|
return;
|
2022-07-21 21:42:51 +02:00
|
|
|
/*
|
|
|
|
* Gather the ifindex for if up/down events to be
|
|
|
|
* tagged into this fun
|
|
|
|
*/
|
|
|
|
if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
|
|
|
|
ifindex = peer->su.sin6.sin6_scope_id;
|
2020-04-21 21:01:35 +02:00
|
|
|
|
2022-07-21 21:42:51 +02:00
|
|
|
bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0, ifindex);
|
2020-04-21 21:01:35 +02:00
|
|
|
if (!bnc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (peer != bnc->nht_info)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (nhop = bnc->nexthop; nhop; nhop = nhop->next) {
|
|
|
|
ifp = if_lookup_by_index(nhop->ifindex, nhop->vrf_id);
|
|
|
|
|
2020-04-30 17:16:28 +02:00
|
|
|
if (!ifp)
|
|
|
|
continue;
|
|
|
|
|
2020-04-21 21:01:35 +02:00
|
|
|
zclient_send_interface_radv_req(zclient, nhop->vrf_id, ifp, 0,
|
|
|
|
0);
|
|
|
|
}
|
|
|
|
}
|
2020-05-09 04:24:56 +02:00
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* L3 NHGs are used for fast failover of nexthops in the dplane. These are
|
|
|
|
* the APIs for allocating L3 NHG ids. Management of the L3 NHG itself is
|
|
|
|
* left to the application using it.
|
|
|
|
* PS: Currently EVPN host routes is the only app using L3 NHG for fast
|
|
|
|
* failover of remote ES links.
|
|
|
|
***************************************************************************/
|
|
|
|
static bitfield_t bgp_nh_id_bitmap;
|
2020-05-16 01:33:41 +02:00
|
|
|
static uint32_t bgp_l3nhg_start;
|
2020-05-09 04:24:56 +02:00
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
/* XXX - currently we do nothing on the callbacks */
|
|
|
|
/* Nexthop-group "add" CLI callback: intentionally a no-op for bgpd. */
static void bgp_l3nhg_add_cb(const char *name)
{
}
|
2022-10-24 15:25:54 +02:00
|
|
|
|
|
|
|
/* Nexthop-group "modify" CLI callback: intentionally a no-op for bgpd. */
static void bgp_l3nhg_modify_cb(const struct nexthop_group_cmd *nhgc)
{
}
|
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
/* Nexthop-group "nexthop added" CLI callback: intentionally a no-op for bgpd. */
static void bgp_l3nhg_add_nexthop_cb(const struct nexthop_group_cmd *nhgc,
				     const struct nexthop *nhop)
{
}
|
2022-10-24 15:25:54 +02:00
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
/* Nexthop-group "nexthop deleted" CLI callback: intentionally a no-op for bgpd. */
static void bgp_l3nhg_del_nexthop_cb(const struct nexthop_group_cmd *nhgc,
				     const struct nexthop *nhop)
{
}
|
2022-10-24 15:25:54 +02:00
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
/* Nexthop-group "delete" CLI callback: intentionally a no-op for bgpd. */
static void bgp_l3nhg_del_cb(const char *name)
{
}
|
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
static void bgp_l3nhg_zebra_init(void)
|
2020-05-09 04:24:56 +02:00
|
|
|
{
|
2020-05-16 01:33:41 +02:00
|
|
|
static bool bgp_l3nhg_zebra_inited;
|
|
|
|
if (bgp_l3nhg_zebra_inited)
|
2020-05-09 04:24:56 +02:00
|
|
|
return;
|
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
bgp_l3nhg_zebra_inited = true;
|
|
|
|
bgp_l3nhg_start = zclient_get_nhg_start(ZEBRA_ROUTE_BGP);
|
2022-10-24 15:25:54 +02:00
|
|
|
nexthop_group_init(bgp_l3nhg_add_cb, bgp_l3nhg_modify_cb,
|
|
|
|
bgp_l3nhg_add_nexthop_cb, bgp_l3nhg_del_nexthop_cb,
|
|
|
|
bgp_l3nhg_del_cb);
|
2020-05-09 04:24:56 +02:00
|
|
|
}
|
|
|
|
|
2020-05-16 01:33:41 +02:00
|
|
|
|
2020-05-09 04:24:56 +02:00
|
|
|
void bgp_l3nhg_init(void)
|
|
|
|
{
|
2020-05-16 01:33:41 +02:00
|
|
|
uint32_t id_max;
|
|
|
|
|
2021-11-11 15:39:52 +01:00
|
|
|
id_max = MIN(ZEBRA_NHG_PROTO_SPACING - 1, 16 * 1024);
|
2020-05-16 01:33:41 +02:00
|
|
|
bf_init(bgp_nh_id_bitmap, id_max);
|
2020-05-09 04:24:56 +02:00
|
|
|
bf_assign_zero_index(bgp_nh_id_bitmap);
|
2020-05-16 01:33:41 +02:00
|
|
|
|
|
|
|
if (BGP_DEBUG(nht, NHT) || BGP_DEBUG(evpn_mh, EVPN_MH_ES))
|
|
|
|
zlog_debug("bgp l3_nhg range %u - %u", bgp_l3nhg_start + 1,
|
|
|
|
bgp_l3nhg_start + id_max);
|
2020-05-09 04:24:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Release the L3 NHG id bitmap at shutdown. */
void bgp_l3nhg_finish(void)
{
	bf_free(bgp_nh_id_bitmap);
}
|
2020-05-16 01:33:41 +02:00
|
|
|
|
|
|
|
uint32_t bgp_l3nhg_id_alloc(void)
|
|
|
|
{
|
|
|
|
uint32_t nhg_id = 0;
|
|
|
|
|
|
|
|
bgp_l3nhg_zebra_init();
|
|
|
|
bf_assign_index(bgp_nh_id_bitmap, nhg_id);
|
|
|
|
if (nhg_id)
|
|
|
|
nhg_id += bgp_l3nhg_start;
|
|
|
|
|
|
|
|
return nhg_id;
|
|
|
|
}
|
|
|
|
|
|
|
|
void bgp_l3nhg_id_free(uint32_t nhg_id)
|
|
|
|
{
|
|
|
|
if (!nhg_id || (nhg_id <= bgp_l3nhg_start))
|
|
|
|
return;
|
|
|
|
|
|
|
|
nhg_id -= bgp_l3nhg_start;
|
|
|
|
|
|
|
|
bf_release_index(bgp_nh_id_bitmap, nhg_id);
|
|
|
|
}
|