2023-02-08 13:17:09 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2002-12-13 21:15:29 +01:00
|
|
|
/* zebra connection and redistribute functions.
|
2017-05-13 10:25:29 +02:00
|
|
|
* Copyright (C) 1999 Kunihiro Ishiguro
|
|
|
|
*/
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2005-05-23 16:19:54 +02:00
|
|
|
#ifndef _QUAGGA_BGP_ZEBRA_H
|
|
|
|
#define _QUAGGA_BGP_ZEBRA_H
|
|
|
|
|
2017-06-28 10:51:10 +02:00
|
|
|
#include "vxlan.h"
|
|
|
|
|
2021-09-17 17:50:03 +02:00
|
|
|
/* Macro to update bgp_original based on bgp_path_info */
|
|
|
|
/*
 * BGP_ORIGINAL_UPDATE(_bgp_orig, _mpinfo, _bgp):
 * Set _bgp_orig to the BGP instance the path _mpinfo originated in.
 * If the path was imported across VRFs (sub_type == BGP_ROUTE_IMPORTED
 * and the vrfleak extra data carries a non-NULL bgp_orig), use that
 * originating instance; otherwise fall back to _bgp itself.
 */
#define BGP_ORIGINAL_UPDATE(_bgp_orig, _mpinfo, _bgp) \
|
2023-08-08 12:47:29 +02:00
|
|
|
((_mpinfo->extra && _mpinfo->extra->vrfleak && \
|
|
|
|
_mpinfo->extra->vrfleak->bgp_orig && \
|
|
|
|
_mpinfo->sub_type == BGP_ROUTE_IMPORTED) \
|
|
|
|
? (_bgp_orig = _mpinfo->extra->vrfleak->bgp_orig) \
|
2021-09-17 17:50:03 +02:00
|
|
|
: (_bgp_orig = _bgp))
|
|
|
|
|
2020-03-24 22:38:37 +01:00
|
|
|
/* Default weight for next hop, if doing weighted ECMP. */
|
|
|
|
#define BGP_ZEBRA_DEFAULT_NHOP_WEIGHT 1
|
|
|
|
|
2023-03-07 20:14:41 +01:00
|
|
|
extern void bgp_zebra_init(struct event_loop *master, unsigned short instance);
|
2022-08-17 11:52:51 +02:00
|
|
|
extern void bgp_if_init(void);
|
2018-05-23 14:14:53 +02:00
|
|
|
extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
|
2018-04-03 15:06:50 +02:00
|
|
|
extern uint32_t bgp_zebra_tm_get_id(void);
|
2018-05-23 14:14:53 +02:00
|
|
|
extern bool bgp_zebra_tm_chunk_obtained(void);
|
2016-01-12 19:41:57 +01:00
|
|
|
extern void bgp_zebra_destroy(void);
|
2023-07-10 16:40:38 +02:00
|
|
|
extern int bgp_zebra_get_table_range(struct zclient *zc, uint32_t chunk_size,
|
2018-03-05 18:09:57 +01:00
|
|
|
uint32_t *start, uint32_t *end);
|
2005-06-28 14:44:16 +02:00
|
|
|
extern int bgp_if_update_all(void);
|
2024-01-26 20:48:53 +01:00
|
|
|
extern void bgp_zebra_route_install(struct bgp_dest *dest,
|
|
|
|
struct bgp_path_info *path, struct bgp *bgp,
|
2024-02-15 20:23:51 +01:00
|
|
|
bool install, struct bgpevpn *vpn,
|
|
|
|
bool is_sync);
|
2022-05-12 14:28:11 +02:00
|
|
|
extern void bgp_zebra_announce_table(struct bgp *bgp, afi_t afi, safi_t safi);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-09-02 18:20:08 +02:00
|
|
|
/* Announce routes of any bgp subtype of a table to zebra */
|
|
|
|
extern void bgp_zebra_announce_table_all_subtypes(struct bgp *bgp, afi_t afi,
|
|
|
|
safi_t safi);
|
|
|
|
|
|
|
|
/* Withdraw all entries of any subtype in a BGP instance's RIB table from Zebra */
|
|
|
|
extern void bgp_zebra_withdraw_table_all_subtypes(struct bgp *bgp, afi_t afi,
|
|
|
|
safi_t safi);
|
|
|
|
|
BGP: Trigger IPv6 router advertisements upon config of unnumbered neighbor
Instead of turning on IPv6 RA on every interface as soon as it has an IPv6
address, only enable it upon configuration of BGP neighbor. When the BGP
neighbor is deleted, signal that RAs can be turned off.
To support this, introduce new message interaction between BGP and Zebra.
Also, take appropriate actions in BGP upon interface add/del since the
unnumbered neighbor could exist prior to interface creation etc.
Only unnumbered IPv6 neighbors require RA, the /30 or /31 based neighbors
don't. However, to keep the interaction simple and not have to deal with
too many dynamic conditions (e.g., address deletes or neighbor change to/from
'v6only'), RAs on the interface are triggered upon any unnumbered neighbor
configuration.
BGP-triggered RAs will cause RAs to be initiated on the interface; however,
if BGP asks that RAs be stopped (upon delete of unnumbered neighbor), RAs
will continue to be exchanged if the operator has explicitly enabled them.
Signed-off-by: Vivek Venkatraman <vivek@cumulusnetworks.com>
Reviewed-by: Donald Sharp <sharpd@cumulusnetworks.com>
Ticket: CM-10640
Reviewed By: CCR-4589
Testing Done: Various manual and automated (refer to defect)
2016-05-02 22:53:38 +02:00
|
|
|
extern void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer);
|
|
|
|
extern void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-05-12 14:28:11 +02:00
|
|
|
extern void bgp_zebra_instance_register(struct bgp *bgp);
|
|
|
|
extern void bgp_zebra_instance_deregister(struct bgp *bgp);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2019-03-13 19:41:29 +01:00
|
|
|
extern void bgp_redistribute_redo(struct bgp *bgp);
|
2022-05-12 14:28:11 +02:00
|
|
|
extern struct bgp_redist *bgp_redist_lookup(struct bgp *bgp, afi_t afi,
|
|
|
|
uint8_t type,
|
|
|
|
unsigned short instance);
|
|
|
|
extern struct bgp_redist *bgp_redist_add(struct bgp *bgp, afi_t afi,
|
|
|
|
uint8_t type, unsigned short instance);
|
|
|
|
extern int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
|
|
|
|
unsigned short instance, bool changed);
|
|
|
|
extern int bgp_redistribute_resend(struct bgp *bgp, afi_t afi, int type,
|
|
|
|
unsigned short instance);
|
2020-03-20 10:57:54 +01:00
|
|
|
extern bool bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name,
|
|
|
|
struct route_map *route_map);
|
2022-05-12 14:28:11 +02:00
|
|
|
extern bool bgp_redistribute_metric_set(struct bgp *bgp, struct bgp_redist *red,
|
|
|
|
afi_t afi, int type, uint32_t metric);
|
2023-08-28 12:23:24 +02:00
|
|
|
extern void bgp_redistribute_unset(struct bgp *bgp, afi_t afi, int type,
|
|
|
|
unsigned short instance);
|
2022-05-12 14:28:11 +02:00
|
|
|
extern int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type,
|
|
|
|
unsigned short instance);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-05-12 14:28:11 +02:00
|
|
|
extern struct interface *if_lookup_by_ipv4(struct in_addr *addr,
|
|
|
|
vrf_id_t vrf_id);
|
|
|
|
extern struct interface *if_lookup_by_ipv4_exact(struct in_addr *addr,
|
|
|
|
vrf_id_t vrf_id);
|
|
|
|
extern struct interface *if_lookup_by_ipv6(struct in6_addr *addr,
|
|
|
|
ifindex_t ifindex, vrf_id_t vrf_id);
|
|
|
|
extern struct interface *if_lookup_by_ipv6_exact(struct in6_addr *addr,
|
|
|
|
ifindex_t ifindex,
|
|
|
|
vrf_id_t vrf_id);
|
2017-11-20 06:47:04 +01:00
|
|
|
extern int bgp_zebra_advertise_subnet(struct bgp *bgp, int advertise,
|
|
|
|
vni_t vni);
|
2022-05-12 14:28:11 +02:00
|
|
|
extern int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise,
|
|
|
|
vni_t vni);
|
2019-02-04 02:29:59 +01:00
|
|
|
extern int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise,
|
|
|
|
vni_t vni);
|
2022-05-12 14:28:11 +02:00
|
|
|
extern int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise);
|
2018-11-01 16:28:08 +01:00
|
|
|
extern int bgp_zebra_dup_addr_detection(struct bgp *bgp);
|
2018-10-05 01:20:12 +02:00
|
|
|
extern int bgp_zebra_vxlan_flood_control(struct bgp *bgp,
|
|
|
|
enum vxlan_flood_control flood_ctrl);
|
2017-05-15 23:30:19 +02:00
|
|
|
|
2016-10-07 15:44:42 +02:00
|
|
|
extern int bgp_zebra_num_connects(void);
|
|
|
|
|
2022-05-12 14:28:11 +02:00
|
|
|
extern bool bgp_zebra_nexthop_set(union sockunion *local,
|
|
|
|
union sockunion *remote,
|
|
|
|
struct bgp_nexthop *nexthop,
|
|
|
|
struct peer *peer);
|
2018-03-08 15:39:19 +01:00
|
|
|
struct bgp_pbr_action;
|
|
|
|
struct bgp_pbr_match;
|
2018-11-29 15:14:41 +01:00
|
|
|
struct bgp_pbr_rule;
|
2018-03-08 15:39:19 +01:00
|
|
|
struct bgp_pbr_match_entry;
|
2019-10-25 17:42:39 +02:00
|
|
|
|
2018-03-08 15:39:19 +01:00
|
|
|
extern void bgp_send_pbr_rule_action(struct bgp_pbr_action *pbra,
|
2018-11-29 15:14:41 +01:00
|
|
|
struct bgp_pbr_rule *pbr,
|
|
|
|
bool install);
|
2018-03-08 15:39:19 +01:00
|
|
|
extern void bgp_send_pbr_ipset_match(struct bgp_pbr_match *pbrim,
|
|
|
|
bool install);
|
|
|
|
extern void bgp_send_pbr_ipset_entry_match(struct bgp_pbr_match_entry *pbrime,
|
|
|
|
bool install);
|
2018-03-12 09:38:53 +01:00
|
|
|
extern void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
|
|
|
|
struct bgp_pbr_match *pbm,
|
|
|
|
bool install);
|
2018-03-08 15:39:19 +01:00
|
|
|
|
2018-03-28 14:51:57 +02:00
|
|
|
extern void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
|
|
|
|
afi_t afi, uint32_t table_id, bool announce);
|
2019-10-25 17:42:39 +02:00
|
|
|
extern int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable);
|
2023-03-09 14:36:51 +01:00
|
|
|
extern int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi,
|
|
|
|
enum zserv_client_capabilities);
|
2019-10-25 17:42:39 +02:00
|
|
|
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
|
2020-12-19 02:50:18 +01:00
|
|
|
extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
|
2021-09-14 00:11:11 +02:00
|
|
|
extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name);
|
2024-03-23 19:07:33 +01:00
|
|
|
extern int bgp_zebra_srv6_manager_get_locator(const char *name);
|
2024-03-23 19:17:48 +01:00
|
|
|
extern bool bgp_zebra_request_srv6_sid(const struct srv6_sid_ctx *ctx,
|
|
|
|
struct in6_addr *sid_value,
|
|
|
|
const char *locator_name,
|
|
|
|
uint32_t *sid_func);
|
|
|
|
extern void bgp_zebra_release_srv6_sid(const struct srv6_sid_ctx *ctx);
|
2024-03-23 19:07:33 +01:00
|
|
|
|
bgpd: add support for l3vpn per-nexthop label
This commit introduces a new method to associate a label to
prefixes to export to a VPNv4 backbone. All the methods to
associate a label to a BGP update is documented in rfc4364,
chapter 4.3.2. Initially, the "single label for an entire
VRF" method was available. This commit adds "single label
for each attachment circuit" method.
The change impacts the control-plane, because each BGP update
is checked to know if the nexthop has reachability in the VRF
or not. If this is the case, then a unique label for a given
destination IP in the VRF will be picked up. This label will
be reused for another BGP update that will have the same
nexthop IP address.
The change impacts the data-plane, because the MPLS pop
mechanism applied to incoming labelled packets changes: the
MPLS label is popped, and the packet is directly sent to the
connected nexthop described in the previous outgoing BGP VPN
update.
By default per-vrf mode is done, but the user may choose
the per-nexthop mode, by using the vty command from the
previous commit. In the latter case, a per-vrf label
will however be allocated to handle networks that are not directly
connected. This is the case for local traffic for instance.
The change also includes the following:
- ECMP case
In case a route is learnt in a given VRF, and is resolved via an
ECMP nexthop. This implies that when exporting the route as a BGP
update, if label allocation per nexthop is used, then two possible
MPLS values could be picked up, which is not possible with the
current implementation. Actually, the NLRI for VPNv4 stores one
prefix, and one single label value, not two. Today, RFC8277 with
multiple label capability is not yet available.
To avoid this corner case, when a route is resolved via more than one
nexthop, the label allocation per nexthop will not apply, and the
default per-vrf label will be chosen.
Let us imagine BGP redistributes a static route using the `172.31.0.20`
nexthop. The nexthop resolution will find two different nexthops for a
unique BGP update.
> r1# show running-config
> [..]
> vrf vrf1
> ip route 172.31.0.30/32 172.31.0.20
> r1# show bgp vrf vrf1 nexthop
> [..]
> 172.31.0.20 valid [IGP metric 0], #paths 1
> gate 192.0.2.11
> gate 192.0.2.12
> Last update: Mon Jan 16 09:27:09 2023
> Paths:
> 1/1 172.31.0.30/32 VRF vrf1 flags 0x20018
To avoid this situation, BGP updates that resolve over multiple
nexthops are using the unique per-vrf label.
- recursive route case
Prefixes that need a recursive route to be resolved can
also be eligible for mpls allocation per nexthop. In that
case, the nexthop will be the recursive nexthop calculated.
To achieve this, all nexthop types in bnc contexts are valid,
except for the blackhole nexthops.
- network declared prefixes
Nexthop tracking is used to look for the reachability of the
prefixes. When the 'no bgp network import-check' command
is used, network declared prefixes are maintained active,
even if there is no active nexthop.
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2023-02-28 14:25:02 +01:00
|
|
|
extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label,
|
2023-02-16 10:39:40 +01:00
|
|
|
ifindex_t index, vrf_id_t vrfid,
|
bgpd: add support for l3vpn per-nexthop label
This commit introduces a new method to associate a label to
prefixes to export to a VPNv4 backbone. All the methods to
associate a label with a BGP update are documented in RFC 4364,
chapter 4.3.2. Initially, the "single label for an entire
VRF" method was available. This commit adds "single label
for each attachment circuit" method.
The change impacts the control-plane, because each BGP update
is checked to know if the nexthop has reachability in the VRF
or not. If this is the case, then a unique label for a given
destination IP in the VRF will be picked up. This label will
be reused for another BGP update that will have the same
nexthop IP address.
The change impacts the data-plane, because the MPLS pop
mechanism applied to incoming labelled packets changes: the
MPLS label is popped, and the packet is directly sent to the
connected nexthop described in the previous outgoing BGP VPN
update.
By default per-vrf mode is done, but the user may choose
the per-nexthop mode, by using the vty command from the
previous commit. In the latter case, a per-vrf label
will however be allocated to handle networks that are not directly
connected. This is the case for local traffic for instance.
The change also includes the following:
- ECMP case
In case a route is learnt in a given VRF, and is resolved via an
ECMP nexthop. This implies that when exporting the route as a BGP
update, if label allocation per nexthop is used, then two possible
MPLS values could be picked up, which is not possible with the
current implementation. Actually, the NLRI for VPNv4 stores one
prefix, and one single label value, not two. Today, RFC8277 with
multiple label capability is not yet available.
To avoid this corner case, when a route is resolved via more than one
nexthop, the label allocation per nexthop will not apply, and the
default per-vrf label will be chosen.
Let us imagine BGP redistributes a static route using the `172.31.0.20`
nexthop. The nexthop resolution will find two different nexthops for a
unique BGP update.
> r1# show running-config
> [..]
> vrf vrf1
> ip route 172.31.0.30/32 172.31.0.20
> r1# show bgp vrf vrf1 nexthop
> [..]
> 172.31.0.20 valid [IGP metric 0], #paths 1
> gate 192.0.2.11
> gate 192.0.2.12
> Last update: Mon Jan 16 09:27:09 2023
> Paths:
> 1/1 172.31.0.30/32 VRF vrf1 flags 0x20018
To avoid this situation, BGP updates that resolve over multiple
nexthops are using the unique per-vrf label.
- recursive route case
Prefixes that need a recursive route to be resolved can
also be eligible for mpls allocation per nexthop. In that
case, the nexthop will be the recursive nexthop calculated.
To achieve this, all nexthop types in bnc contexts are valid,
except for the blackhole nexthops.
- network declared prefixes
Nexthop tracking is used to look for the reachability of the
prefixes. When the 'no bgp network import-check' command
is used, network declared prefixes are maintained active,
even if there is no active nexthop.
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2023-02-28 14:25:02 +01:00
|
|
|
enum lsp_types_t ltype,
|
2024-02-26 18:11:09 +01:00
|
|
|
struct prefix *p, uint8_t num_labels,
|
2023-05-11 15:42:08 +02:00
|
|
|
mpls_label_t out_labels[]);
|
bgpd: fix hardset l3vpn label available in mpls pool
Today, when configuring BGP L3VPN mpls, the operator may
use that command to hardset a label value:
> router bgp 65500 vrf vrf1
> address-family ipv4 unicast
> label vpn export <hardset_label_value>
Today, BGP uses this value without checks, leading to potential
conflicts with other control planes like LDP. For instance, if
LDP initiates with a label chunk of [16;72] and BGP also uses the
50 label value, a conflict arises.
The 'label manager' service in zebra oversees label allocations.
While all the control plane daemons use it, BGP doesn't when a
hardset label is in place.
This update fixes this problem. Now, when a hardset label is set for
l3vpn export, a request is made to the label manager for approval,
ensuring no conflicts with other daemons. But, this means some existing
BGP configurations might become non-operational if they conflict with
labels already allocated to another daemon but not used.
note: Labels below 16 are reserved and won't be checked for consistency
by the label manager.
Fixes: ddb5b4880ba8 ("bgpd: vpn-vrf route leaking")
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2023-09-01 17:14:06 +02:00
|
|
|
extern bool bgp_zebra_request_label_range(uint32_t base, uint32_t chunk_size,
|
|
|
|
bool label_auto);
|
2023-06-12 16:09:52 +02:00
|
|
|
extern void bgp_zebra_release_label_range(uint32_t start, uint32_t end);
|
2024-01-26 20:48:53 +01:00
|
|
|
extern enum zclient_send_status
|
|
|
|
bgp_zebra_withdraw_actual(struct bgp_dest *dest, struct bgp_path_info *info,
|
|
|
|
struct bgp *bgp);
|
2024-08-28 20:38:53 +02:00
|
|
|
extern void bgp_zebra_process_remote_routes_for_l2vni(struct event *e);
|
2024-11-27 09:04:51 +01:00
|
|
|
extern void bgp_zebra_process_remote_routes_for_l3vrf(struct event *e);
|
2005-05-23 16:19:54 +02:00
|
|
|
#endif /* _QUAGGA_BGP_ZEBRA_H */
|