// SPDX-License-Identifier: GPL-2.0-or-later
/* Zebra Nexthop Group header.
 * Copyright (C) 2019 Cumulus Networks, Inc.
 * Donald Sharp
 * Stephen Worley
 */
#ifndef __ZEBRA_NHG_H__
#define __ZEBRA_NHG_H__

#include "lib/nexthop.h"
#include "lib/nexthop_group.h"

#ifdef __cplusplus
extern "C" {
#endif

/* This struct is used exclusively for dataplane
 * interaction via a dataplane context.
 *
 * It is designed to mimic the netlink nexthop_grp
 * struct in include/linux/nexthop.h
 */
struct nh_grp {
	uint32_t id;
	uint8_t weight;
};
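
/*
 * Illustrative sketch (not part of this API): because the layout mirrors
 * the kernel's struct nexthop_grp, a dataplane encoder can walk an array
 * of these directly when building a netlink group message. The id/weight
 * values below are made up.
 *
 *	struct nh_grp grp[2] = {
 *		{ .id = 10, .weight = 1 },
 *		{ .id = 11, .weight = 2 },
 *	};
 */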

PREDECL_RBTREE_UNIQ(nhg_connected_tree);

/*
 * Hashtables containing nhg entries are in `zebra_router`.
 */
struct nhg_hash_entry {
	uint32_t id;
	afi_t afi;
	vrf_id_t vrf_id;

	/* Time since last update */
	time_t uptime;

	/* Source protocol - zebra or another daemon */
	int type;

	/* zapi instance and session id, for groups from other daemons */
	uint16_t zapi_instance;
	uint32_t zapi_session;

	struct nexthop_group nhg;

	/* If supported, a mapping of backup nexthops. */
	struct nhg_backup_info *backup_info;

	/* If this is not a group, it
	 * will be a single nexthop
	 * and must have an interface
	 * associated with it.
	 * Otherwise, this will be null.
	 */
	struct interface *ifp;

	uint32_t refcnt;
	uint32_t dplane_ref;

	uint32_t flags;

	/* Dependency trees for other entries.
	 * For instance a group with two
	 * nexthops will have two dependencies
	 * pointing to those nhg_hash_entries.
	 *
	 * Using an rb tree here to make lookups
	 * faster with IDs.
	 *
	 * nhg_depends is the RB tree of entries that this
	 * group contains.
	 *
	 * nhg_dependents is the RB tree of entries that
	 * this group is being used by.
	 *
	 * NHG id 3 with nexthops id 1/2:
	 * nhg(3)->nhg_depends has 1 and 2 in the tree
	 * nhg(3)->nhg_dependents is empty
	 *
	 * nhg(1)->nhg_depends is empty
	 * nhg(1)->nhg_dependents has 3 in the tree
	 *
	 * nhg(2)->nhg_depends is empty
	 * nhg(2)->nhg_dependents has 3 in the tree
	 */
	struct nhg_connected_tree_head nhg_depends, nhg_dependents;
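
	/*
	 * Illustrative sketch of walking the dependency tree; it assumes
	 * the struct nhg_connected node type from zebra_nhg_private.h,
	 * whose tree nodes carry an nhe pointer:
	 *
	 *	struct nhg_connected *rb_node_dep;
	 *
	 *	frr_each (nhg_connected_tree, &nhe->nhg_depends, rb_node_dep)
	 *		zlog_debug("depends on NHG %u", rb_node_dep->nhe->id);
	 */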

	struct event *timer;

	/*
	 * Is this nexthop group valid, ie all nexthops are fully resolved.
	 * What is fully resolved? It's a nexthop that is either self
	 * contained and correct (ie no recursive pointer) or a nexthop
	 * that is recursively resolved and correct.
	 */
#define NEXTHOP_GROUP_VALID (1 << 0)
	/*
	 * Has this nexthop group been installed? At this point in time, this
	 * means that the data-plane has been told about this nexthop group
	 * and its possible usage by a route entry.
	 */
#define NEXTHOP_GROUP_INSTALLED (1 << 1)
	/*
	 * Has the nexthop group been queued to be sent to the FIB?
	 * The NEXTHOP_GROUP_VALID flag should also be set by this point.
	 */
#define NEXTHOP_GROUP_QUEUED (1 << 2)
	/*
	 * Is this a nexthop that is recursively resolved?
	 */
#define NEXTHOP_GROUP_RECURSIVE (1 << 3)

	/*
	 * Backup nexthop support - identify groups that are backups for
	 * another group.
	 */
#define NEXTHOP_GROUP_BACKUP (1 << 4)

	/*
	 * The NHG has been released by an upper level protocol via the
	 * `zebra_nhg_proto_del()` API.
	 *
	 * We use this flag to track this state in case the NHG is still
	 * being used by routes therefore holding their refcnts as well.
	 * Otherwise, the NHG will be removed and uninstalled.
	 */
#define NEXTHOP_GROUP_PROTO_RELEASED (1 << 5)

	/*
	 * When deleting a NHG, notice that it is still installed
	 * and, if so, slightly delay the actual removal into
	 * the future, so that upper level protocols might
	 * be able to take advantage of some NHGs that
	 * are still there.
	 */
#define NEXTHOP_GROUP_KEEP_AROUND (1 << 6)

	/*
	 * Track FPM installation status.
	 */
#define NEXTHOP_GROUP_FPM (1 << 7)

	/*
	 * When an interface comes up, install the
	 * singletons and schedule the NHGs that
	 * are using this nhg to be reinstalled
	 * when installation is successful.
	 */
#define NEXTHOP_GROUP_REINSTALL (1 << 8)

	/*
	 * Connected routes and kernel routes received
	 * from the kernel or created by Zebra do not
	 * need to be installed. For connected, this
	 * is because the routes are in the local table
	 * but not imported, and we create an amalgam
	 * route for it. For kernel routes, if the route
	 * is a pre-nhg route, there is no nexthop associated
	 * with it and we should not create it until it
	 * is used by something else.
	 * The reason for this is that it just
	 * fills up the DPlane's nexthop slots when there
	 * are a bunch of interfaces or pre-existing routes.
	 * As such let's not initially install it (but
	 * pretend it was successful), and if another route
	 * chooses this NHG then we can install it then.
	 */
#define NEXTHOP_GROUP_INITIAL_DELAY_INSTALL (1 << 9)
};
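
/*
 * Sketch of typical flag handling with the CHECK_FLAG/SET_FLAG helpers
 * from lib/zebra.h; the ZEBRA_ROUTE_MAX argument here is illustrative of
 * the triggering route type:
 *
 *	if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID) &&
 *	    !CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED))
 *		zebra_nhg_install_kernel(nhe, ZEBRA_ROUTE_MAX);
 */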

/* Upper 4 bits of the NHG ID are reserved for indicating the NHG type */
#define NHG_ID_TYPE_POS 28
enum nhg_type {
	NHG_TYPE_L3 = 0,
	NHG_TYPE_L2_NH, /* NHs in a L2 NHG used as a MAC/FDB dest */
	NHG_TYPE_L2,	/* L2 NHG used as a MAC/FDB dest */
};
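
/*
 * Sketch of how the type packs into the upper bits of an ID (the index
 * value is made up):
 *
 *	uint32_t id = ((uint32_t)NHG_TYPE_L2 << NHG_ID_TYPE_POS) | index;
 *	enum nhg_type type = (enum nhg_type)(id >> NHG_ID_TYPE_POS);
 */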

/* Was this one we created, either this session or previously? */
#define ZEBRA_NHG_CREATED(NHE)                                                 \
	(((NHE->type) <= ZEBRA_ROUTE_MAX) && (NHE->type != ZEBRA_ROUTE_KERNEL))

/* Is this an NHE owned by zebra and not an upper level protocol? */
#define ZEBRA_OWNED(NHE) (NHE->type == ZEBRA_ROUTE_NHG)

#define PROTO_OWNED(NHE) (NHE->id >= ZEBRA_NHG_PROTO_LOWER)
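
/*
 * Minimal sketch of the ownership checks above:
 *
 *	if (PROTO_OWNED(nhe))
 *		zlog_debug("NHG %u owned by proto %d", nhe->id, nhe->type);
 *	else if (ZEBRA_OWNED(nhe))
 *		zlog_debug("NHG %u owned by zebra", nhe->id);
 */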

/*
 * Backup nexthops: this is a group object itself, so
 * that the backup nexthops can use the same code as a normal object.
 */
struct nhg_backup_info {
	struct nhg_hash_entry *nhe;
};

enum nhg_ctx_op_e {
	NHG_CTX_OP_NONE = 0,
	NHG_CTX_OP_NEW,
	NHG_CTX_OP_DEL,
};

enum nhg_ctx_status {
	NHG_CTX_NONE = 0,
	NHG_CTX_QUEUED,
	NHG_CTX_REQUEUED,
	NHG_CTX_SUCCESS,
	NHG_CTX_FAILURE,
};

/*
 * Context needed to queue nhg updates on the
 * work queue.
 */
struct nhg_ctx {
	/* Unique ID */
	uint32_t id;

	vrf_id_t vrf_id;
	afi_t afi;

	/*
	 * This should only ever be ZEBRA_ROUTE_NHG unless we get a kernel
	 * created nexthop not made by us.
	 */
	int type;

	/* If it's a group array, how many? */
	uint16_t count;

	/* It's either a single nexthop or an array of IDs */
	union {
		struct nexthop nh;
		struct nh_grp grp[MULTIPATH_NUM];
	} u;

	struct nhg_resilience resilience;
	enum nhg_ctx_op_e op;
	enum nhg_ctx_status status;
};
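
/*
 * Sketch of how a consumer distinguishes the union arms; the helper
 * names are hypothetical:
 *
 *	if (ctx->count == 0)
 *		handle_single(&ctx->u.nh);
 *	else
 *		handle_group(ctx->u.grp, ctx->count);
 */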

/* Global control to disable use of kernel nexthops, if available. We can't
 * force the kernel to support nexthop ids, of course, but we can disable
 * zebra's use of them, e.g. for testing. By default, if the kernel supports
 * nexthop ids, zebra uses them.
 */
void zebra_nhg_enable_kernel_nexthops(bool set);
bool zebra_nhg_kernel_nexthops_enabled(void);
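
/*
 * Sketch of toggling this at runtime, e.g. from a vty handler:
 *
 *	zebra_nhg_enable_kernel_nexthops(false);
 *	assert(!zebra_nhg_kernel_nexthops_enabled());
 */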

/* Global control for zebra to only use proto-owned nexthops */
void zebra_nhg_set_proto_nexthops_only(bool set);
bool zebra_nhg_proto_nexthops_only(void);

/* Global control for use of activated backups for recursive resolution. */
void zebra_nhg_set_recursive_use_backups(bool set);
bool zebra_nhg_recursive_use_backups(void);

/**
 * NHE abstracted tree functions.
 * Use these where possible instead of direct access.
 */
struct nhg_hash_entry *zebra_nhg_alloc(void);
void zebra_nhg_free(struct nhg_hash_entry *nhe);
/* In order to clear a generic hash, we need a generic api, sigh. */
void zebra_nhg_hash_free(void *p);
void zebra_nhg_hash_free_zero_id(struct hash_bucket *b, void *arg);

/* Init an nhe, for use in a hash lookup for example. There's some fuzziness
 * if the nhe represents only a single nexthop, so we try to capture that
 * variant also.
 */
void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
		    const struct nexthop *nh);
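
/*
 * Sketch of the init-then-lookup pattern; the zrouter.nhgs hash handle
 * is assumed from zebra_router.h:
 *
 *	struct nhg_hash_entry lookup, *nhe;
 *
 *	zebra_nhe_init(&lookup, afi, nh);
 *	nhe = hash_lookup(zrouter.nhgs, &lookup);
 */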

/*
 * Shallow copy of 'orig', into new/allocated nhe.
 */
struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig,
				      uint32_t id);

/* Allocate, free backup nexthop info objects */
struct nhg_backup_info *zebra_nhg_backup_alloc(void);
void zebra_nhg_backup_free(struct nhg_backup_info **p);

struct nexthop_group *zebra_nhg_get_backup_nhg(struct nhg_hash_entry *nhe);

extern struct nhg_hash_entry *zebra_nhg_resolve(struct nhg_hash_entry *nhe);

extern unsigned int zebra_nhg_depends_count(const struct nhg_hash_entry *nhe);
extern bool zebra_nhg_depends_is_empty(const struct nhg_hash_entry *nhe);

extern unsigned int
zebra_nhg_dependents_count(const struct nhg_hash_entry *nhe);
extern bool zebra_nhg_dependents_is_empty(const struct nhg_hash_entry *nhe);

/* Lookup ID, doesn't create */
extern struct nhg_hash_entry *zebra_nhg_lookup_id(uint32_t id);

/* Hash functions */
extern uint32_t zebra_nhg_hash_key(const void *arg);
extern uint32_t zebra_nhg_id_key(const void *arg);

extern bool zebra_nhg_hash_equal(const void *arg1, const void *arg2);
extern bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2);

/*
 * Process a context off of a queue.
 * Specifically this should be from
 * the rib meta queue.
 */
extern int nhg_ctx_process(struct nhg_ctx *ctx);
void nhg_ctx_free(struct nhg_ctx **ctx);

/* Find via kernel nh creation */
extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp,
				 uint16_t count, vrf_id_t vrf_id, afi_t afi, int type, int startup,
				 struct nhg_resilience *resilience);
/* Del via kernel */
extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id);

/* Find an nhe based on a nexthop_group */
extern struct nhg_hash_entry *zebra_nhg_rib_find(uint32_t id,
						 struct nexthop_group *nhg,
						 afi_t rt_afi, int type);

/* Find an nhe based on a route's nhe, used during route creation */
struct nhg_hash_entry *
zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi);

/**
 * Functions for Add/Del/Replace via protocol NHG creation.
 *
 * The NHEs will not be hashed. They will only be present in the
 * ID table and therefore not sharable.
 *
 * It is the owning protocol's job to manage these.
 */

/*
 * Add NHE. If one already exists, replace it.
 *
 * Returns allocated NHE on success, otherwise NULL.
 */
struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type,
					   uint16_t instance, uint32_t session,
					   struct nexthop_group *nhg,
					   afi_t afi);

/*
 * Del NHE.
 *
 * Returns deleted NHE on success, otherwise NULL.
 *
 * Caller must decrement ref with zebra_nhg_decrement_ref() when done.
 */
struct nhg_hash_entry *zebra_nhg_proto_del(uint32_t id, int type);
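
/*
 * Sketch of a proto add/del round trip (the id, instance, session and
 * ZEBRA_ROUTE_SHARP type are illustrative):
 *
 *	struct nhg_hash_entry *nhe;
 *
 *	nhe = zebra_nhg_proto_add(id, ZEBRA_ROUTE_SHARP, instance, session,
 *				  nhg, AFI_IP);
 *	...
 *	nhe = zebra_nhg_proto_del(id, ZEBRA_ROUTE_SHARP);
 *	if (nhe)
 *		zebra_nhg_decrement_ref(nhe);
 */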

/*
 * Remove all NHGs owned by a specific proto.
 *
 * Called after client disconnect.
 */
unsigned long zebra_nhg_score_proto(int type);

/* Reference counter functions */
extern void zebra_nhg_decrement_ref(struct nhg_hash_entry *nhe);
extern void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe);

/* Check validity of nhe; if invalid, this will update dependents as well */
extern void zebra_nhg_check_valid(struct nhg_hash_entry *nhe);

/* Convert nhe depends to a grp context that can be passed around safely */
extern uint16_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, int size);
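
/*
 * Sketch: flatten an NHE's depends into a fixed-size array for the
 * dataplane; MULTIPATH_NUM bounds the group size:
 *
 *	struct nh_grp grp[MULTIPATH_NUM] = {};
 *	uint16_t num = zebra_nhg_nhe2grp(grp, nhe, MULTIPATH_NUM);
 */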

/* Dataplane install/uninstall */
extern void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe, uint8_t type);
extern void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe);
extern void zebra_interface_nhg_reinstall(struct interface *ifp);

/* Forward ref of dplane update context type */
struct zebra_dplane_ctx;
extern void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx);

/* Sweep the nhg hash tables for old entries on restart */
extern void zebra_nhg_sweep_table(struct hash *hash);

/*
 * We are shutting down, but the nexthops should be kept
 * since -r has been specified and we don't want to delete
 * the routes unintentionally.
 */
extern void zebra_nhg_mark_keep(void);

/* Nexthop resolution processing */
struct route_entry; /* Forward ref to avoid circular includes */
extern void nexthop_vrf_update(struct route_node *rn, struct route_entry *re, vrf_id_t vrf_id);
extern int nexthop_active_update(struct route_node *rn, struct route_entry *re,
				 struct route_entry *old_re);

extern const char *zebra_nhg_afi2str(struct nhg_hash_entry *nhe);

#ifdef _FRR_ATTRIBUTE_PRINTFRR
#pragma FRR printfrr_ext "%pNG" (const struct nhg_hash_entry *)
#endif

#ifdef __cplusplus
}
#endif

#endif /* __ZEBRA_NHG_H__ */