2023-02-08 13:17:09 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2018-06-27 16:29:02 +02:00
|
|
|
/*********************************************************************
|
|
|
|
* Copyright 2013 Cumulus Networks, LLC. All rights reserved.
|
|
|
|
* Copyright 2014,2015,2016,2017 Cumulus Networks, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* bfd.c: implements the BFD protocol.
|
|
|
|
*
|
|
|
|
* Authors
|
|
|
|
* -------
|
|
|
|
* Shrijeet Mukherjee [shm@cumulusnetworks.com]
|
|
|
|
* Kanna Rajagopal [kanna@cumulusnetworks.com]
|
|
|
|
* Radhika Mahankali [Radhika@cumulusnetworks.com]
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
|
|
|
|
|
|
|
#include "lib/jhash.h"
|
2020-04-17 15:35:15 +02:00
|
|
|
#include "lib/network.h"
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
#include "bfd.h"
|
|
|
|
|
2019-06-21 08:04:57 +02:00
|
|
|
DEFINE_MTYPE_STATIC(BFDD, BFDD_CONFIG, "long-lived configuration memory");
|
2020-05-15 22:38:04 +02:00
|
|
|
DEFINE_MTYPE_STATIC(BFDD, BFDD_PROFILE, "long-lived profile memory");
|
2019-06-21 08:04:57 +02:00
|
|
|
DEFINE_MTYPE_STATIC(BFDD, BFDD_SESSION_OBSERVER, "Session observer");
|
|
|
|
DEFINE_MTYPE_STATIC(BFDD, BFDD_VRF, "BFD VRF");
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prototypes
|
|
|
|
*/
|
|
|
|
static uint32_t ptm_bfd_gen_ID(void);
|
|
|
|
static void ptm_bfd_echo_xmt_TO(struct bfd_session *bfd);
|
|
|
|
static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa,
|
|
|
|
uint32_t ldisc);
|
|
|
|
static int bfd_session_update(struct bfd_session *bs, struct bfd_peer_cfg *bpc);
|
|
|
|
static const char *get_diag_str(int diag);
|
|
|
|
|
2019-02-05 01:05:39 +01:00
|
|
|
static void bs_admin_down_handler(struct bfd_session *bs, int nstate);
|
|
|
|
static void bs_down_handler(struct bfd_session *bs, int nstate);
|
|
|
|
static void bs_init_handler(struct bfd_session *bs, int nstate);
|
|
|
|
static void bs_up_handler(struct bfd_session *bs, int nstate);
|
|
|
|
|
2020-05-20 20:09:12 +02:00
|
|
|
/**
|
|
|
|
* Remove BFD profile from all BFD sessions so we don't leave dangling
|
|
|
|
* pointers.
|
|
|
|
*/
|
|
|
|
static void bfd_profile_detach(struct bfd_profile *bp);
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
/* Zeroed array with the size of an IPv6 address. */
|
|
|
|
struct in6_addr zero_addr;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2020-05-15 22:38:04 +02:00
|
|
|
/** BFD profiles list. */
|
|
|
|
struct bfdproflist bplist;
|
|
|
|
|
2018-06-27 16:29:02 +02:00
|
|
|
/*
|
|
|
|
* Functions
|
|
|
|
*/
|
2020-05-15 22:38:04 +02:00
|
|
|
struct bfd_profile *bfd_profile_lookup(const char *name)
|
|
|
|
{
|
|
|
|
struct bfd_profile *bp;
|
|
|
|
|
|
|
|
TAILQ_FOREACH (bp, &bplist, entry) {
|
|
|
|
if (strcmp(name, bp->name))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
return bp;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bfd_profile_set_default(struct bfd_profile *bp)
|
|
|
|
{
|
2021-03-10 20:11:19 +01:00
|
|
|
bp->admin_shutdown = false;
|
2020-05-15 22:38:04 +02:00
|
|
|
bp->detection_multiplier = BFD_DEFDETECTMULT;
|
|
|
|
bp->echo_mode = false;
|
2020-08-06 21:25:44 +02:00
|
|
|
bp->passive = false;
|
2020-08-11 19:43:56 +02:00
|
|
|
bp->minimum_ttl = BFD_DEF_MHOP_TTL;
|
2021-03-10 15:31:57 +01:00
|
|
|
bp->min_echo_rx = BFD_DEF_REQ_MIN_ECHO_RX;
|
|
|
|
bp->min_echo_tx = BFD_DEF_DES_MIN_ECHO_TX;
|
2020-05-15 22:38:04 +02:00
|
|
|
bp->min_rx = BFD_DEFREQUIREDMINRX;
|
|
|
|
bp->min_tx = BFD_DEFDESIREDMINTX;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct bfd_profile *bfd_profile_new(const char *name)
|
|
|
|
{
|
|
|
|
struct bfd_profile *bp;
|
|
|
|
|
|
|
|
/* Search for duplicates. */
|
|
|
|
if (bfd_profile_lookup(name) != NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Allocate, name it and put into list. */
|
|
|
|
bp = XCALLOC(MTYPE_BFDD_PROFILE, sizeof(*bp));
|
|
|
|
strlcpy(bp->name, name, sizeof(bp->name));
|
|
|
|
TAILQ_INSERT_TAIL(&bplist, bp, entry);
|
|
|
|
|
|
|
|
/* Set default values. */
|
|
|
|
bfd_profile_set_default(bp);
|
|
|
|
|
|
|
|
return bp;
|
|
|
|
}
|
|
|
|
|
|
|
|
void bfd_profile_free(struct bfd_profile *bp)
|
|
|
|
{
|
2020-05-20 20:09:12 +02:00
|
|
|
/* Detach from any session. */
|
2020-05-29 22:44:54 +02:00
|
|
|
if (bglobal.bg_shutdown == false)
|
|
|
|
bfd_profile_detach(bp);
|
2020-05-20 20:09:12 +02:00
|
|
|
|
|
|
|
/* Remove from global list. */
|
2020-05-15 22:38:04 +02:00
|
|
|
TAILQ_REMOVE(&bplist, bp, entry);
|
2020-07-03 17:43:00 +02:00
|
|
|
|
|
|
|
XFREE(MTYPE_BFDD_PROFILE, bp);
|
2020-05-15 22:38:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void bfd_profile_apply(const char *profname, struct bfd_session *bs)
|
|
|
|
{
|
|
|
|
struct bfd_profile *bp;
|
|
|
|
|
|
|
|
/* Remove previous profile if any. */
|
|
|
|
if (bs->profile_name) {
|
|
|
|
/* We are changing profiles. */
|
|
|
|
if (strcmp(bs->profile_name, profname)) {
|
|
|
|
XFREE(MTYPE_BFDD_PROFILE, bs->profile_name);
|
|
|
|
bs->profile_name =
|
|
|
|
XSTRDUP(MTYPE_BFDD_PROFILE, profname);
|
|
|
|
}
|
|
|
|
} else /* Save the current profile name (in case it doesn't exist). */
|
|
|
|
bs->profile_name = XSTRDUP(MTYPE_BFDD_PROFILE, profname);
|
|
|
|
|
|
|
|
/* Look up new profile to apply. */
|
|
|
|
bp = bfd_profile_lookup(profname);
|
|
|
|
|
|
|
|
/* Point to profile if it exists. */
|
|
|
|
bs->profile = bp;
|
|
|
|
|
2020-08-10 22:17:16 +02:00
|
|
|
/* Apply configuration. */
|
|
|
|
bfd_session_apply(bs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Recompute the session's operational parameters from its configuration.
 *
 * Peer-specific configuration (bs->peer_profile) takes precedence over
 * the attached profile: a peer value equal to the protocol default is
 * treated as "unset" and falls back to the profile value. When no
 * profile is attached, the peer profile is used as the fallback too,
 * so the result is the peer value either way.
 *
 * Side effects: may toggle echo mode, passive mode and administrative
 * shutdown, trigger a poll sequence, and push the update to the data
 * plane.
 */
void bfd_session_apply(struct bfd_session *bs)
{
	struct bfd_profile *bp;
	/* Snapshot current intervals to detect changes at the end. */
	uint32_t min_tx = bs->timers.desired_min_tx;
	uint32_t min_rx = bs->timers.required_min_rx;

	/* Pick the source of configuration. */
	bp = bs->profile ? bs->profile : &bs->peer_profile;

	/* Set multiplier if not the default. */
	if (bs->peer_profile.detection_multiplier == BFD_DEFDETECTMULT)
		bs->detect_mult = bp->detection_multiplier;
	else
		bs->detect_mult = bs->peer_profile.detection_multiplier;

	/* Set timers if not the default. */
	if (bs->peer_profile.min_tx == BFD_DEFDESIREDMINTX)
		bs->timers.desired_min_tx = bp->min_tx;
	else
		bs->timers.desired_min_tx = bs->peer_profile.min_tx;

	if (bs->peer_profile.min_rx == BFD_DEFREQUIREDMINRX)
		bs->timers.required_min_rx = bp->min_rx;
	else
		bs->timers.required_min_rx = bs->peer_profile.min_rx;

	/* We can only apply echo options on single hop sessions. */
	if (!CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) {
		/* Configure echo timers if they were default. */
		if (bs->peer_profile.min_echo_rx == BFD_DEF_REQ_MIN_ECHO_RX)
			bs->timers.required_min_echo_rx = bp->min_echo_rx;
		else
			bs->timers.required_min_echo_rx =
				bs->peer_profile.min_echo_rx;

		if (bs->peer_profile.min_echo_tx == BFD_DEF_DES_MIN_ECHO_TX)
			bs->timers.desired_min_echo_tx = bp->min_echo_tx;
		else
			bs->timers.desired_min_echo_tx =
				bs->peer_profile.min_echo_tx;

		/* Toggle echo if default value. */
		if (bs->peer_profile.echo_mode == false)
			bfd_set_echo(bs, bp->echo_mode);
		else
			bfd_set_echo(bs, bs->peer_profile.echo_mode);
	} else {
		/* Configure the TTL packet filter. */
		if (bs->peer_profile.minimum_ttl == BFD_DEF_MHOP_TTL)
			bs->mh_ttl = bp->minimum_ttl;
		else
			bs->mh_ttl = bs->peer_profile.minimum_ttl;
	}

	/* Toggle 'passive-mode' if default value. */
	if (bs->peer_profile.passive == false)
		bfd_set_passive_mode(bs, bp->passive);
	else
		bfd_set_passive_mode(bs, bs->peer_profile.passive);

	/*
	 * Toggle 'no shutdown' if default value. Kept near the end so
	 * the session isn't started with stale timers.
	 */
	if (bs->peer_profile.admin_shutdown == false)
		bfd_set_shutdown(bs, bp->admin_shutdown);
	else
		bfd_set_shutdown(bs, bs->peer_profile.admin_shutdown);

	/* If session interval changed negotiate new timers. */
	if (bs->ses_state == PTM_BFD_UP
	    && (bs->timers.desired_min_tx != min_tx
		|| bs->timers.required_min_rx != min_rx))
		bfd_set_polling(bs);

	/* Send updated information to data plane. */
	bfd_dplane_update_session(bs);
}
|
|
|
|
|
|
|
|
void bfd_profile_remove(struct bfd_session *bs)
|
|
|
|
{
|
|
|
|
/* Remove any previous set profile name. */
|
|
|
|
XFREE(MTYPE_BFDD_PROFILE, bs->profile_name);
|
2020-08-10 22:17:16 +02:00
|
|
|
bs->profile = NULL;
|
2020-05-15 22:38:04 +02:00
|
|
|
|
2020-08-10 22:17:16 +02:00
|
|
|
bfd_session_apply(bs);
|
2020-05-15 22:38:04 +02:00
|
|
|
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
/*
 * Fill `key` with the session hash lookup fields: address family, peer
 * and local addresses, hop type, interface name and VRF name.
 *
 * An empty VRF name is normalized to the default VRF name. Address
 * families other than IPv4/IPv6 leave the address fields zeroed.
 */
void gen_bfd_key(struct bfd_key *key, struct sockaddr_any *peer,
		 struct sockaddr_any *local, bool mhop, const char *ifname,
		 const char *vrfname)
{
	memset(key, 0, sizeof(*key));

	if (peer->sa_sin.sin_family == AF_INET) {
		key->family = AF_INET;
		memcpy(&key->peer, &peer->sa_sin.sin_addr,
		       sizeof(peer->sa_sin.sin_addr));
		memcpy(&key->local, &local->sa_sin.sin_addr,
		       sizeof(local->sa_sin.sin_addr));
	} else if (peer->sa_sin.sin_family == AF_INET6) {
		key->family = AF_INET6;
		memcpy(&key->peer, &peer->sa_sin6.sin6_addr,
		       sizeof(peer->sa_sin6.sin6_addr));
		memcpy(&key->local, &local->sa_sin6.sin6_addr,
		       sizeof(local->sa_sin6.sin6_addr));
	}

	key->mhop = mhop;
	if (ifname && ifname[0])
		strlcpy(key->ifname, ifname, sizeof(key->ifname));
	if (vrfname && vrfname[0])
		strlcpy(key->vrfname, vrfname, sizeof(key->vrfname));
	else
		strlcpy(key->vrfname, VRF_DEFAULT_NAME, sizeof(key->vrfname));
}
|
|
|
|
|
2018-06-27 16:29:02 +02:00
|
|
|
struct bfd_session *bs_peer_find(struct bfd_peer_cfg *bpc)
|
|
|
|
{
|
|
|
|
struct bfd_session *bs;
|
|
|
|
struct peer_label *pl;
|
2019-03-11 19:09:15 +01:00
|
|
|
struct bfd_key key;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
/* Try to find label first. */
|
|
|
|
if (bpc->bpc_has_label) {
|
|
|
|
pl = pl_find(bpc->bpc_label);
|
|
|
|
if (pl != NULL) {
|
|
|
|
bs = pl->pl_bs;
|
|
|
|
return bs;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Otherwise fallback to peer/local hash lookup. */
|
2019-03-11 19:09:15 +01:00
|
|
|
gen_bfd_key(&key, &bpc->bpc_peer, &bpc->bpc_local, bpc->bpc_mhop,
|
|
|
|
bpc->bpc_localif, bpc->bpc_vrfname);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
return bfd_key_lookup(key);
|
2019-02-02 12:57:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Starts a disabled BFD session.
|
|
|
|
*
|
|
|
|
* A session is disabled when the specified interface/VRF doesn't exist
|
|
|
|
* yet. It might happen on FRR boot or with virtual interfaces.
|
|
|
|
*/
|
|
|
|
int bfd_session_enable(struct bfd_session *bs)
|
|
|
|
{
|
|
|
|
struct interface *ifp = NULL;
|
|
|
|
struct vrf *vrf = NULL;
|
|
|
|
int psock;
|
|
|
|
|
2020-08-18 18:06:48 +02:00
|
|
|
/* We are using data plane, we don't need software. */
|
|
|
|
if (bs->bdc)
|
|
|
|
return 0;
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/*
|
|
|
|
* If the interface or VRF doesn't exist, then we must register
|
|
|
|
* the session but delay its start.
|
|
|
|
*/
|
2019-03-25 17:20:35 +01:00
|
|
|
if (bs->key.vrfname[0]) {
|
|
|
|
vrf = vrf_lookup_by_name(bs->key.vrfname);
|
2019-02-02 12:57:08 +01:00
|
|
|
if (vrf == NULL) {
|
2020-04-10 16:06:22 +02:00
|
|
|
zlog_err(
|
2021-10-14 20:06:38 +02:00
|
|
|
"session-enable: specified VRF %s doesn't exists.",
|
|
|
|
bs->key.vrfname);
|
2019-02-02 12:57:08 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2021-10-14 20:06:38 +02:00
|
|
|
} else {
|
|
|
|
vrf = vrf_lookup_by_id(VRF_DEFAULT);
|
2019-02-02 12:57:08 +01:00
|
|
|
}
|
|
|
|
|
2021-10-14 20:06:38 +02:00
|
|
|
assert(vrf);
|
2021-01-08 10:34:20 +01:00
|
|
|
|
2019-03-25 17:20:35 +01:00
|
|
|
if (bs->key.ifname[0]) {
|
2021-10-14 20:06:38 +02:00
|
|
|
ifp = if_lookup_by_name(bs->key.ifname, vrf->vrf_id);
|
2019-03-25 17:20:35 +01:00
|
|
|
if (ifp == NULL) {
|
2020-04-10 16:06:22 +02:00
|
|
|
zlog_err(
|
2021-01-08 16:17:28 +01:00
|
|
|
"session-enable: specified interface %s (VRF %s) doesn't exist.",
|
2021-10-14 20:06:38 +02:00
|
|
|
bs->key.ifname, vrf->name);
|
2019-02-02 12:57:08 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Assign interface/VRF pointers. */
|
|
|
|
bs->vrf = vrf;
|
|
|
|
|
2020-08-18 18:06:48 +02:00
|
|
|
/* Assign interface pointer (if any). */
|
|
|
|
bs->ifp = ifp;
|
|
|
|
|
|
|
|
/* Attempt to use data plane. */
|
|
|
|
if (bglobal.bg_use_dplane && bfd_dplane_add_session(bs) == 0) {
|
|
|
|
control_notify_config(BCM_NOTIFY_CONFIG_ADD, bs);
|
|
|
|
return 0;
|
|
|
|
}
|
2019-02-02 12:57:08 +01:00
|
|
|
|
2019-03-12 01:26:13 +01:00
|
|
|
/* Sanity check: don't leak open sockets. */
|
|
|
|
if (bs->sock != -1) {
|
2020-04-13 12:36:23 +02:00
|
|
|
if (bglobal.debug_peer_event)
|
2022-09-05 10:39:35 +02:00
|
|
|
zlog_debug("%s: previous socket open", __func__);
|
2020-04-13 12:36:23 +02:00
|
|
|
|
2019-03-12 01:26:13 +01:00
|
|
|
close(bs->sock);
|
|
|
|
bs->sock = -1;
|
|
|
|
}
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/*
|
|
|
|
* Get socket for transmitting control packets. Note that if we
|
|
|
|
* could use the destination port (3784) for the source
|
|
|
|
* port we wouldn't need a socket per session.
|
|
|
|
*/
|
2020-04-09 21:52:49 +02:00
|
|
|
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_IPV6) == 0) {
|
2019-02-02 12:57:08 +01:00
|
|
|
psock = bp_peer_socket(bs);
|
|
|
|
if (psock == -1)
|
2019-03-12 01:26:13 +01:00
|
|
|
return 0;
|
2019-02-02 12:57:08 +01:00
|
|
|
} else {
|
|
|
|
psock = bp_peer_socketv6(bs);
|
|
|
|
if (psock == -1)
|
2019-03-12 01:26:13 +01:00
|
|
|
return 0;
|
2019-02-02 12:57:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We've got a valid socket, lets start the timers and the
|
|
|
|
* protocol.
|
|
|
|
*/
|
|
|
|
bs->sock = psock;
|
2020-08-06 21:25:44 +02:00
|
|
|
|
|
|
|
/* Only start timers if we are using active mode. */
|
|
|
|
if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE) == 0) {
|
|
|
|
bfd_recvtimer_update(bs);
|
|
|
|
ptm_bfd_start_xmt_timer(bs, false);
|
|
|
|
}
|
2019-02-02 12:57:08 +01:00
|
|
|
|
2022-07-26 01:02:46 +02:00
|
|
|
/* initialize RTT */
|
|
|
|
bfd_rtt_init(bs);
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Disabled a running BFD session.
|
|
|
|
*
|
|
|
|
* A session is disabled when the specified interface/VRF gets removed
|
|
|
|
* (e.g. virtual interfaces).
|
|
|
|
*/
|
|
|
|
void bfd_session_disable(struct bfd_session *bs)
|
|
|
|
{
|
2020-08-18 18:06:48 +02:00
|
|
|
/* We are using data plane, we don't need software. */
|
|
|
|
if (bs->bdc)
|
|
|
|
return;
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/* Free up socket resources. */
|
|
|
|
if (bs->sock != -1) {
|
|
|
|
close(bs->sock);
|
|
|
|
bs->sock = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable all timers. */
|
|
|
|
bfd_recvtimer_delete(bs);
|
|
|
|
bfd_xmttimer_delete(bs);
|
2019-10-11 18:12:26 +02:00
|
|
|
ptm_bfd_echo_stop(bs);
|
|
|
|
|
|
|
|
/* Set session down so it doesn't report UP and disabled. */
|
|
|
|
ptm_bfd_sess_dn(bs, BD_PATH_DOWN);
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static uint32_t ptm_bfd_gen_ID(void)
|
|
|
|
{
|
2019-02-01 12:50:06 +01:00
|
|
|
uint32_t session_id;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-02-01 12:50:06 +01:00
|
|
|
/*
|
|
|
|
* RFC 5880, Section 6.8.1. recommends that we should generate
|
|
|
|
* random session identification numbers.
|
|
|
|
*/
|
|
|
|
do {
|
2020-04-17 15:35:15 +02:00
|
|
|
session_id = ((frr_weak_random() << 16) & 0xFFFF0000)
|
|
|
|
| (frr_weak_random() & 0x0000FFFF);
|
2019-02-01 12:50:06 +01:00
|
|
|
} while (session_id == 0 || bfd_id_lookup(session_id) != NULL);
|
|
|
|
|
|
|
|
return session_id;
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Arm the control (or echo) transmission timer with a jittered
 * interval.
 *
 * From section 6.5.2: transmit interval should be randomly jittered
 * between 75% and 100% of nominal value, unless detect_mult is 1, then
 * it should be between 75% and 90%.
 */
void ptm_bfd_start_xmt_timer(struct bfd_session *bfd, bool is_echo)
{
	uint64_t interval = is_echo ? bfd->echo_xmt_TO : bfd->xmt_TO;
	int maxpercent = (bfd->detect_mult == 1) ? 16 : 26;
	uint64_t jitter;

	/* XXX remove that division below */
	jitter = (interval * (75 + (frr_weak_random() % maxpercent))) / 100;

	if (is_echo)
		bfd_echo_xmttimer_update(bfd, jitter);
	else
		bfd_xmttimer_update(bfd, jitter);
}
|
|
|
|
|
|
|
|
static void ptm_bfd_echo_xmt_TO(struct bfd_session *bfd)
|
|
|
|
{
|
|
|
|
/* Send the scheduled echo packet */
|
2022-06-07 01:40:17 +02:00
|
|
|
/* if ipv4 use the new echo implementation that causes
|
|
|
|
* the packet to be looped in forwarding plane of peer
|
|
|
|
*/
|
|
|
|
if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6) == 0)
|
|
|
|
#ifdef BFD_LINUX
|
|
|
|
ptm_bfd_echo_fp_snd(bfd);
|
|
|
|
#else
|
|
|
|
ptm_bfd_echo_snd(bfd);
|
|
|
|
#endif
|
|
|
|
else
|
|
|
|
ptm_bfd_echo_snd(bfd);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
/* Restart the timer for next time */
|
|
|
|
ptm_bfd_start_xmt_timer(bfd, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ptm_bfd_xmt_TO(struct bfd_session *bfd, int fbit)
|
|
|
|
{
|
|
|
|
/* Send the scheduled control packet */
|
|
|
|
ptm_bfd_snd(bfd, fbit);
|
|
|
|
|
|
|
|
/* Restart the timer for next time */
|
|
|
|
ptm_bfd_start_xmt_timer(bfd, false);
|
|
|
|
}
|
|
|
|
|
2019-01-30 18:49:11 +01:00
|
|
|
void ptm_bfd_echo_stop(struct bfd_session *bfd)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
bfd->echo_xmt_TO = 0;
|
|
|
|
bfd->echo_detect_TO = 0;
|
2020-04-09 21:52:49 +02:00
|
|
|
UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
bfd_echo_xmttimer_delete(bfd);
|
|
|
|
bfd_echo_recvtimer_delete(bfd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ptm_bfd_echo_start(struct bfd_session *bfd)
|
|
|
|
{
|
|
|
|
bfd->echo_detect_TO = (bfd->remote_detect_mult * bfd->echo_xmt_TO);
|
2021-03-09 23:17:47 +01:00
|
|
|
if (bfd->echo_detect_TO > 0) {
|
|
|
|
bfd_echo_recvtimer_update(bfd);
|
2018-09-13 15:09:03 +02:00
|
|
|
ptm_bfd_echo_xmt_TO(bfd);
|
2021-03-09 23:17:47 +01:00
|
|
|
}
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
2019-04-11 10:25:55 +02:00
|
|
|
/*
 * Transition the session to the UP state.
 *
 * Clears the local diagnostic, records the uptime, starts a poll
 * sequence to negotiate timers, sends a control packet right away and
 * notifies control socket clients. Statistics and the debug log entry
 * are only produced on an actual state change.
 */
void ptm_bfd_sess_up(struct bfd_session *bfd)
{
	int old_state = bfd->ses_state;

	bfd->local_diag = 0;
	bfd->ses_state = PTM_BFD_UP;
	monotime(&bfd->uptime);

	/* Connection is up, lets negotiate timers. */
	bfd_set_polling(bfd);

	/* Start sending control packets with poll bit immediately. */
	ptm_bfd_snd(bfd, 0);

	control_notify(bfd, bfd->ses_state);

	if (old_state != bfd->ses_state) {
		bfd->stats.session_up++;
		if (bglobal.debug_peer_event)
			zlog_debug("state-change: [%s] %s -> %s",
				   bs_to_string(bfd), state_list[old_state].str,
				   state_list[bfd->ses_state].str);
	}
}
|
|
|
|
|
2019-04-11 10:25:55 +02:00
|
|
|
/*
 * Transition the session to the DOWN state with the given diagnostic
 * code.
 *
 * Resets negotiation state (remote discriminator, polling, demand
 * mode), reverts to slow timers, stops echo transmission, and — for
 * passive sessions — stops all control packet timers. Clients are only
 * notified on an UP -> DOWN transition.
 */
void ptm_bfd_sess_dn(struct bfd_session *bfd, uint8_t diag)
{
	int old_state = bfd->ses_state;

	bfd->local_diag = diag;
	bfd->discrs.remote_discr = 0;
	bfd->ses_state = PTM_BFD_DOWN;
	bfd->polling = 0;
	bfd->demand_mode = 0;
	monotime(&bfd->downtime);

	/*
	 * Only attempt to send if we have a valid socket:
	 * this function might be called by session disablers and in
	 * this case we won't have a valid socket (i.e. interface was
	 * removed or VRF doesn't exist anymore).
	 */
	if (bfd->sock != -1)
		ptm_bfd_snd(bfd, 0);

	/* Slow down the control packets, the connection is down. */
	bs_set_slow_timers(bfd);

	/* only signal clients when going from up->down state */
	if (old_state == PTM_BFD_UP)
		control_notify(bfd, PTM_BFD_DOWN);

	/* Stop echo packet transmission if they are active */
	if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_ECHO_ACTIVE))
		ptm_bfd_echo_stop(bfd);

	/* Stop attempting to transmit or expect control packets if passive. */
	if (CHECK_FLAG(bfd->flags, BFD_SESS_FLAG_PASSIVE)) {
		bfd_recvtimer_delete(bfd);
		bfd_xmttimer_delete(bfd);
	}

	if (old_state != bfd->ses_state) {
		bfd->stats.session_down++;
		if (bglobal.debug_peer_event)
			zlog_debug("state-change: [%s] %s -> %s reason:%s",
				   bs_to_string(bfd), state_list[old_state].str,
				   state_list[bfd->ses_state].str,
				   get_diag_str(bfd->local_diag));
	}

	/* clear peer's mac address */
	UNSET_FLAG(bfd->flags, BFD_SESS_FLAG_MAC_SET);
	memset(bfd->peer_hw_addr, 0, sizeof(bfd->peer_hw_addr));
	/* reset local address; it might have changed while the session was up */
	memset(&bfd->local_address, 0, sizeof(bfd->local_address));

	/* reset RTT */
	bfd_rtt_init(bfd);
}
|
|
|
|
|
|
|
|
static struct bfd_session *bfd_find_disc(struct sockaddr_any *sa,
|
|
|
|
uint32_t ldisc)
|
|
|
|
{
|
|
|
|
struct bfd_session *bs;
|
|
|
|
|
|
|
|
bs = bfd_id_lookup(ldisc);
|
|
|
|
if (bs == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
switch (bs->key.family) {
|
2018-06-27 16:29:02 +02:00
|
|
|
case AF_INET:
|
2019-03-11 19:09:15 +01:00
|
|
|
if (memcmp(&sa->sa_sin.sin_addr, &bs->key.peer,
|
|
|
|
sizeof(sa->sa_sin.sin_addr)))
|
|
|
|
return NULL;
|
2018-06-27 16:29:02 +02:00
|
|
|
break;
|
|
|
|
case AF_INET6:
|
2019-03-11 19:09:15 +01:00
|
|
|
if (memcmp(&sa->sa_sin6.sin6_addr, &bs->key.peer,
|
|
|
|
sizeof(sa->sa_sin6.sin6_addr)))
|
|
|
|
return NULL;
|
2018-06-27 16:29:02 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
return bs;
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
2019-02-01 12:22:00 +01:00
|
|
|
struct bfd_session *ptm_bfd_sess_find(struct bfd_pkt *cp,
|
2018-06-27 16:29:02 +02:00
|
|
|
struct sockaddr_any *peer,
|
|
|
|
struct sockaddr_any *local,
|
2021-10-14 17:55:33 +02:00
|
|
|
struct interface *ifp,
|
|
|
|
vrf_id_t vrfid,
|
2019-02-01 12:22:00 +01:00
|
|
|
bool is_mhop)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
2019-03-11 19:09:15 +01:00
|
|
|
struct vrf *vrf;
|
|
|
|
struct bfd_key key;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2018-07-25 03:32:36 +02:00
|
|
|
/* Find our session using the ID signaled by the remote end. */
|
|
|
|
if (cp->discrs.remote_discr)
|
|
|
|
return bfd_find_disc(peer, ntohl(cp->discrs.remote_discr));
|
|
|
|
|
2021-10-14 17:55:33 +02:00
|
|
|
/* Search for session without using discriminator. */
|
|
|
|
vrf = vrf_lookup_by_id(vrfid);
|
2018-07-25 03:32:36 +02:00
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
gen_bfd_key(&key, peer, local, is_mhop, ifp ? ifp->name : NULL,
|
2019-05-23 21:09:24 +02:00
|
|
|
vrf ? vrf->name : VRF_DEFAULT_NAME);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2018-07-25 03:32:36 +02:00
|
|
|
/* XXX maybe remoteDiscr should be checked for remoteHeard cases. */
|
2019-03-11 19:09:15 +01:00
|
|
|
return bfd_key_lookup(key);
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
/* Control packet transmission timer callback: send and re-arm. */
void bfd_xmt_cb(struct event *t)
{
	ptm_bfd_xmt_TO(THREAD_ARG(t), 0);
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
void bfd_echo_xmt_cb(struct event *t)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
struct bfd_session *bs = THREAD_ARG(t);
|
|
|
|
|
2018-09-13 15:09:03 +02:00
|
|
|
if (bs->echo_xmt_TO > 0)
|
|
|
|
ptm_bfd_echo_xmt_TO(bs);
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Was ptm_bfd_detect_TO() */
|
2022-03-01 22:18:12 +01:00
|
|
|
void bfd_recvtimer_cb(struct event *t)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
struct bfd_session *bs = THREAD_ARG(t);
|
|
|
|
|
|
|
|
switch (bs->ses_state) {
|
|
|
|
case PTM_BFD_INIT:
|
|
|
|
case PTM_BFD_UP:
|
2019-04-11 10:25:55 +02:00
|
|
|
ptm_bfd_sess_dn(bs, BD_CONTROL_EXPIRED);
|
2018-06-27 16:29:02 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Was ptm_bfd_echo_detect_TO() */
|
2022-03-01 22:18:12 +01:00
|
|
|
void bfd_echo_recvtimer_cb(struct event *t)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
struct bfd_session *bs = THREAD_ARG(t);
|
|
|
|
|
|
|
|
switch (bs->ses_state) {
|
|
|
|
case PTM_BFD_INIT:
|
|
|
|
case PTM_BFD_UP:
|
2019-04-11 10:25:55 +02:00
|
|
|
ptm_bfd_sess_dn(bs, BD_ECHO_FAILED);
|
2018-06-27 16:29:02 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-23 20:22:08 +02:00
|
|
|
/*
 * Allocate a new session structure initialized with protocol defaults:
 * default timers and multiplier, DOWN state, slow initial timers,
 * default remote settings and no open socket.
 */
struct bfd_session *bfd_session_new(void)
{
	struct bfd_session *bs;

	bs = XCALLOC(MTYPE_BFDD_CONFIG, sizeof(*bs));

	/* Set peer session defaults. */
	bfd_profile_set_default(&bs->peer_profile);

	bs->timers.desired_min_tx = BFD_DEFDESIREDMINTX;
	bs->timers.required_min_rx = BFD_DEFREQUIREDMINRX;
	bs->timers.required_min_echo_rx = BFD_DEF_REQ_MIN_ECHO_RX;
	bs->timers.desired_min_echo_tx = BFD_DEF_DES_MIN_ECHO_TX;
	bs->detect_mult = BFD_DEFDETECTMULT;
	bs->mh_ttl = BFD_DEF_MHOP_TTL;
	bs->ses_state = PTM_BFD_DOWN;

	/* Initiate connection with slow timers. */
	bs_set_slow_timers(bs);

	/* Initiate remote settings as well. */
	bs->remote_timers = bs->cur_timers;
	bs->remote_detect_mult = BFD_DEFDETECTMULT;

	/* No socket yet; bfd_session_enable() will open one. */
	bs->sock = -1;
	monotime(&bs->uptime);
	bs->downtime = bs->uptime;

	return bs;
}
|
|
|
|
|
|
|
|
int bfd_session_update_label(struct bfd_session *bs, const char *nlabel)
|
|
|
|
{
|
|
|
|
/* New label treatment:
|
|
|
|
* - Check if the label is taken;
|
|
|
|
* - Try to allocate the memory for it and register;
|
|
|
|
*/
|
|
|
|
if (bs->pl == NULL) {
|
|
|
|
if (pl_find(nlabel) != NULL) {
|
|
|
|
/* Someone is already using it. */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-04-13 19:23:03 +02:00
|
|
|
pl_new(nlabel, bs);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Test label change consistency:
|
|
|
|
* - Do nothing if it's the same label;
|
|
|
|
* - Check if the future label is already taken;
|
|
|
|
* - Change label;
|
|
|
|
*/
|
|
|
|
if (strcmp(nlabel, bs->pl->pl_label) == 0)
|
|
|
|
return -1;
|
|
|
|
if (pl_find(nlabel) != NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
strlcpy(bs->pl->pl_label, nlabel, sizeof(bs->pl->pl_label));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Copy a peer configuration into the session and apply it.
 *
 * Each explicitly-set value is mirrored into bs->peer_profile so
 * bfd_session_apply() can tell peer-specific config apart from
 * profile defaults. Configured intervals are multiplied by 1000
 * (config units to the internal timer units).
 */
static void _bfd_session_update(struct bfd_session *bs,
				struct bfd_peer_cfg *bpc)
{
	if (bpc->bpc_has_txinterval) {
		bs->timers.desired_min_tx = bpc->bpc_txinterval * 1000;
		bs->peer_profile.min_tx = bs->timers.desired_min_tx;
	}

	if (bpc->bpc_has_recvinterval) {
		bs->timers.required_min_rx = bpc->bpc_recvinterval * 1000;
		bs->peer_profile.min_rx = bs->timers.required_min_rx;
	}

	if (bpc->bpc_has_detectmultiplier) {
		bs->detect_mult = bpc->bpc_detectmultiplier;
		bs->peer_profile.detection_multiplier = bs->detect_mult;
	}

	if (bpc->bpc_has_echorecvinterval) {
		bs->timers.required_min_echo_rx = bpc->bpc_echorecvinterval * 1000;
		bs->peer_profile.min_echo_rx = bs->timers.required_min_echo_rx;
	}

	if (bpc->bpc_has_echotxinterval) {
		bs->timers.desired_min_echo_tx = bpc->bpc_echotxinterval * 1000;
		bs->peer_profile.min_echo_tx = bs->timers.desired_min_echo_tx;
	}

	if (bpc->bpc_has_label)
		bfd_session_update_label(bs, bpc->bpc_label);

	if (bpc->bpc_cbit)
		SET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT);
	else
		UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CBIT);

	if (bpc->bpc_has_minimum_ttl) {
		bs->mh_ttl = bpc->bpc_minimum_ttl;
		bs->peer_profile.minimum_ttl = bpc->bpc_minimum_ttl;
	}

	bs->peer_profile.echo_mode = bpc->bpc_echo;
	bfd_set_echo(bs, bpc->bpc_echo);

	/*
	 * Shutdown needs to be the last in order to avoid timers enable when
	 * the session is disabled.
	 */
	bs->peer_profile.admin_shutdown = bpc->bpc_shutdown;
	bfd_set_passive_mode(bs, bpc->bpc_passive);
	bfd_set_shutdown(bs, bpc->bpc_shutdown);

	/*
	 * Apply profile last: it also calls `bfd_set_shutdown`.
	 *
	 * There is no problem calling `shutdown` twice if the value doesn't
	 * change or if it is overridden by peer specific configuration.
	 */
	if (bpc->bpc_has_profile)
		bfd_profile_apply(bpc->bpc_profile, bs);
}
|
|
|
|
|
|
|
|
static int bfd_session_update(struct bfd_session *bs, struct bfd_peer_cfg *bpc)
|
|
|
|
{
|
|
|
|
/* User didn't want to update, return failure. */
|
|
|
|
if (bpc->bpc_createonly)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
_bfd_session_update(bs, bpc);
|
|
|
|
|
|
|
|
control_notify_config(BCM_NOTIFY_CONFIG_UPDATE, bs);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-05-23 20:22:08 +02:00
|
|
|
void bfd_session_free(struct bfd_session *bs)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
2019-02-02 12:57:08 +01:00
|
|
|
struct bfd_session_observer *bso;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
bfd_session_disable(bs);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2020-08-18 18:06:48 +02:00
|
|
|
/* Remove session from data plane if any. */
|
|
|
|
bfd_dplane_delete_session(bs);
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
bfd_key_delete(bs->key);
|
|
|
|
bfd_id_delete(bs->discrs.my_discr);
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/* Remove observer if any. */
|
|
|
|
TAILQ_FOREACH(bso, &bglobal.bg_obslist, bso_entry) {
|
|
|
|
if (bso->bso_bs != bs)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (bso != NULL)
|
|
|
|
bs_observer_del(bso);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
pl_free(bs->pl);
|
|
|
|
|
2020-05-15 22:38:04 +02:00
|
|
|
XFREE(MTYPE_BFDD_PROFILE, bs->profile_name);
|
2018-06-27 16:29:02 +02:00
|
|
|
XFREE(MTYPE_BFDD_CONFIG, bs);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct bfd_session *ptm_bfd_sess_new(struct bfd_peer_cfg *bpc)
|
|
|
|
{
|
|
|
|
struct bfd_session *bfd, *l_bfd;
|
|
|
|
|
|
|
|
/* check to see if this needs a new session */
|
|
|
|
l_bfd = bs_peer_find(bpc);
|
|
|
|
if (l_bfd) {
|
|
|
|
/* Requesting a duplicated peer means update configuration. */
|
|
|
|
if (bfd_session_update(l_bfd, bpc) == 0)
|
|
|
|
return l_bfd;
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/* Get BFD session storage with its defaults. */
|
|
|
|
bfd = bfd_session_new();
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/*
|
|
|
|
* Store interface/VRF name in case we need to delay session
|
|
|
|
* start. See `bfd_session_enable` for more information.
|
|
|
|
*/
|
|
|
|
if (bpc->bpc_has_localif)
|
2019-03-11 19:09:15 +01:00
|
|
|
strlcpy(bfd->key.ifname, bpc->bpc_localif,
|
|
|
|
sizeof(bfd->key.ifname));
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
if (bpc->bpc_has_vrfname)
|
2019-03-11 19:09:15 +01:00
|
|
|
strlcpy(bfd->key.vrfname, bpc->bpc_vrfname,
|
|
|
|
sizeof(bfd->key.vrfname));
|
2019-05-23 21:09:24 +02:00
|
|
|
else
|
|
|
|
strlcpy(bfd->key.vrfname, VRF_DEFAULT_NAME,
|
|
|
|
sizeof(bfd->key.vrfname));
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/* Copy remaining data. */
|
|
|
|
if (bpc->bpc_ipv4 == false)
|
2020-04-09 21:52:49 +02:00
|
|
|
SET_FLAG(bfd->flags, BFD_SESS_FLAG_IPV6);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
bfd->key.family = (bpc->bpc_ipv4) ? AF_INET : AF_INET6;
|
|
|
|
switch (bfd->key.family) {
|
|
|
|
case AF_INET:
|
|
|
|
memcpy(&bfd->key.peer, &bpc->bpc_peer.sa_sin.sin_addr,
|
|
|
|
sizeof(bpc->bpc_peer.sa_sin.sin_addr));
|
|
|
|
memcpy(&bfd->key.local, &bpc->bpc_local.sa_sin.sin_addr,
|
|
|
|
sizeof(bpc->bpc_local.sa_sin.sin_addr));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AF_INET6:
|
|
|
|
memcpy(&bfd->key.peer, &bpc->bpc_peer.sa_sin6.sin6_addr,
|
|
|
|
sizeof(bpc->bpc_peer.sa_sin6.sin6_addr));
|
|
|
|
memcpy(&bfd->key.local, &bpc->bpc_local.sa_sin6.sin6_addr,
|
|
|
|
sizeof(bpc->bpc_local.sa_sin6.sin6_addr));
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
assert(1);
|
|
|
|
break;
|
2019-02-02 12:57:08 +01:00
|
|
|
}
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
if (bpc->bpc_mhop)
|
2020-04-09 21:52:49 +02:00
|
|
|
SET_FLAG(bfd->flags, BFD_SESS_FLAG_MH);
|
2019-03-11 19:09:15 +01:00
|
|
|
|
|
|
|
bfd->key.mhop = bpc->bpc_mhop;
|
|
|
|
|
2019-05-23 20:22:08 +02:00
|
|
|
if (bs_registrate(bfd) == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Apply other configurations. */
|
|
|
|
_bfd_session_update(bfd, bpc);
|
|
|
|
|
|
|
|
return bfd;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Insert `bfd` into the lookup structures, allocate its local
 * discriminator and try to enable the session.
 *
 * Returns `bfd` on success. On unrecoverable enable failure the
 * session is freed and NULL is returned — the caller must not touch
 * the pointer afterwards.
 */
struct bfd_session *bs_registrate(struct bfd_session *bfd)
{
	/* Registrate session into data structures. */
	bfd_key_insert(bfd);
	bfd->discrs.my_discr = ptm_bfd_gen_ID();
	bfd_id_insert(bfd);

	/* Try to enable session and schedule for packet receive/send. */
	if (bfd_session_enable(bfd) == -1) {
		/* Unrecoverable failure, remove the session/peer. */
		bfd_session_free(bfd);
		return NULL;
	}

	/* Add observer if we have moving parts. */
	if (bfd->key.ifname[0] || bfd->key.vrfname[0] || bfd->sock == -1)
		bs_observer_add(bfd);

	if (bglobal.debug_peer_event)
		zlog_debug("session-new: %s", bs_to_string(bfd));

	/* Tell the watchers a new session was configured. */
	control_notify_config(BCM_NOTIFY_CONFIG_ADD, bfd);

	return bfd;
}
|
|
|
|
|
2019-04-11 10:25:55 +02:00
|
|
|
/*
 * Remove the session matching `bpc`.
 *
 * Returns 0 on success, -1 when no session matches or when the
 * session is still referenced (refcount > 0) and must not be deleted
 * yet.
 */
int ptm_bfd_sess_del(struct bfd_peer_cfg *bpc)
{
	struct bfd_session *bs;

	/* Find session and call free(). */
	bs = bs_peer_find(bpc);
	if (bs == NULL)
		return -1;

	/* This pointer is being referenced, don't let it be deleted. */
	if (bs->refcount > 0) {
		zlog_err("session-delete: refcount failure: %" PRIu64" references",
			 bs->refcount);
		return -1;
	}

	if (bglobal.debug_peer_event)
		zlog_debug("%s: %s", __func__, bs_to_string(bs));

	/* Tell the watchers before the session goes away. */
	control_notify_config(BCM_NOTIFY_CONFIG_DELETE, bs);

	bfd_session_free(bs);

	return 0;
}
|
|
|
|
|
2018-07-10 16:00:09 +02:00
|
|
|
/*
 * Flag the session to start a Poll Sequence on subsequently
 * transmitted packets. The flag is managed elsewhere once the remote
 * system responds with the Final bit.
 */
void bfd_set_polling(struct bfd_session *bs)
{
	/*
	 * Start polling procedure: the only timers that require polling
	 * to change value without losing connection are:
	 *
	 * - Desired minimum transmission interval;
	 * - Required minimum receive interval;
	 *
	 * RFC 5880, Section 6.8.3.
	 */
	bs->polling = 1;
}
|
|
|
|
|
2019-01-15 23:23:06 +01:00
|
|
|
/*
|
|
|
|
* bs_<state>_handler() functions implement the BFD state machine
|
|
|
|
* transition mechanism. `<state>` is the current session state and
|
|
|
|
* the parameter `nstate` is the peer new state.
|
|
|
|
*/
|
2019-02-05 01:05:39 +01:00
|
|
|
/* Local state AdmDown: every neighbor state transition is ignored. */
static void bs_admin_down_handler(struct bfd_session *bs
					  __attribute__((__unused__)),
				  int nstate __attribute__((__unused__)))
{
	/*
	 * We are administratively down, there is no state machine
	 * handling.
	 */
}
|
|
|
|
|
2019-02-05 01:05:39 +01:00
|
|
|
/*
 * Local state Down: react to the neighbor state reported in `nstate`.
 * Part of the RFC 5880 Section 6.2 state machine (see
 * `bs_state_handler` for the dispatch).
 */
static void bs_down_handler(struct bfd_session *bs, int nstate)
{
	switch (nstate) {
	case PTM_BFD_ADM_DOWN:
		/*
		 * Remote peer doesn't want to talk, so lets keep the
		 * connection down.
		 */
		/* FALLTHROUGH — handled the same as remote Up. */
	case PTM_BFD_UP:
		/* Peer can't be up yet, wait it go to 'init' or 'down'. */
		break;

	case PTM_BFD_DOWN:
		/*
		 * Remote peer agreed that the path is down, lets try to
		 * bring it up.
		 */
		bs->ses_state = PTM_BFD_INIT;

		/*
		 * RFC 5880, Section 6.1.
		 * A system taking the Passive role MUST NOT begin
		 * sending BFD packets for a particular session until
		 * it has received a BFD packet for that session, and thus
		 * has learned the remote system's discriminator value.
		 *
		 * Now we can start transmission timer in passive mode.
		 */
		if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE))
			ptm_bfd_xmt_TO(bs, 0);

		break;

	case PTM_BFD_INIT:
		/*
		 * Remote peer told us his path is up, lets turn
		 * activate the session.
		 */
		ptm_bfd_sess_up(bs);
		break;

	default:
		if (bglobal.debug_peer_event)
			zlog_debug("state-change: unhandled neighbor state: %d",
				   nstate);
		break;
	}
}
|
|
|
|
|
2019-02-05 01:05:39 +01:00
|
|
|
/*
 * Local state Init: react to the neighbor state reported in `nstate`.
 * Part of the RFC 5880 Section 6.2 state machine.
 */
static void bs_init_handler(struct bfd_session *bs, int nstate)
{
	switch (nstate) {
	case PTM_BFD_ADM_DOWN:
		/*
		 * Remote peer doesn't want to talk, so lets make the
		 * connection down.
		 */
		ptm_bfd_sess_dn(bs, BD_NEIGHBOR_DOWN);
		break;

	case PTM_BFD_DOWN:
		/* Remote peer hasn't moved to first stage yet. */
		break;

	case PTM_BFD_INIT:
	case PTM_BFD_UP:
		/* We agreed on the settings and the path is up. */
		ptm_bfd_sess_up(bs);
		break;

	default:
		if (bglobal.debug_peer_event)
			zlog_debug("state-change: unhandled neighbor state: %d",
				   nstate);
		break;
	}
}
|
|
|
|
|
2019-02-05 01:05:39 +01:00
|
|
|
static void bs_up_handler(struct bfd_session *bs, int nstate)
|
2019-01-15 23:23:06 +01:00
|
|
|
{
|
|
|
|
switch (nstate) {
|
|
|
|
case PTM_BFD_ADM_DOWN:
|
|
|
|
case PTM_BFD_DOWN:
|
|
|
|
/* Peer lost or asked to shutdown connection. */
|
2019-04-11 10:25:55 +02:00
|
|
|
ptm_bfd_sess_dn(bs, BD_NEIGHBOR_DOWN);
|
2019-01-15 23:23:06 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
case PTM_BFD_INIT:
|
|
|
|
case PTM_BFD_UP:
|
|
|
|
/* Path is up and working. */
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2020-04-13 12:36:23 +02:00
|
|
|
if (bglobal.debug_peer_event)
|
|
|
|
zlog_debug("state-change: unhandled neighbor state: %d",
|
|
|
|
nstate);
|
2019-01-15 23:23:06 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Session state machine entry point: dispatch the neighbor state
 * `nstate` to the handler matching our current local state.
 */
void bs_state_handler(struct bfd_session *bs, int nstate)
{
	switch (bs->ses_state) {
	case PTM_BFD_ADM_DOWN:
		bs_admin_down_handler(bs, nstate);
		break;
	case PTM_BFD_DOWN:
		bs_down_handler(bs, nstate);
		break;
	case PTM_BFD_INIT:
		bs_init_handler(bs, nstate);
		break;
	case PTM_BFD_UP:
		bs_up_handler(bs, nstate);
		break;

	default:
		/* Should not happen: local state is driven by this daemon. */
		if (bglobal.debug_peer_event)
			zlog_debug("state-change: [%s] is in invalid state: %d",
				   bs_to_string(bs), nstate);
		break;
	}
}
|
|
|
|
|
2019-01-29 20:33:16 +01:00
|
|
|
/*
|
|
|
|
* Handles echo timer manipulation after updating timer.
|
|
|
|
*/
|
|
|
|
/*
 * Handles echo timer manipulation after updating timer.
 *
 * Starts, restarts or stops echo transmission depending on the
 * session flags and the minimum echo interval advertised by the
 * remote system.
 */
void bs_echo_timer_handler(struct bfd_session *bs)
{
	uint32_t old_timer;

	/*
	 * Before doing any echo handling, check if it is possible to
	 * use it.
	 *
	 * - Check for `echo-mode` configuration.
	 * - Check that we are not using multi hop (RFC 5883,
	 *   Section 3).
	 * - Check that we are already at the up state.
	 */
	if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) == 0
	    || CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)
	    || bs->ses_state != PTM_BFD_UP)
		return;

	/* Remote peer asked to stop echo. */
	if (bs->remote_timers.required_min_echo == 0) {
		if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE))
			ptm_bfd_echo_stop(bs);

		return;
	}

	/*
	 * Calculate the echo transmission timer: we must not send
	 * echo packets faster than the minimum required time
	 * announced by the remote system.
	 *
	 * RFC 5880, Section 6.8.9.
	 */
	old_timer = bs->echo_xmt_TO;
	if (bs->remote_timers.required_min_echo > bs->timers.desired_min_echo_tx)
		bs->echo_xmt_TO = bs->remote_timers.required_min_echo;
	else
		bs->echo_xmt_TO = bs->timers.desired_min_echo_tx;

	/* (Re)start echo transmission when newly active or the
	 * interval changed. */
	if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO_ACTIVE) == 0
	    || old_timer != bs->echo_xmt_TO)
		ptm_bfd_echo_start(bs);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* RFC 5880 Section 6.5.
|
|
|
|
*
|
|
|
|
* When a BFD control packet with the final bit is received, we must
|
|
|
|
* update the session parameters.
|
|
|
|
*/
|
|
|
|
/*
 * RFC 5880 Section 6.5.
 *
 * When a BFD control packet with the final bit is received, we must
 * update the session parameters: commit the negotiated timers and
 * reschedule transmission accordingly.
 */
void bs_final_handler(struct bfd_session *bs)
{
	/* Start using our new timers. */
	bs->cur_timers.desired_min_tx = bs->timers.desired_min_tx;
	bs->cur_timers.required_min_rx = bs->timers.required_min_rx;

	/*
	 * TODO: demand mode. See RFC 5880 Section 6.1.
	 *
	 * When using demand mode we must disable the detection timer
	 * for lost control packets.
	 */
	if (bs->demand_mode) {
		/* Notify watchers about changed timers. */
		control_notify_config(BCM_NOTIFY_CONFIG_UPDATE, bs);
		return;
	}

	/*
	 * Calculate transmission time based on new timers.
	 *
	 * Transmission calculation:
	 * Unless specified by exceptions at the end of Section 6.8.7, the
	 * transmission time will be determined by the system with the
	 * slowest rate.
	 *
	 * RFC 5880, Section 6.8.7.
	 */
	if (bs->timers.desired_min_tx > bs->remote_timers.required_min_rx)
		bs->xmt_TO = bs->timers.desired_min_tx;
	else
		bs->xmt_TO = bs->remote_timers.required_min_rx;

	/* Apply new transmission timer immediately. */
	ptm_bfd_start_xmt_timer(bs, false);

	/* Notify watchers about changed timers. */
	control_notify_config(BCM_NOTIFY_CONFIG_UPDATE, bs);
}
|
|
|
|
|
2019-01-31 21:36:44 +01:00
|
|
|
/*
 * Reset the session's operating timers to the conservative defaults
 * used while the session is not Up.
 */
void bs_set_slow_timers(struct bfd_session *bs)
{
	/*
	 * BFD connection must use slow timers before going up or after
	 * losing connectivity to avoid wasting bandwidth.
	 *
	 * RFC 5880, Section 6.8.3.
	 */
	bs->cur_timers.desired_min_tx = BFD_DEF_SLOWTX;
	bs->cur_timers.required_min_rx = BFD_DEF_SLOWTX;
	bs->cur_timers.required_min_echo = 0;

	/* Set the appropriated timeouts for slow connection. */
	bs->detect_TO = (BFD_DEFDETECTMULT * BFD_DEF_SLOWTX);
	bs->xmt_TO = BFD_DEF_SLOWTX;
}
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2020-05-15 16:32:47 +02:00
|
|
|
/*
 * Enable or disable echo mode on the session. No-op when the
 * requested mode is already in effect. For software-driven sessions
 * (no data plane, `bs->bdc == NULL`) the echo timers are adjusted
 * here as well.
 */
void bfd_set_echo(struct bfd_session *bs, bool echo)
{
	bool enabled = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_ECHO) != 0;

	/* Requested mode already in place: nothing to do. */
	if (enabled == echo)
		return;

	if (echo) {
		SET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO);

		/* Activate/update echo receive timeout timer. */
		if (bs->bdc == NULL)
			bs_echo_timer_handler(bs);
	} else {
		UNSET_FLAG(bs->flags, BFD_SESS_FLAG_ECHO);

		/* Deactivate timeout timer. */
		if (bs->bdc == NULL)
			ptm_bfd_echo_stop(bs);
	}
}
|
|
|
|
|
|
|
|
/*
 * Administratively shut down or re-enable the session.
 *
 * Shutting down moves the session to AdmDown, disables all timers and
 * sends one last packet announcing the state. Re-enabling moves it to
 * Down and restarts the timers (unless the session is passive).
 * Sessions offloaded to a data plane (`bs->bdc`) are updated through
 * the data plane instead of manipulating local timers.
 */
void bfd_set_shutdown(struct bfd_session *bs, bool shutdown)
{
	bool is_shutdown;

	/*
	 * Special case: we are batching changes and the previous state was
	 * not shutdown. Instead of potentially disconnect a running peer,
	 * we'll get the current status to validate we were really down.
	 */
	if (bs->ses_state == PTM_BFD_UP)
		is_shutdown = false;
	else
		is_shutdown = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);

	if (shutdown) {
		/* Already shutdown. */
		if (is_shutdown)
			return;

		SET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);

		/* Handle data plane shutdown case. */
		if (bs->bdc) {
			bs->ses_state = PTM_BFD_ADM_DOWN;
			bfd_dplane_update_session(bs);
			control_notify(bs, bs->ses_state);
			return;
		}

		/* Disable all events. */
		bfd_recvtimer_delete(bs);
		bfd_echo_recvtimer_delete(bs);
		bfd_xmttimer_delete(bs);
		bfd_echo_xmttimer_delete(bs);

		/* Change and notify state change. */
		bs->ses_state = PTM_BFD_ADM_DOWN;
		control_notify(bs, bs->ses_state);

		/* Don't try to send packets with a disabled session. */
		if (bs->sock != -1)
			ptm_bfd_snd(bs, 0);
	} else {
		/* Already working. */
		if (!is_shutdown)
			return;

		UNSET_FLAG(bs->flags, BFD_SESS_FLAG_SHUTDOWN);

		/* Handle data plane shutdown case. */
		if (bs->bdc) {
			bs->ses_state = PTM_BFD_DOWN;
			bfd_dplane_update_session(bs);
			control_notify(bs, bs->ses_state);
			return;
		}

		/* Change and notify state change. */
		bs->ses_state = PTM_BFD_DOWN;
		control_notify(bs, bs->ses_state);

		/* Enable timers if non passive, otherwise stop them. */
		if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE)) {
			bfd_recvtimer_delete(bs);
			bfd_xmttimer_delete(bs);
		} else {
			bfd_recvtimer_update(bs);
			bfd_xmttimer_update(bs, bs->xmt_TO);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Switch the session between passive and active roles. The timer
 * changes only matter while the session is in the Down state; an
 * established session is left untouched.
 */
void bfd_set_passive_mode(struct bfd_session *bs, bool passive)
{
	if (passive)
		SET_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE);
	else
		UNSET_FLAG(bs->flags, BFD_SESS_FLAG_PASSIVE);

	/* Session is already up and running, nothing to do now. */
	if (bs->ses_state != PTM_BFD_DOWN)
		return;

	if (passive) {
		/* Lets disable the timers since we are now passive. */
		bfd_recvtimer_delete(bs);
		bfd_xmttimer_delete(bs);
	} else {
		/* Session is down, let it attempt to start the connection. */
		bfd_xmttimer_update(bs, bs->xmt_TO);
		bfd_recvtimer_update(bs);
	}
}
|
|
|
|
|
2018-06-27 16:29:02 +02:00
|
|
|
/*
|
|
|
|
* Helper functions.
|
|
|
|
*/
|
|
|
|
static const char *get_diag_str(int diag)
|
|
|
|
{
|
|
|
|
for (int i = 0; diag_list[i].str; i++) {
|
|
|
|
if (diag_list[i].type == diag)
|
|
|
|
return diag_list[i].str;
|
|
|
|
}
|
|
|
|
return "N/A";
|
|
|
|
}
|
|
|
|
|
2020-04-13 20:18:17 +02:00
|
|
|
const char *satostr(const struct sockaddr_any *sa)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
#define INETSTR_BUFCOUNT 8
|
|
|
|
static char buf[INETSTR_BUFCOUNT][INET6_ADDRSTRLEN];
|
|
|
|
static int bufidx;
|
2020-04-13 20:18:17 +02:00
|
|
|
const struct sockaddr_in *sin = &sa->sa_sin;
|
|
|
|
const struct sockaddr_in6 *sin6 = &sa->sa_sin6;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
bufidx += (bufidx + 1) % INETSTR_BUFCOUNT;
|
|
|
|
buf[bufidx][0] = 0;
|
|
|
|
|
|
|
|
switch (sin->sin_family) {
|
|
|
|
case AF_INET:
|
|
|
|
inet_ntop(AF_INET, &sin->sin_addr, buf[bufidx],
|
|
|
|
sizeof(buf[bufidx]));
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
inet_ntop(AF_INET6, &sin6->sin6_addr, buf[bufidx],
|
|
|
|
sizeof(buf[bufidx]));
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
strlcpy(buf[bufidx], "unknown", sizeof(buf[bufidx]));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return buf[bufidx];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Translate an RFC 5880 diagnostic code into a human readable string.
 * Codes outside the 0..8 range map to "unknown".
 */
const char *diag2str(uint8_t diag)
{
	static const char *const descr[] = {
		[0] = "ok",
		[1] = "control detection time expired",
		[2] = "echo function failed",
		[3] = "neighbor signaled session down",
		[4] = "forwarding plane reset",
		[5] = "path down",
		[6] = "concatenated path down",
		[7] = "administratively down",
		[8] = "reverse concatenated path down",
	};

	if (diag < sizeof(descr) / sizeof(descr[0]))
		return descr[diag];

	return "unknown";
}
|
|
|
|
|
|
|
|
/*
 * Parse `addr` as an IPv4 or IPv6 literal into `sa` (IPv4 is tried
 * first). Returns 0 on success, -1 when the string parses as neither.
 * `sa` is zeroed even on failure.
 */
int strtosa(const char *addr, struct sockaddr_any *sa)
{
	memset(sa, 0, sizeof(*sa));

	if (inet_pton(AF_INET, addr, &sa->sa_sin.sin_addr) == 1) {
		sa->sa_sin.sin_family = AF_INET;
#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
		sa->sa_sin.sin_len = sizeof(sa->sa_sin);
#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
		return 0;
	}

	if (inet_pton(AF_INET6, addr, &sa->sa_sin6.sin6_addr) == 1) {
		sa->sa_sin6.sin6_family = AF_INET6;
#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
		sa->sa_sin6.sin6_len = sizeof(sa->sa_sin6);
#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN */
		return 0;
	}

	return -1;
}
|
|
|
|
|
|
|
|
/*
 * Render an uptime in seconds as a human readable string, e.g.
 * "1 month(s), 2 day(s), 3 second(s)". A month is approximated as 30
 * days and a year as 12 such months.
 *
 * On truncation or formatting error the output stops at the last
 * component that fully fit in `buf`.
 */
void integer2timestr(uint64_t time, char *buf, size_t buflen)
{
	uint64_t year, month, day, hour, minute, second;
	int rv;

#define MINUTES (60)
#define HOURS (60 * MINUTES)
#define DAYS (24 * HOURS)
#define MONTHS (30 * DAYS)
#define YEARS (12 * MONTHS)
	if (time >= YEARS) {
		year = time / YEARS;
		time -= year * YEARS;

		rv = snprintfrr(buf, buflen, "%" PRIu64 " year(s), ", year);
		/*
		 * `rv` is the would-be length; if it is negative (error)
		 * or >= buflen (truncation), `buflen -= rv` would wrap the
		 * unsigned size and `buf += rv` would point past the end.
		 */
		if (rv < 0 || (size_t)rv >= buflen)
			return;
		buf += rv;
		buflen -= rv;
	}
	if (time >= MONTHS) {
		month = time / MONTHS;
		time -= month * MONTHS;

		rv = snprintfrr(buf, buflen, "%" PRIu64 " month(s), ", month);
		if (rv < 0 || (size_t)rv >= buflen)
			return;
		buf += rv;
		buflen -= rv;
	}
	if (time >= DAYS) {
		day = time / DAYS;
		time -= day * DAYS;

		rv = snprintfrr(buf, buflen, "%" PRIu64 " day(s), ", day);
		if (rv < 0 || (size_t)rv >= buflen)
			return;
		buf += rv;
		buflen -= rv;
	}
	if (time >= HOURS) {
		hour = time / HOURS;
		time -= hour * HOURS;

		rv = snprintfrr(buf, buflen, "%" PRIu64 " hour(s), ", hour);
		if (rv < 0 || (size_t)rv >= buflen)
			return;
		buf += rv;
		buflen -= rv;
	}
	if (time >= MINUTES) {
		minute = time / MINUTES;
		time -= minute * MINUTES;

		rv = snprintfrr(buf, buflen, "%" PRIu64 " minute(s), ", minute);
		if (rv < 0 || (size_t)rv >= buflen)
			return;
		buf += rv;
		buflen -= rv;
	}
	second = time % MINUTES;
	snprintfrr(buf, buflen, "%" PRIu64 " second(s)", second);
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
/*
 * Render a short human readable description of the session key:
 * "mhop:... peer:... local:..." plus optional " vrf:..." and
 * " ifname:..." parts.
 *
 * NOTE(review): returns a pointer to a single static buffer, so the
 * result is overwritten by the next call — not reentrant or thread
 * safe.
 */
const char *bs_to_string(const struct bfd_session *bs)
{
	static char buf[256];
	char addr_buf[INET6_ADDRSTRLEN];
	int pos;
	bool is_mhop = CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH);

	pos = snprintf(buf, sizeof(buf), "mhop:%s", is_mhop ? "yes" : "no");
	pos += snprintf(buf + pos, sizeof(buf) - pos, " peer:%s",
			inet_ntop(bs->key.family, &bs->key.peer, addr_buf,
				  sizeof(addr_buf)));
	pos += snprintf(buf + pos, sizeof(buf) - pos, " local:%s",
			inet_ntop(bs->key.family, &bs->key.local, addr_buf,
				  sizeof(addr_buf)));
	if (bs->key.vrfname[0])
		pos += snprintf(buf + pos, sizeof(buf) - pos, " vrf:%s",
				bs->key.vrfname);
	if (bs->key.ifname[0])
		pos += snprintf(buf + pos, sizeof(buf) - pos, " ifname:%s",
				bs->key.ifname);

	/*
	 * NOTE(review): `pos` accumulates snprintf would-be lengths; if the
	 * rendered text ever exceeded sizeof(buf), `buf + pos` would point
	 * past the buffer. Presumably the key fields are small enough for
	 * 256 bytes — worth confirming.
	 */
	(void)pos;

	return buf;
}
|
|
|
|
|
2019-02-02 12:57:08 +01:00
|
|
|
/*
 * Register `bs` in the global observer list so interface/VRF/address
 * events elsewhere can find and re-evaluate the session.
 * Always returns 0.
 */
int bs_observer_add(struct bfd_session *bs)
{
	struct bfd_session_observer *bso;

	bso = XCALLOC(MTYPE_BFDD_SESSION_OBSERVER, sizeof(*bso));
	bso->bso_bs = bs;
	/* Remember the local address the session key is bound to. */
	bso->bso_addr.family = bs->key.family;
	memcpy(&bso->bso_addr.u.prefix, &bs->key.local,
	       sizeof(bs->key.local));

	TAILQ_INSERT_TAIL(&bglobal.bg_obslist, bso, bso_entry);

	return 0;
}
|
|
|
|
|
|
|
|
/* Unlink `bso` from the global observer list and free it. */
void bs_observer_del(struct bfd_session_observer *bso)
{
	TAILQ_REMOVE(&bglobal.bg_obslist, bso, bso_entry);
	XFREE(MTYPE_BFDD_SESSION_OBSERVER, bso);
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
void bs_to_bpc(struct bfd_session *bs, struct bfd_peer_cfg *bpc)
|
|
|
|
{
|
|
|
|
memset(bpc, 0, sizeof(*bpc));
|
|
|
|
|
|
|
|
bpc->bpc_ipv4 = (bs->key.family == AF_INET);
|
|
|
|
bpc->bpc_mhop = bs->key.mhop;
|
|
|
|
|
|
|
|
switch (bs->key.family) {
|
|
|
|
case AF_INET:
|
|
|
|
bpc->bpc_peer.sa_sin.sin_family = AF_INET;
|
|
|
|
memcpy(&bpc->bpc_peer.sa_sin.sin_addr, &bs->key.peer,
|
|
|
|
sizeof(bpc->bpc_peer.sa_sin.sin_addr));
|
|
|
|
|
|
|
|
if (memcmp(&bs->key.local, &zero_addr, sizeof(bs->key.local))) {
|
|
|
|
bpc->bpc_local.sa_sin.sin_family = AF_INET6;
|
2019-03-19 21:22:58 +01:00
|
|
|
memcpy(&bpc->bpc_local.sa_sin.sin_addr, &bs->key.local,
|
2019-03-11 19:09:15 +01:00
|
|
|
sizeof(bpc->bpc_local.sa_sin.sin_addr));
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case AF_INET6:
|
|
|
|
bpc->bpc_peer.sa_sin.sin_family = AF_INET6;
|
|
|
|
memcpy(&bpc->bpc_peer.sa_sin6.sin6_addr, &bs->key.peer,
|
|
|
|
sizeof(bpc->bpc_peer.sa_sin6.sin6_addr));
|
|
|
|
|
|
|
|
bpc->bpc_local.sa_sin6.sin6_family = AF_INET6;
|
2019-03-19 21:22:58 +01:00
|
|
|
memcpy(&bpc->bpc_local.sa_sin6.sin6_addr, &bs->key.local,
|
2019-03-11 19:09:15 +01:00
|
|
|
sizeof(bpc->bpc_local.sa_sin6.sin6_addr));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bs->key.ifname[0]) {
|
|
|
|
bpc->bpc_has_localif = true;
|
|
|
|
strlcpy(bpc->bpc_localif, bs->key.ifname,
|
|
|
|
sizeof(bpc->bpc_localif));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bs->key.vrfname[0]) {
|
|
|
|
bpc->bpc_has_vrfname = true;
|
|
|
|
strlcpy(bpc->bpc_vrfname, bs->key.vrfname,
|
|
|
|
sizeof(bpc->bpc_vrfname));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* BFD hash data structures to find sessions.
|
|
|
|
*/
|
|
|
|
static struct hash *bfd_id_hash;
|
2019-03-11 19:09:15 +01:00
|
|
|
static struct hash *bfd_key_hash;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-05-14 22:19:07 +02:00
|
|
|
static unsigned int bfd_id_hash_do(const void *p);
|
|
|
|
static unsigned int bfd_key_hash_do(const void *p);
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-02-19 16:46:52 +01:00
|
|
|
static void _bfd_free(struct hash_bucket *hb,
|
2018-06-27 16:29:02 +02:00
|
|
|
void *arg __attribute__((__unused__)));
|
|
|
|
|
|
|
|
/* BFD hash for our discriminator. */
|
2019-05-14 22:19:07 +02:00
|
|
|
static unsigned int bfd_id_hash_do(const void *p)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
2019-05-14 22:19:07 +02:00
|
|
|
const struct bfd_session *bs = p;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
|
|
|
return jhash_1word(bs->discrs.my_discr, 0);
|
|
|
|
}
|
|
|
|
|
2018-10-17 21:27:12 +02:00
|
|
|
static bool bfd_id_hash_cmp(const void *n1, const void *n2)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
const struct bfd_session *bs1 = n1, *bs2 = n2;
|
|
|
|
|
|
|
|
return bs1->discrs.my_discr == bs2->discrs.my_discr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* BFD hash for single hop. */
|
2019-05-14 22:19:07 +02:00
|
|
|
static unsigned int bfd_key_hash_do(const void *p)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
2019-05-14 22:19:07 +02:00
|
|
|
const struct bfd_session *bs = p;
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
struct bfd_key key = bs->key;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
/*
|
|
|
|
* Local address and interface name are optional and
|
|
|
|
* can be filled any time after session creation.
|
|
|
|
* Hash key should not depend on these fields.
|
|
|
|
*/
|
|
|
|
memset(&key.local, 0, sizeof(key.local));
|
|
|
|
memset(key.ifname, 0, sizeof(key.ifname));
|
|
|
|
|
|
|
|
return jhash(&key, sizeof(key), 0);
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
static bool bfd_key_hash_cmp(const void *n1, const void *n2)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
|
|
|
const struct bfd_session *bs1 = n1, *bs2 = n2;
|
|
|
|
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
if (bs1->key.family != bs2->key.family)
|
|
|
|
return false;
|
|
|
|
if (bs1->key.mhop != bs2->key.mhop)
|
|
|
|
return false;
|
|
|
|
if (memcmp(&bs1->key.peer, &bs2->key.peer, sizeof(bs1->key.peer)))
|
|
|
|
return false;
|
|
|
|
if (memcmp(bs1->key.vrfname, bs2->key.vrfname,
|
|
|
|
sizeof(bs1->key.vrfname)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local address is optional and can be empty.
|
|
|
|
* If both addresses are not empty and different,
|
|
|
|
* then the keys are different.
|
|
|
|
*/
|
|
|
|
if (memcmp(&bs1->key.local, &zero_addr, sizeof(bs1->key.local))
|
|
|
|
&& memcmp(&bs2->key.local, &zero_addr, sizeof(bs2->key.local))
|
|
|
|
&& memcmp(&bs1->key.local, &bs2->key.local, sizeof(bs1->key.local)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interface name is optional and can be empty.
|
|
|
|
* If both names are not empty and different,
|
|
|
|
* then the keys are different.
|
|
|
|
*/
|
|
|
|
if (bs1->key.ifname[0] && bs2->key.ifname[0]
|
|
|
|
&& memcmp(bs1->key.ifname, bs2->key.ifname,
|
|
|
|
sizeof(bs1->key.ifname)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Hash public interface / exported functions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Lookup functions. */
|
|
|
|
struct bfd_session *bfd_id_lookup(uint32_t id)
|
|
|
|
{
|
|
|
|
struct bfd_session bs;
|
|
|
|
|
|
|
|
bs.discrs.my_discr = id;
|
|
|
|
|
|
|
|
return hash_lookup(bfd_id_hash, &bs);
|
|
|
|
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
struct bfd_session *bfd_key_lookup(struct bfd_key key)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
struct bfd_session bs;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
bs.key = key;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
return hash_lookup(bfd_key_hash, &bs);
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Delete functions.
|
|
|
|
*
|
|
|
|
* Delete functions searches and remove the item from the hash and
|
|
|
|
* returns a pointer to the removed item data. If the item was not found
|
|
|
|
* then it returns NULL.
|
|
|
|
*
|
|
|
|
* The data stored inside the hash is not free()ed, so you must do it
|
|
|
|
* manually after getting the pointer back.
|
|
|
|
*/
|
|
|
|
struct bfd_session *bfd_id_delete(uint32_t id)
|
|
|
|
{
|
|
|
|
struct bfd_session bs;
|
|
|
|
|
|
|
|
bs.discrs.my_discr = id;
|
|
|
|
|
|
|
|
return hash_release(bfd_id_hash, &bs);
|
|
|
|
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
struct bfd_session *bfd_key_delete(struct bfd_key key)
|
2018-06-27 16:29:02 +02:00
|
|
|
{
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
struct bfd_session bs;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
bs.key = key;
|
2018-06-27 16:29:02 +02:00
|
|
|
|
bfdd: fix session lookup
BFD key has optional fields "local" and "ifname" which can be empty when
the BFD session is created. In this case, the hash key will be calculated
with these fields filled with zeroes.
Later, when we're looking for the BFD session using the key with fields
"local" and "ifname" populated with actual values, the hash key will be
different. To work around this issue, we're doing multiple hash lookups,
first with full key, then with fields "local" and "ifname" filled with
zeroes.
But there may be another case when the initial key has the actual values
for "local" and "ifname", but the key we're using for lookup has empty
values. This case is covered for IPv4 by using additional hash walk with
bfd_key_lookup_ignore_partial_walker function but is not covered for IPv6.
Instead of introducing more hacks and workarounds, the following solution
is proposed:
- the hash key is always calculated in bfd_key_hash_do using only
required fields
- the hash data is compared in bfd_key_hash_cmp, taking into account the
fact that fields "local" and "ifname" may be empty
Using this solution, it's enough to make only one hash lookup.
Signed-off-by: Igor Ryzhov <iryzhov@nfware.com>
2021-02-02 23:02:15 +01:00
|
|
|
return hash_release(bfd_key_hash, &bs);
|
2018-06-27 16:29:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Iteration functions. */
|
|
|
|
/* Runs `hif(bucket, arg)` over every session in the discriminator hash. */
void bfd_id_iterate(hash_iter_func hif, void *arg)
{
	hash_iterate(bfd_id_hash, hif, arg);
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
/* Runs `hif(bucket, arg)` over every session in the key hash. */
void bfd_key_iterate(hash_iter_func hif, void *arg)
{
	hash_iterate(bfd_key_hash, hif, arg);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Insert functions.
|
|
|
|
*
|
|
|
|
* Inserts session into hash and returns `true` on success, otherwise
|
|
|
|
* `false`.
|
|
|
|
*/
|
|
|
|
/*
 * Inserts `bs` into the discriminator hash. Returns `true` on success;
 * `false` means a session with the same discriminator already exists
 * (hash_get returned the pre-existing entry instead of ours).
 */
bool bfd_id_insert(struct bfd_session *bs)
{
	return (hash_get(bfd_id_hash, bs, hash_alloc_intern) == bs);
}
|
|
|
|
|
2019-03-11 19:09:15 +01:00
|
|
|
/*
 * Inserts `bs` into the key hash. Returns `true` on success; `false`
 * means a session with an equal key already exists.
 */
bool bfd_key_insert(struct bfd_session *bs)
{
	return (hash_get(bfd_key_hash, bs, hash_alloc_intern) == bs);
}
|
|
|
|
|
|
|
|
/*
 * Creates the global session hashes (by discriminator and by key) and
 * initializes the profile list. Must run before any session is created.
 */
void bfd_initialize(void)
{
	bfd_id_hash = hash_create(bfd_id_hash_do, bfd_id_hash_cmp,
				  "BFD session discriminator hash");
	bfd_key_hash = hash_create(bfd_key_hash_do, bfd_key_hash_cmp,
				   "BFD session hash");
	TAILQ_INIT(&bplist);
}
|
|
|
|
|
2019-02-19 16:46:52 +01:00
|
|
|
/* Hash-iteration callback: frees the session stored in the bucket. */
static void _bfd_free(struct hash_bucket *hb,
		      void *arg __attribute__((__unused__)))
{
	struct bfd_session *bs = hb->data;

	bfd_session_free(bs);
}
|
|
|
|
|
|
|
|
/*
 * Daemon teardown: frees every session, both hashes and all profiles.
 * Nothing may use the hashes after this returns.
 */
void bfd_shutdown(void)
{
	struct bfd_profile *bp;

	/*
	 * Close and free all BFD sessions.
	 *
	 * _bfd_free() will call bfd_session_free() which will take care
	 * of removing the session from all hashes, so we just run an
	 * assert() here to make sure it really happened.
	 */
	bfd_id_iterate(_bfd_free, NULL);
	assert(bfd_key_hash->count == 0);

	/* Now free the hashes themselves. */
	hash_free(bfd_id_hash);
	hash_free(bfd_key_hash);

	/* Free all profile allocations. */
	while ((bp = TAILQ_FIRST(&bplist)) != NULL)
		bfd_profile_free(bp);
}
|
2019-03-25 17:57:36 +01:00
|
|
|
|
2019-05-23 21:09:24 +02:00
|
|
|
/* Cursor state shared with _bfd_session_next() during a hash walk. */
struct bfd_session_iterator {
	int bsi_stop;		/* 1 once the current entry was found */
	bool bsi_mhop;		/* filter: match multi-hop sessions only */
	const struct bfd_session *bsi_bs; /* in: current entry; out: next */
};
|
|
|
|
|
2019-07-02 18:05:27 +02:00
|
|
|
/*
 * hash_walk callback implementing "give me the entry after bsi_bs".
 *
 * Works in two phases: first it scans until it finds the current entry
 * (setting bsi_stop), then it returns the next entry whose hop type
 * matches bsi_mhop. A NULL bsi_bs on entry means "start from the first
 * matching entry". After the walk, bsi_stop==1 with bsi_bs==NULL means
 * the current entry was the last one.
 */
static int _bfd_session_next(struct hash_bucket *hb, void *arg)
{
	struct bfd_session_iterator *bsi = arg;
	struct bfd_session *bs = hb->data;

	/* Previous entry signaled stop. */
	if (bsi->bsi_stop == 1) {
		/* Match the single/multi hop sessions. */
		if (bs->key.mhop != bsi->bsi_mhop)
			return HASHWALK_CONTINUE;

		bsi->bsi_bs = bs;
		return HASHWALK_ABORT;
	}

	/* We found the current item, stop in the next one. */
	if (bsi->bsi_bs == hb->data) {
		bsi->bsi_stop = 1;
		/* Set entry to NULL to signal end of list. */
		bsi->bsi_bs = NULL;
	} else if (bsi->bsi_bs == NULL && bsi->bsi_mhop == bs->key.mhop) {
		/* We want the first list item. */
		bsi->bsi_stop = 1;
		bsi->bsi_bs = hb->data;
		return HASHWALK_ABORT;
	}

	return HASHWALK_CONTINUE;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bfd_session_next: uses the current session to find the next.
|
|
|
|
*
|
|
|
|
* `bs` might point to NULL to get the first item of the data structure.
|
|
|
|
*/
|
|
|
|
const struct bfd_session *bfd_session_next(const struct bfd_session *bs,
|
|
|
|
bool mhop)
|
|
|
|
{
|
|
|
|
struct bfd_session_iterator bsi;
|
|
|
|
|
|
|
|
bsi.bsi_stop = 0;
|
|
|
|
bsi.bsi_bs = bs;
|
|
|
|
bsi.bsi_mhop = mhop;
|
|
|
|
hash_walk(bfd_key_hash, _bfd_session_next, &bsi);
|
|
|
|
if (bsi.bsi_stop == 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return bsi.bsi_bs;
|
|
|
|
}
|
|
|
|
|
2019-07-02 18:05:27 +02:00
|
|
|
/*
 * Hash-iteration callback: drops the CLI-configured reference from the
 * session and frees it once no other holder (e.g. a protocol client)
 * still references it.
 */
static void _bfd_session_remove_manual(struct hash_bucket *hb,
				       void *arg __attribute__((__unused__)))
{
	struct bfd_session *bs = hb->data;

	/* Delete only manually configured sessions. */
	if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG) == 0)
		return;

	bs->refcount--;
	UNSET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);

	/* Don't delete sessions still in use. */
	if (bs->refcount != 0)
		return;

	bfd_session_free(bs);
}
|
|
|
|
|
|
|
|
/*
 * bfd_sessions_remove_manual: remove all manually configured sessions.
 *
 * NOTE: this function doesn't remove automatically created sessions.
 */
void bfd_sessions_remove_manual(void)
{
	hash_iterate(bfd_key_hash, _bfd_session_remove_manual, NULL);
}
|
|
|
|
|
2021-04-01 14:29:18 +02:00
|
|
|
void bfd_profiles_remove(void)
|
|
|
|
{
|
|
|
|
struct bfd_profile *bp;
|
|
|
|
|
|
|
|
while ((bp = TAILQ_FIRST(&bplist)) != NULL)
|
|
|
|
bfd_profile_free(bp);
|
|
|
|
}
|
|
|
|
|
2020-05-15 22:38:04 +02:00
|
|
|
/*
|
|
|
|
* Profile related hash functions.
|
|
|
|
*/
|
|
|
|
static void _bfd_profile_update(struct hash_bucket *hb, void *arg)
|
|
|
|
{
|
|
|
|
struct bfd_profile *bp = arg;
|
|
|
|
struct bfd_session *bs = hb->data;
|
|
|
|
|
|
|
|
/* This session is not using the profile. */
|
|
|
|
if (bs->profile_name == NULL || strcmp(bs->profile_name, bp->name) != 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
bfd_profile_apply(bp->name, bs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Re-applies the (changed) profile `bp` to every session that uses it. */
void bfd_profile_update(struct bfd_profile *bp)
{
	hash_iterate(bfd_key_hash, _bfd_profile_update, bp);
}
|
|
|
|
|
2020-05-20 20:09:12 +02:00
|
|
|
static void _bfd_profile_detach(struct hash_bucket *hb, void *arg)
|
|
|
|
{
|
|
|
|
struct bfd_profile *bp = arg;
|
|
|
|
struct bfd_session *bs = hb->data;
|
|
|
|
|
|
|
|
/* This session is not using the profile. */
|
|
|
|
if (bs->profile_name == NULL || strcmp(bs->profile_name, bp->name) != 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
bfd_profile_remove(bs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Detaches profile `bp` from every session currently bound to it. */
static void bfd_profile_detach(struct bfd_profile *bp)
{
	hash_iterate(bfd_key_hash, _bfd_profile_detach, bp);
}
|
|
|
|
|
2019-05-23 20:22:08 +02:00
|
|
|
/*
|
|
|
|
* VRF related functions.
|
|
|
|
*/
|
2019-03-25 17:57:36 +01:00
|
|
|
/* VRF lifecycle hook: a VRF was created. Only logs; resources are
 * allocated later in bfd_vrf_enable(). */
static int bfd_vrf_new(struct vrf *vrf)
{
	if (bglobal.debug_zebra)
		zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);

	return 0;
}
|
|
|
|
|
|
|
|
/* VRF lifecycle hook: a VRF was deleted. Only logs; resources were
 * released earlier in bfd_vrf_disable(). */
static int bfd_vrf_delete(struct vrf *vrf)
{
	if (bglobal.debug_zebra)
		zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * VRF lifecycle hook: a VRF became usable. Allocates the per-VRF
 * context on first enable, opens the BFD control/echo sockets (unless
 * the data plane handles packet I/O) and arms the read events.
 * For non-default VRFs it also registers with zebra and enables any
 * sessions waiting on this VRF.
 */
static int bfd_vrf_enable(struct vrf *vrf)
{
	struct bfd_vrf_global *bvrf;

	/* a different name */
	if (!vrf->info) {
		bvrf = XCALLOC(MTYPE_BFDD_VRF, sizeof(struct bfd_vrf_global));
		bvrf->vrf = vrf;
		vrf->info = (void *)bvrf;

		/* Disable sockets if using data plane. */
		if (bglobal.bg_use_dplane) {
			bvrf->bg_shop = -1;
			bvrf->bg_mhop = -1;
			bvrf->bg_shop6 = -1;
			bvrf->bg_mhop6 = -1;
			bvrf->bg_echo = -1;
			bvrf->bg_echov6 = -1;
		}
	} else
		bvrf = vrf->info;

	if (bglobal.debug_zebra)
		zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id);

	/* Open each socket only once (0 == never opened; -1 == dplane). */
	if (!bvrf->bg_shop)
		bvrf->bg_shop = bp_udp_shop(vrf);
	if (!bvrf->bg_mhop)
		bvrf->bg_mhop = bp_udp_mhop(vrf);
	if (!bvrf->bg_shop6)
		bvrf->bg_shop6 = bp_udp6_shop(vrf);
	if (!bvrf->bg_mhop6)
		bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
	if (!bvrf->bg_echo)
		bvrf->bg_echo = bp_echo_socket(vrf);
	if (!bvrf->bg_echov6)
		bvrf->bg_echov6 = bp_echov6_socket(vrf);

	/* Arm a read event per open socket (skip the -1 dplane stubs). */
	if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
		thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
				&bvrf->bg_ev[0]);
	if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
		thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
				&bvrf->bg_ev[1]);
	if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
		thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
				&bvrf->bg_ev[2]);
	if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
		thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
				&bvrf->bg_ev[3]);
	if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
		thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
				&bvrf->bg_ev[4]);
	if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
		thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
				&bvrf->bg_ev[5]);

	if (vrf->vrf_id != VRF_DEFAULT) {
		bfdd_zclient_register(vrf->vrf_id);
		bfdd_sessions_enable_vrf(vrf);
	}
	return 0;
}
|
|
|
|
|
|
|
|
static int bfd_vrf_disable(struct vrf *vrf)
|
|
|
|
{
|
2019-03-26 10:23:18 +01:00
|
|
|
struct bfd_vrf_global *bvrf;
|
|
|
|
|
|
|
|
if (!vrf->info)
|
2019-03-25 17:57:36 +01:00
|
|
|
return 0;
|
2019-03-26 10:23:18 +01:00
|
|
|
bvrf = vrf->info;
|
2019-03-26 12:13:38 +01:00
|
|
|
|
2019-03-26 14:48:13 +01:00
|
|
|
if (vrf->vrf_id != VRF_DEFAULT) {
|
|
|
|
bfdd_sessions_disable_vrf(vrf);
|
2019-03-26 12:13:38 +01:00
|
|
|
bfdd_zclient_unregister(vrf->vrf_id);
|
2019-03-26 14:48:13 +01:00
|
|
|
}
|
2019-03-26 12:13:38 +01:00
|
|
|
|
2020-04-13 12:36:23 +02:00
|
|
|
if (bglobal.debug_zebra)
|
|
|
|
zlog_debug("VRF disable %s id %d", vrf->name, vrf->vrf_id);
|
2019-10-11 16:15:56 +02:00
|
|
|
|
|
|
|
/* Disable read/write poll triggering. */
|
|
|
|
THREAD_OFF(bvrf->bg_ev[0]);
|
|
|
|
THREAD_OFF(bvrf->bg_ev[1]);
|
|
|
|
THREAD_OFF(bvrf->bg_ev[2]);
|
|
|
|
THREAD_OFF(bvrf->bg_ev[3]);
|
|
|
|
THREAD_OFF(bvrf->bg_ev[4]);
|
|
|
|
THREAD_OFF(bvrf->bg_ev[5]);
|
|
|
|
|
2019-03-26 10:23:18 +01:00
|
|
|
/* Close all descriptors. */
|
|
|
|
socket_close(&bvrf->bg_echo);
|
|
|
|
socket_close(&bvrf->bg_shop);
|
|
|
|
socket_close(&bvrf->bg_mhop);
|
2020-08-05 19:10:27 +02:00
|
|
|
if (bvrf->bg_shop6 != -1)
|
|
|
|
socket_close(&bvrf->bg_shop6);
|
|
|
|
if (bvrf->bg_mhop6 != -1)
|
|
|
|
socket_close(&bvrf->bg_mhop6);
|
2019-09-03 16:53:14 +02:00
|
|
|
socket_close(&bvrf->bg_echo);
|
2020-08-05 19:10:27 +02:00
|
|
|
if (bvrf->bg_echov6 != -1)
|
|
|
|
socket_close(&bvrf->bg_echov6);
|
2019-03-26 10:23:18 +01:00
|
|
|
|
|
|
|
/* free context */
|
|
|
|
XFREE(MTYPE_BFDD_VRF, bvrf);
|
|
|
|
vrf->info = NULL;
|
|
|
|
|
2019-03-25 17:57:36 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Registers the BFD VRF lifecycle callbacks with the VRF subsystem. */
void bfd_vrf_init(void)
{
	vrf_init(bfd_vrf_new, bfd_vrf_enable, bfd_vrf_disable, bfd_vrf_delete);
}
|
|
|
|
|
2019-03-26 10:23:18 +01:00
|
|
|
/* Tears down the VRF subsystem on daemon shutdown. */
void bfd_vrf_terminate(void)
{
	vrf_terminate();
}
|
|
|
|
|
|
|
|
struct bfd_vrf_global *bfd_vrf_look_by_session(struct bfd_session *bfd)
|
|
|
|
{
|
|
|
|
struct vrf *vrf;
|
|
|
|
|
|
|
|
if (!vrf_is_backend_netns()) {
|
|
|
|
vrf = vrf_lookup_by_id(VRF_DEFAULT);
|
|
|
|
if (vrf)
|
|
|
|
return (struct bfd_vrf_global *)vrf->info;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
if (!bfd)
|
|
|
|
return NULL;
|
|
|
|
if (!bfd->vrf)
|
|
|
|
return NULL;
|
|
|
|
return bfd->vrf->info;
|
|
|
|
}
|
2019-06-19 17:48:32 +02:00
|
|
|
|
2019-09-19 09:35:17 +02:00
|
|
|
/* Returns the number of sessions currently stored in the key hash. */
unsigned long bfd_get_session_count(void)
{
	return bfd_key_hash->count;
}
|
2022-07-26 01:02:46 +02:00
|
|
|
|
|
|
|
void bfd_rtt_init(struct bfd_session *bfd)
|
|
|
|
{
|
|
|
|
uint8_t i;
|
|
|
|
|
|
|
|
/* initialize RTT */
|
|
|
|
bfd->rtt_valid = 0;
|
|
|
|
bfd->rtt_index = 0;
|
|
|
|
for (i = 0; i < BFD_RTT_SAMPLE; i++)
|
|
|
|
bfd->rtt[i] = 0;
|
|
|
|
}
|