2023-02-08 13:17:09 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2015-05-20 02:40:44 +02:00
|
|
|
/* Kernel routing table updates using netlink over GNU/Linux system.
|
|
|
|
* Copyright (C) 1997, 98, 99 Kunihiro Ishiguro
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <zebra.h>
|
2018-06-19 20:29:05 +02:00
|
|
|
|
2015-05-20 02:40:44 +02:00
|
|
|
#include <sys/un.h> /* for sockaddr_un */
|
|
|
|
#include <net/if.h>
|
2018-06-19 20:29:05 +02:00
|
|
|
|
|
|
|
#include "bfd.h"
|
|
|
|
#include "buffer.h"
|
|
|
|
#include "command.h"
|
|
|
|
#include "if.h"
|
|
|
|
#include "network.h"
|
|
|
|
#include "ptm_lib.h"
|
|
|
|
#include "rib.h"
|
|
|
|
#include "stream.h"
|
2023-05-17 18:47:23 +02:00
|
|
|
#include "lib/version.h"
|
2018-06-19 20:29:05 +02:00
|
|
|
#include "vrf.h"
|
2015-07-22 22:07:08 +02:00
|
|
|
#include "vty.h"
|
2018-08-24 19:14:09 +02:00
|
|
|
#include "lib_errors.h"
|
2018-06-19 20:29:05 +02:00
|
|
|
|
2015-05-20 02:40:44 +02:00
|
|
|
#include "zebra/debug.h"
|
2018-06-19 20:29:05 +02:00
|
|
|
#include "zebra/interface.h"
|
|
|
|
#include "zebra/zebra_errors.h"
|
2015-05-20 02:40:44 +02:00
|
|
|
#include "zebra/zebra_ptm.h"
|
2015-06-12 16:59:11 +02:00
|
|
|
#include "zebra/zebra_ptm_redistribute.h"
|
2019-01-11 19:38:19 +01:00
|
|
|
#include "zebra/zebra_router.h"
|
2016-04-14 15:20:47 +02:00
|
|
|
#include "zebra_vrf.h"
|
2015-05-20 02:40:44 +02:00
|
|
|
|
2018-06-27 18:40:50 +02:00
|
|
|
/*
|
|
|
|
* Choose the BFD implementation that we'll use.
|
|
|
|
*
|
|
|
|
* There are two implementations:
|
|
|
|
* - PTM BFD: which uses an external daemon;
|
|
|
|
* - bfdd: FRR's own BFD daemon;
|
|
|
|
*/
|
|
|
|
#if HAVE_BFDD == 0

/* Reconnect backoff for the PTM daemon socket, in seconds.  The interval
 * doubles on each failed attempt (see zebra_ptm_connect()) up to the max.
 */
#define ZEBRA_PTM_RECONNECT_TIME_INITIAL 1 /* initial reconnect is 1s */
#define ZEBRA_PTM_RECONNECT_TIME_MAX 300

#define PTM_MSG_LEN 4
#define PTM_HEADER_LEN 37

/* Command verbs sent to the external ptmd daemon. */
const char ZEBRA_PTM_GET_STATUS_CMD[] = "get-status";
const char ZEBRA_PTM_BFD_START_CMD[] = "start-bfd-sess";
const char ZEBRA_PTM_BFD_STOP_CMD[] = "stop-bfd-sess";
const char ZEBRA_PTM_BFD_CLIENT_REG_CMD[] = "reg-bfd-client";
const char ZEBRA_PTM_BFD_CLIENT_DEREG_CMD[] = "dereg-bfd-client";

/* Keys and well-known values used when parsing ptmd replies. */
const char ZEBRA_PTM_CMD_STR[] = "cmd";
const char ZEBRA_PTM_CMD_STATUS_STR[] = "cmd_status";
const char ZEBRA_PTM_PORT_STR[] = "port";
const char ZEBRA_PTM_CBL_STR[] = "cbl status";
const char ZEBRA_PTM_PASS_STR[] = "pass";
const char ZEBRA_PTM_FAIL_STR[] = "fail";
const char ZEBRA_PTM_BFDSTATUS_STR[] = "state";
const char ZEBRA_PTM_BFDSTATUS_UP_STR[] = "Up";
const char ZEBRA_PTM_BFDSTATUS_DOWN_STR[] = "Down";
const char ZEBRA_PTM_BFDDEST_STR[] = "peer";
const char ZEBRA_PTM_BFDSRC_STR[] = "local";
const char ZEBRA_PTM_BFDVRF_STR[] = "vrf";
const char ZEBRA_PTM_INVALID_PORT_NAME[] = "N/A";
const char ZEBRA_PTM_INVALID_SRC_IP[] = "N/A";
const char ZEBRA_PTM_INVALID_VRF[] = "N/A";

/* Field names used when building BFD session requests sent to ptmd. */
const char ZEBRA_PTM_BFD_DST_IP_FIELD[] = "dstIPaddr";
const char ZEBRA_PTM_BFD_SRC_IP_FIELD[] = "srcIPaddr";
const char ZEBRA_PTM_BFD_MIN_RX_FIELD[] = "requiredMinRx";
const char ZEBRA_PTM_BFD_MIN_TX_FIELD[] = "upMinTx";
const char ZEBRA_PTM_BFD_DETECT_MULT_FIELD[] = "detectMult";
const char ZEBRA_PTM_BFD_MULTI_HOP_FIELD[] = "multiHop";
const char ZEBRA_PTM_BFD_CLIENT_FIELD[] = "client";
const char ZEBRA_PTM_BFD_SEQID_FIELD[] = "seqid";
const char ZEBRA_PTM_BFD_IFNAME_FIELD[] = "ifName";
const char ZEBRA_PTM_BFD_MAX_HOP_CNT_FIELD[] = "maxHopCnt";
const char ZEBRA_PTM_BFD_SEND_EVENT[] = "sendEvent";
const char ZEBRA_PTM_BFD_VRF_NAME_FIELD[] = "vrfName";
const char ZEBRA_PTM_BFD_CBIT_FIELD[] = "bfdcbit";

/* Handle returned by ptm_lib_register(); used for message encode/decode. */
static ptm_lib_handle_t *ptm_hdl;

/* Global PTM state (socket, buffers, events, reconnect timer). */
struct zebra_ptm_cb ptm_cb;

/* Forward declarations for this file. */
static int zebra_ptm_socket_init(void);
void zebra_ptm_sock_read(struct event *thread);
static void zebra_ptm_install_commands(void);
static int zebra_ptm_handle_msg_cb(void *arg, void *in_ctxt);
void zebra_bfd_peer_replay_req(void);
void zebra_ptm_send_status_req(void);
void zebra_ptm_reset_status(int ptm_disable);
static int zebra_ptm_bfd_client_deregister(struct zserv *client);

/* Leading NUL byte: Linux abstract-namespace unix-domain socket address,
 * so no filesystem entry is created for it.
 */
const char ZEBRA_PTM_SOCK_NAME[] = "\0/var/run/ptmd.socket";
|
|
|
|
|
|
|
|
/* Initialize global PTM state: allocate the send/recv scratch buffers,
 * register the CLI commands, register with the ptm_lib encoder/decoder and
 * set up the write buffer.  The socket itself is connected lazily by
 * zebra_ptm_connect().  On allocation failure the function logs and returns
 * early, leaving ptm_cb zeroed (callers are not notified).
 */
void zebra_ptm_init(void)
{
	char buf[64];

	memset(&ptm_cb, 0, sizeof(ptm_cb));

	ptm_cb.out_data = calloc(1, ZEBRA_PTM_SEND_MAX_SOCKBUF);
	if (!ptm_cb.out_data) {
		zlog_debug("%s: Allocation of send data failed", __func__);
		return;
	}

	ptm_cb.in_data = calloc(1, ZEBRA_PTM_MAX_SOCKBUF);
	if (!ptm_cb.in_data) {
		zlog_debug("%s: Allocation of recv data failed", __func__);
		free(ptm_cb.out_data);
		return;
	}

	/* pid doubles as this client's BFD sequence id towards ptmd. */
	ptm_cb.pid = getpid();
	zebra_ptm_install_commands();

	snprintf(buf, sizeof(buf), "%s", FRR_PTM_NAME);
	/* Same callback handles both normal and command-status messages. */
	ptm_hdl = ptm_lib_register(buf, NULL, zebra_ptm_handle_msg_cb,
				   zebra_ptm_handle_msg_cb);
	ptm_cb.wb = buffer_new(0);

	ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_INITIAL;

	/* -1 marks the socket as not yet connected. */
	ptm_cb.ptm_sock = -1;

	/* Deregister a client's BFD sessions when its zserv session closes. */
	hook_register(zserv_client_close, zebra_ptm_bfd_client_deregister);
}
|
|
|
|
|
|
|
|
void zebra_ptm_finish(void)
|
|
|
|
{
|
2016-06-21 12:39:58 +02:00
|
|
|
buffer_flush_all(ptm_cb.wb, ptm_cb.ptm_sock);
|
2015-07-22 22:07:08 +02:00
|
|
|
|
2016-11-01 21:57:53 +01:00
|
|
|
free(ptm_hdl);
|
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (ptm_cb.out_data)
|
|
|
|
free(ptm_cb.out_data);
|
|
|
|
|
|
|
|
if (ptm_cb.in_data)
|
|
|
|
free(ptm_cb.in_data);
|
|
|
|
|
2020-07-06 18:55:03 +02:00
|
|
|
/* Cancel events. */
|
2022-12-25 16:26:52 +01:00
|
|
|
EVENT_OFF(ptm_cb.t_read);
|
|
|
|
EVENT_OFF(ptm_cb.t_write);
|
|
|
|
EVENT_OFF(ptm_cb.t_timer);
|
2016-06-21 12:39:58 +02:00
|
|
|
|
|
|
|
if (ptm_cb.wb)
|
|
|
|
buffer_free(ptm_cb.wb);
|
|
|
|
|
2017-08-25 14:07:58 +02:00
|
|
|
if (ptm_cb.ptm_sock >= 0)
|
2016-06-21 12:39:58 +02:00
|
|
|
close(ptm_cb.ptm_sock);
|
2015-07-22 22:07:08 +02:00
|
|
|
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
/* Write-ready event handler: try to drain the buffered output to the PTM
 * socket.  On socket error the connection is torn down, interface PTM status
 * is reset and a reconnect timer is armed; if data remains pending the write
 * event is re-armed; an empty buffer needs no further action.
 */
static void zebra_ptm_flush_messages(struct event *thread)
{
	/* The event that invoked us has fired; clear the handle. */
	ptm_cb.t_write = NULL;

	if (ptm_cb.ptm_sock == -1)
		return;

	/* Clear errno so a subsequent error report reflects this call. */
	errno = 0;

	switch (buffer_flush_available(ptm_cb.wb, ptm_cb.ptm_sock)) {
	case BUFFER_ERROR:
		flog_err_sys(EC_LIB_SOCKET, "%s ptm socket error: %s", __func__,
			     safe_strerror(errno));
		close(ptm_cb.ptm_sock);
		ptm_cb.ptm_sock = -1;
		zebra_ptm_reset_status(0);
		ptm_cb.t_timer = NULL;
		event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
				ptm_cb.reconnect_time, &ptm_cb.t_timer);
		return;
	case BUFFER_PENDING:
		/* Partial flush: wait for the socket to be writable again. */
		ptm_cb.t_write = NULL;
		event_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
				ptm_cb.ptm_sock, &ptm_cb.t_write);
		break;
	case BUFFER_EMPTY:
		break;
	}
}
|
|
|
|
|
|
|
|
/* Queue `size` bytes of `data` for transmission to ptmd via the buffered
 * writer.  Returns 0 on success (sent or queued), -1 on socket error, in
 * which case the connection is closed, PTM status is reset and a reconnect
 * timer is armed.
 */
static int zebra_ptm_send_message(char *data, int size)
{
	/* Clear errno so a subsequent error report reflects this call. */
	errno = 0;
	switch (buffer_write(ptm_cb.wb, ptm_cb.ptm_sock, data, size)) {
	case BUFFER_ERROR:
		flog_err_sys(EC_LIB_SOCKET, "%s ptm socket error: %s", __func__,
			     safe_strerror(errno));
		close(ptm_cb.ptm_sock);
		ptm_cb.ptm_sock = -1;
		zebra_ptm_reset_status(0);
		ptm_cb.t_timer = NULL;
		event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
				ptm_cb.reconnect_time, &ptm_cb.t_timer);
		return -1;
	case BUFFER_EMPTY:
		/* Everything was written; no write event needed. */
		EVENT_OFF(ptm_cb.t_write);
		break;
	case BUFFER_PENDING:
		/* Data still queued: flush when the socket is writable. */
		event_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
				ptm_cb.ptm_sock, &ptm_cb.t_write);
		break;
	}

	return 0;
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
/* Connect (or reconnect) to the ptmd socket.  Invoked directly and as a
 * timer callback.  On a fresh connection, start the read event and replay
 * BFD peer registrations; then always request the current port status.
 * On failure, double the backoff (capped at ZEBRA_PTM_RECONNECT_TIME_MAX)
 * and re-arm the timer; once at the cap, reset the backoff without
 * rescheduling (the next trigger is external).
 */
void zebra_ptm_connect(struct event *t)
{
	int init = 0;

	if (ptm_cb.ptm_sock == -1) {
		zebra_ptm_socket_init();
		init = 1;
	}

	if (ptm_cb.ptm_sock != -1) {
		if (init) {
			/* New connection: arm the reader and ask the
			 * protocol daemons to replay BFD registrations.
			 */
			ptm_cb.t_read = NULL;
			event_add_read(zrouter.master, zebra_ptm_sock_read,
				       NULL, ptm_cb.ptm_sock, &ptm_cb.t_read);
			zebra_bfd_peer_replay_req();
		}
		zebra_ptm_send_status_req();
		/* Success resets the reconnect backoff. */
		ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_INITIAL;
	} else if (ptm_cb.reconnect_time < ZEBRA_PTM_RECONNECT_TIME_MAX) {
		/* Exponential backoff, clamped to the maximum. */
		ptm_cb.reconnect_time *= 2;
		if (ptm_cb.reconnect_time > ZEBRA_PTM_RECONNECT_TIME_MAX)
			ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_MAX;

		ptm_cb.t_timer = NULL;
		event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
				ptm_cb.reconnect_time, &ptm_cb.t_timer);
	} else if (ptm_cb.reconnect_time >= ZEBRA_PTM_RECONNECT_TIME_MAX) {
		/* Give up rescheduling; start over on the next trigger. */
		ptm_cb.reconnect_time = ZEBRA_PTM_RECONNECT_TIME_INITIAL;
	}
}
|
|
|
|
|
|
|
|
/* CLI "ptm-enable" (config node): enable PTM globally, turn it on for every
 * interface that has not explicitly overridden it, reset each such
 * interface's PTM status to unknown, and kick off a connection to ptmd.
 */
DEFUN (zebra_ptm_enable,
       zebra_ptm_enable_cmd,
       "ptm-enable",
       "Enable neighbor check with specified topology\n")
{
	struct vrf *vrf;
	struct interface *ifp;
	struct zebra_if *if_data;

	ptm_cb.ptm_enable = ZEBRA_IF_PTM_ENABLE_ON;

	/* Walk every interface in every VRF. */
	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
		FOR_ALL_INTERFACES (vrf, ifp)
			if (!ifp->ptm_enable) {
				if_data = (struct zebra_if *)ifp->info;
				/* Only flip interfaces that have no explicit
				 * per-interface ptm-enable configuration.
				 */
				if (if_data
				    && (if_data->ptm_enable
					== ZEBRA_IF_PTM_ENABLE_UNSPEC)) {
					ifp->ptm_enable =
						ZEBRA_IF_PTM_ENABLE_ON;
				}
				/* Assign a default unknown status */
				ifp->ptm_status = ZEBRA_PTM_STATUS_UNKNOWN;
			}

	zebra_ptm_connect(NULL);

	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* CLI "no ptm-enable" (config node): disable PTM globally and reset the
 * PTM status on all interfaces (argument 1 = ptm disabled).
 */
DEFUN (no_zebra_ptm_enable,
       no_zebra_ptm_enable_cmd,
       "no ptm-enable",
       NO_STR
       "Enable neighbor check with specified topology\n")
{
	ptm_cb.ptm_enable = ZEBRA_IF_PTM_ENABLE_OFF;
	zebra_ptm_reset_status(1);
	return CMD_SUCCESS;
}
|
|
|
|
|
2016-04-22 00:39:38 +02:00
|
|
|
/* CLI "ptm-enable" (interface node): clear any per-interface override so the
 * interface follows the global PTM setting.  If PTM just became effective on
 * an interface whose PTM state is not yet verified, bring it down until PTM
 * reports it usable.
 */
DEFUN (zebra_ptm_enable_if,
       zebra_ptm_enable_if_cmd,
       "ptm-enable",
       "Enable neighbor check with specified topology\n")
{
	VTY_DECLVAR_CONTEXT(interface, ifp);
	struct zebra_if *if_data;
	int old_ptm_enable;
	int send_linkdown = 0;

	if_data = ifp->info;
	/* "ptm-enable" on an interface means: no override, inherit global. */
	if_data->ptm_enable = ZEBRA_IF_PTM_ENABLE_UNSPEC;

	/* Interface not known to the kernel yet: nothing more to do. */
	if (ifp->ifindex == IFINDEX_INTERNAL) {
		return CMD_SUCCESS;
	}

	old_ptm_enable = ifp->ptm_enable;
	ifp->ptm_enable = ptm_cb.ptm_enable;

	/* Remember whether the interface was up ignoring PTM state. */
	if (if_is_no_ptm_operative(ifp))
		send_linkdown = 1;

	if (!old_ptm_enable && ptm_cb.ptm_enable) {
		/* PTM newly applies: if the interface is no longer operative
		 * under PTM rules, push the down transition.
		 */
		if (!if_is_operative(ifp) && send_linkdown) {
			if (IS_ZEBRA_DEBUG_EVENT)
				zlog_debug("%s: Bringing down interface %s",
					   __func__, ifp->name);
			if_down(ifp);
		}
	}

	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
/* CLI "no ptm-enable" (interface node): force PTM off on this interface.
 * If the interface was held down solely because of PTM, bring it back up.
 * The per-interface override is recorded so the global setting no longer
 * applies to this interface.
 */
DEFUN (no_zebra_ptm_enable_if,
       no_zebra_ptm_enable_if_cmd,
       "no ptm-enable",
       NO_STR
       "Enable neighbor check with specified topology\n")
{
	VTY_DECLVAR_CONTEXT(interface, ifp);
	int send_linkup = 0;
	struct zebra_if *if_data;

	if ((ifp->ifindex != IFINDEX_INTERNAL) && (ifp->ptm_enable)) {
		/* Interface currently not operative: PTM may be the cause. */
		if (!if_is_operative(ifp))
			send_linkup = 1;

		ifp->ptm_enable = ZEBRA_IF_PTM_ENABLE_OFF;
		/* With PTM ignored, the interface may now count as up. */
		if (if_is_no_ptm_operative(ifp) && send_linkup) {
			if (IS_ZEBRA_DEBUG_EVENT)
				zlog_debug("%s: Bringing up interface %s",
					   __func__, ifp->name);
			if_up(ifp, true);
		}
	}

	/* Record the explicit per-interface override. */
	if_data = ifp->info;
	if_data->ptm_enable = ZEBRA_IF_PTM_ENABLE_OFF;

	return CMD_SUCCESS;
}
|
|
|
|
|
|
|
|
|
2015-05-20 02:40:44 +02:00
|
|
|
void zebra_ptm_write(struct vty *vty)
|
|
|
|
{
|
2015-07-22 22:07:08 +02:00
|
|
|
if (ptm_cb.ptm_enable)
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, "ptm-enable\n");
|
2015-05-20 02:40:44 +02:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create a non-blocking unix-domain stream socket and connect it to ptmd's
 * abstract-namespace socket (ZEBRA_PTM_SOCK_NAME starts with a NUL byte).
 * On success, store the fd in ptm_cb.ptm_sock and return it; on any failure
 * return -1 with ptm_cb.ptm_sock left at -1.
 */
static int zebra_ptm_socket_init(void)
{
	int ret;
	int sock;
	struct sockaddr_un addr;

	ptm_cb.ptm_sock = -1;

	sock = socket(PF_UNIX, SOCK_STREAM, 0);
	if (sock < 0)
		return -1;
	/* Non-blocking so reads/writes integrate with the event loop. */
	if (set_nonblocking(sock) < 0) {
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("%s: Unable to set socket non blocking[%s]",
				   __func__, safe_strerror(errno));
		close(sock);
		return -1;
	}

	/* Make server socket. */
	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	/* sizeof includes the string's trailing NUL; harmless in sun_path. */
	memcpy(&addr.sun_path, ZEBRA_PTM_SOCK_NAME,
	       sizeof(ZEBRA_PTM_SOCK_NAME));

	/* Address length excludes the trailing NUL (the -1): for abstract
	 * sockets the name is the exact byte sequence after the leading NUL.
	 */
	ret = connect(sock, (struct sockaddr *)&addr,
		      sizeof(addr.sun_family) + sizeof(ZEBRA_PTM_SOCK_NAME)
			      - 1);
	if (ret < 0) {
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("%s: Unable to connect to socket %s [%s]",
				   __func__, ZEBRA_PTM_SOCK_NAME,
				   safe_strerror(errno));
		close(sock);
		return -1;
	}
	ptm_cb.ptm_sock = sock;
	return sock;
}
|
|
|
|
|
|
|
|
/* Register the "ptm-enable"/"no ptm-enable" CLI commands at both the global
 * configuration node and the per-interface node.
 */
static void zebra_ptm_install_commands(void)
{
	install_element(CONFIG_NODE, &zebra_ptm_enable_cmd);
	install_element(CONFIG_NODE, &no_zebra_ptm_enable_cmd);
	install_element(INTERFACE_NODE, &zebra_ptm_enable_if_cmd);
	install_element(INTERFACE_NODE, &no_zebra_ptm_enable_if_cmd);
}
|
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
/* BFD session goes down, send message to the protocols. */
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
static void if_bfd_session_update(struct interface *ifp, struct prefix *dp,
|
2016-03-08 14:10:56 +01:00
|
|
|
struct prefix *sp, int status,
|
|
|
|
vrf_id_t vrf_id)
|
2015-05-20 02:47:24 +02:00
|
|
|
{
|
2015-06-12 16:59:11 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_EVENT) {
|
|
|
|
char buf[2][INET6_ADDRSTRLEN];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (ifp) {
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
zlog_debug(
|
2020-03-27 12:35:23 +01:00
|
|
|
"MESSAGE: ZEBRA_INTERFACE_BFD_DEST_UPDATE %s/%d on %s %s event",
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
inet_ntop(dp->family, &dp->u.prefix, buf[0],
|
|
|
|
INET6_ADDRSTRLEN),
|
|
|
|
dp->prefixlen, ifp->name,
|
|
|
|
bfd_get_status_str(status));
|
2015-06-12 16:59:11 +02:00
|
|
|
} else {
|
2020-02-14 14:41:04 +01:00
|
|
|
struct vrf *vrf = vrf_lookup_by_id(vrf_id);
|
|
|
|
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
zlog_debug(
|
2020-03-27 12:35:23 +01:00
|
|
|
"MESSAGE: ZEBRA_INTERFACE_BFD_DEST_UPDATE %s/%d with src %s/%d and vrf %s(%u) %s event",
|
2015-06-12 16:59:11 +02:00
|
|
|
inet_ntop(dp->family, &dp->u.prefix, buf[0],
|
|
|
|
INET6_ADDRSTRLEN),
|
|
|
|
dp->prefixlen,
|
|
|
|
inet_ntop(sp->family, &sp->u.prefix, buf[1],
|
|
|
|
INET6_ADDRSTRLEN),
|
2020-02-14 14:41:04 +01:00
|
|
|
sp->prefixlen, VRF_LOGNAME(vrf), vrf_id,
|
2016-03-08 14:10:56 +01:00
|
|
|
bfd_get_status_str(status));
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-03-08 14:10:56 +01:00
|
|
|
zebra_interface_bfd_update(ifp, dp, sp, status, vrf_id);
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
static int zebra_ptm_handle_bfd_msg(void *arg, void *in_ctxt,
|
|
|
|
struct interface *ifp)
|
2015-05-20 02:40:44 +02:00
|
|
|
{
|
2015-06-12 16:59:11 +02:00
|
|
|
char bfdst_str[32];
|
|
|
|
char dest_str[64];
|
|
|
|
char src_str[64];
|
2016-03-08 14:10:56 +01:00
|
|
|
char vrf_str[64];
|
2015-05-20 02:40:44 +02:00
|
|
|
struct prefix dest_prefix;
|
2015-06-12 16:59:11 +02:00
|
|
|
struct prefix src_prefix;
|
2017-01-06 21:54:25 +01:00
|
|
|
vrf_id_t vrf_id;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_BFDSTATUS_STR, bfdst_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (bfdst_str[0] == '\0') {
|
|
|
|
return -1;
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_BFDDEST_STR, dest_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (dest_str[0] == '\0') {
|
|
|
|
zlog_debug("%s: Key %s not found in PTM msg", __func__,
|
2015-05-20 02:47:24 +02:00
|
|
|
ZEBRA_PTM_BFDDEST_STR);
|
2015-06-12 16:59:11 +02:00
|
|
|
return -1;
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_BFDSRC_STR, src_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_str[0] == '\0') {
|
|
|
|
zlog_debug("%s: Key %s not found in PTM msg", __func__,
|
|
|
|
ZEBRA_PTM_BFDSRC_STR);
|
|
|
|
return -1;
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-03-08 14:10:56 +01:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_BFDVRF_STR, vrf_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-03-08 14:10:56 +01:00
|
|
|
if (vrf_str[0] == '\0') {
|
|
|
|
zlog_debug("%s: Key %s not found in PTM msg", __func__,
|
|
|
|
ZEBRA_PTM_BFDVRF_STR);
|
|
|
|
return -1;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
2017-01-06 21:54:25 +01:00
|
|
|
zlog_debug(
|
2020-03-27 12:35:23 +01:00
|
|
|
"%s: Recv Port [%s] bfd status [%s] vrf [%s] peer [%s] local [%s]",
|
2015-08-26 21:37:46 +02:00
|
|
|
__func__, ifp ? ifp->name : "N/A", bfdst_str, vrf_str,
|
2016-03-08 14:10:56 +01:00
|
|
|
dest_str, src_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
if (str2prefix(dest_str, &dest_prefix) == 0) {
|
2018-09-13 21:21:05 +02:00
|
|
|
flog_err(EC_ZEBRA_PREFIX_PARSE_ERROR,
|
2018-09-13 21:38:57 +02:00
|
|
|
"%s: Peer addr %s not found", __func__, dest_str);
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-05-11 12:16:44 +02:00
|
|
|
memset(&src_prefix, 0, sizeof(src_prefix));
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
if (strcmp(ZEBRA_PTM_INVALID_SRC_IP, src_str)) {
|
|
|
|
if (str2prefix(src_str, &src_prefix) == 0) {
|
2018-09-13 21:21:05 +02:00
|
|
|
flog_err(EC_ZEBRA_PREFIX_PARSE_ERROR,
|
2018-09-13 21:38:57 +02:00
|
|
|
"%s: Local addr %s not found", __func__,
|
|
|
|
src_str);
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
return -1;
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-01-06 21:54:25 +01:00
|
|
|
if (!strcmp(ZEBRA_PTM_INVALID_VRF, vrf_str) && ifp) {
|
2021-10-22 00:17:40 +02:00
|
|
|
vrf_id = ifp->vrf->vrf_id;
|
2017-01-06 21:54:25 +01:00
|
|
|
} else {
|
2021-08-10 19:28:36 +02:00
|
|
|
struct vrf *pVrf;
|
|
|
|
|
|
|
|
pVrf = vrf_lookup_by_name(vrf_str);
|
|
|
|
if (pVrf)
|
|
|
|
vrf_id = pVrf->vrf_id;
|
|
|
|
else
|
|
|
|
vrf_id = VRF_DEFAULT;
|
2017-01-06 21:54:25 +01:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
if (!strcmp(bfdst_str, ZEBRA_PTM_BFDSTATUS_DOWN_STR)) {
|
2016-03-08 14:10:56 +01:00
|
|
|
if_bfd_session_update(ifp, &dest_prefix, &src_prefix,
|
|
|
|
BFD_STATUS_DOWN, vrf_id);
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
} else {
|
2016-03-08 14:10:56 +01:00
|
|
|
if_bfd_session_update(ifp, &dest_prefix, &src_prefix,
|
|
|
|
BFD_STATUS_UP, vrf_id);
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
return 0;
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
static int zebra_ptm_handle_cbl_msg(void *arg, void *in_ctxt,
|
|
|
|
struct interface *ifp, char *cbl_str)
|
2015-05-20 02:47:24 +02:00
|
|
|
{
|
2015-10-09 20:18:09 +02:00
|
|
|
int send_linkup = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
|
|
|
zlog_debug("%s: Recv Port [%s] cbl status [%s]", __func__,
|
|
|
|
ifp->name, cbl_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-10-09 20:18:09 +02:00
|
|
|
if (!strcmp(cbl_str, ZEBRA_PTM_PASS_STR)
|
|
|
|
&& (ifp->ptm_status != ZEBRA_PTM_STATUS_UP)) {
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-10-09 20:18:09 +02:00
|
|
|
if (ifp->ptm_status == ZEBRA_PTM_STATUS_DOWN)
|
|
|
|
send_linkup = 1;
|
|
|
|
ifp->ptm_status = ZEBRA_PTM_STATUS_UP;
|
|
|
|
if (ifp->ptm_enable && if_is_no_ptm_operative(ifp)
|
|
|
|
&& send_linkup)
|
2020-10-02 20:49:09 +02:00
|
|
|
if_up(ifp, true);
|
2015-10-09 20:18:09 +02:00
|
|
|
} else if (!strcmp(cbl_str, ZEBRA_PTM_FAIL_STR)
|
|
|
|
&& (ifp->ptm_status != ZEBRA_PTM_STATUS_DOWN)) {
|
|
|
|
ifp->ptm_status = ZEBRA_PTM_STATUS_DOWN;
|
2015-08-26 21:37:46 +02:00
|
|
|
if (ifp->ptm_enable && if_is_no_ptm_operative(ifp))
|
|
|
|
if_down(ifp);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2015-10-09 20:18:09 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
/*
|
|
|
|
* zebra_ptm_handle_msg_cb - The purpose of this callback function is to handle
|
|
|
|
* all the command responses and notifications received from PTM.
|
|
|
|
*
|
|
|
|
* Command responses: Upon establishing connection with PTM, Zebra requests
|
|
|
|
* status of all interfaces using 'get-status' command if global ptm-enable
|
|
|
|
* knob is enabled. As a response to the get-status command PTM sends status
|
|
|
|
* of all the interfaces as command responses. All other type of command
|
|
|
|
* responses with cmd_status key word are dropped. The sole purpose of
|
|
|
|
* registering this function as callback for the command responses is to
|
|
|
|
* handle the responses to get-status command.
|
|
|
|
*
|
|
|
|
* Notifications: Cable status and BFD session status changes are sent as
|
|
|
|
* notifications by PTM. So, this function is also the callback function for
|
|
|
|
* processing all the notifications from the PTM.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int zebra_ptm_handle_msg_cb(void *arg, void *in_ctxt)
|
|
|
|
{
|
|
|
|
struct interface *ifp = NULL;
|
2015-06-12 16:59:11 +02:00
|
|
|
char port_str[128];
|
2015-08-26 21:37:46 +02:00
|
|
|
char cbl_str[32];
|
|
|
|
char cmd_status_str[32];
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_CMD_STATUS_STR,
|
|
|
|
cmd_status_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
/* Drop command response messages */
|
2015-08-26 21:37:46 +02:00
|
|
|
if (cmd_status_str[0] != '\0') {
|
2015-07-22 22:07:08 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_PORT_STR, port_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (port_str[0] == '\0') {
|
2015-08-26 21:37:46 +02:00
|
|
|
zlog_debug("%s: Key %s not found in PTM msg", __func__,
|
2015-05-20 02:47:24 +02:00
|
|
|
ZEBRA_PTM_PORT_STR);
|
2015-08-26 21:37:46 +02:00
|
|
|
return -1;
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
if (strcmp(ZEBRA_PTM_INVALID_PORT_NAME, port_str)) {
|
2021-10-14 20:06:38 +02:00
|
|
|
struct vrf *vrf;
|
|
|
|
int count = 0;
|
|
|
|
|
|
|
|
RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
|
2021-11-02 17:20:24 +01:00
|
|
|
ifp = if_lookup_by_name_vrf(port_str, vrf);
|
2021-10-14 20:06:38 +02:00
|
|
|
if (ifp) {
|
|
|
|
count++;
|
|
|
|
if (!vrf_is_backend_netns())
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
if (!ifp) {
|
2018-09-13 21:21:05 +02:00
|
|
|
flog_warn(EC_ZEBRA_UNKNOWN_INTERFACE,
|
2018-08-16 22:10:32 +02:00
|
|
|
"%s: %s not found in interface list",
|
2018-06-19 20:29:05 +02:00
|
|
|
__func__, port_str);
|
2015-08-26 21:37:46 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2021-10-14 20:06:38 +02:00
|
|
|
if (count > 1) {
|
|
|
|
flog_warn(EC_ZEBRA_UNKNOWN_INTERFACE,
|
|
|
|
"%s: multiple interface with name %s",
|
|
|
|
__func__, port_str);
|
|
|
|
return -1;
|
|
|
|
}
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
ptm_lib_find_key_in_msg(in_ctxt, ZEBRA_PTM_CBL_STR, cbl_str);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-08-26 21:37:46 +02:00
|
|
|
if (cbl_str[0] == '\0') {
|
|
|
|
return zebra_ptm_handle_bfd_msg(arg, in_ctxt, ifp);
|
|
|
|
} else {
|
|
|
|
if (ifp) {
|
|
|
|
return zebra_ptm_handle_cbl_msg(arg, in_ctxt, ifp,
|
|
|
|
cbl_str);
|
|
|
|
} else {
|
|
|
|
return -1;
|
|
|
|
}
|
2015-05-20 02:40:44 +02:00
|
|
|
}
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
|
|
|
|
2022-03-01 22:18:12 +01:00
|
|
|
void zebra_ptm_sock_read(struct event *thread)
|
2015-05-20 02:47:24 +02:00
|
|
|
{
|
2018-07-02 18:50:20 +02:00
|
|
|
int sock;
|
2015-06-12 16:59:11 +02:00
|
|
|
int rc;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
errno = 0;
|
2022-12-25 16:26:52 +01:00
|
|
|
sock = EVENT_FD(thread);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:47:24 +02:00
|
|
|
if (sock == -1)
|
2022-02-23 01:04:25 +01:00
|
|
|
return;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-05-20 02:47:24 +02:00
|
|
|
/* PTM communicates in CSV format */
|
2018-07-02 18:50:20 +02:00
|
|
|
do {
|
2015-07-22 22:07:08 +02:00
|
|
|
rc = ptm_lib_process_msg(ptm_hdl, sock, ptm_cb.in_data,
|
|
|
|
ZEBRA_PTM_MAX_SOCKBUF, NULL);
|
2018-07-02 18:50:20 +02:00
|
|
|
} while (rc > 0);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-07-02 18:50:20 +02:00
|
|
|
if (((rc == 0) && !errno)
|
|
|
|
|| (errno && (errno != EWOULDBLOCK) && (errno != EAGAIN))) {
|
2018-09-13 21:34:28 +02:00
|
|
|
flog_err_sys(EC_LIB_SOCKET,
|
2018-08-16 22:10:32 +02:00
|
|
|
"%s routing socket error: %s(%d) bytes %d",
|
|
|
|
__func__, safe_strerror(errno), errno, rc);
|
2018-07-02 18:50:20 +02:00
|
|
|
|
|
|
|
close(ptm_cb.ptm_sock);
|
|
|
|
ptm_cb.ptm_sock = -1;
|
|
|
|
zebra_ptm_reset_status(0);
|
|
|
|
ptm_cb.t_timer = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
|
|
|
|
ptm_cb.reconnect_time, &ptm_cb.t_timer);
|
2022-02-23 01:04:25 +01:00
|
|
|
return;
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-05-05 23:22:25 +02:00
|
|
|
ptm_cb.t_read = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_read(zrouter.master, zebra_ptm_sock_read, NULL,
|
|
|
|
ptm_cb.ptm_sock, &ptm_cb.t_read);
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* BFD peer/dst register/update */
|
2018-03-06 23:57:33 +01:00
|
|
|
void zebra_ptm_bfd_dst_register(ZAPI_HANDLER_ARGS)
|
2015-06-12 16:59:11 +02:00
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
struct prefix src_p;
|
|
|
|
struct prefix dst_p;
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t multi_hop;
|
|
|
|
uint8_t multi_hop_cnt;
|
|
|
|
uint8_t detect_mul;
|
2015-06-12 16:59:11 +02:00
|
|
|
unsigned int min_rx_timer;
|
|
|
|
unsigned int min_tx_timer;
|
|
|
|
char if_name[INTERFACE_NAMSIZ];
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t len;
|
2015-06-12 16:59:11 +02:00
|
|
|
void *out_ctxt;
|
|
|
|
char buf[INET6_ADDRSTRLEN];
|
|
|
|
char tmp_buf[64];
|
|
|
|
int data_len = ZEBRA_PTM_SEND_MAX_SOCKBUF;
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
unsigned int pid;
|
bfdd, lib, bgpd: add bfd cbit usage
bfd cbit is a value carried out in bfd messages, that permit to keep or
not, the independence between control plane and dataplane. In other
words, while most of the cases plan to flush entries, when bfd goes
down, there are some cases where that bfd event should be ignored. this
is the case with non stop forwarding mechanisms where entries may be
kept. this is the case for BGP, when graceful restart capability is
used. If BFD event down happens, and bgp is in graceful restart mode, it
is wished to ignore the BFD event while waiting for the remote router to
restart.
The changes take into account the following:
- add a config flag across zebra layer so that daemon can set or not the
cbit capability.
- ability for daemons to read the remote bfd capability associated to a bfd
notification.
- in bfdd, according to the value, the cbit value is set
- in bfdd, the received value is retrieved and stored in the bfd session
context.
- by default, the local cbit announced to remote is set to 1 while
preservation of the local path is not set.
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2019-04-15 17:20:25 +02:00
|
|
|
uint8_t cbit_set;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-03-06 23:57:33 +01:00
|
|
|
if (hdr->command == ZEBRA_BFD_DEST_UPDATE)
|
2015-06-12 16:59:11 +02:00
|
|
|
client->bfd_peer_upd8_cnt++;
|
|
|
|
else
|
|
|
|
client->bfd_peer_add_cnt++;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
|
|
|
zlog_debug("bfd_dst_register msg from client %s: length=%d",
|
2018-03-06 23:57:33 +01:00
|
|
|
zebra_route_string(client->proto), hdr->length);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (ptm_cb.ptm_sock == -1) {
|
2017-05-05 23:22:25 +02:00
|
|
|
ptm_cb.t_timer = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
|
|
|
|
ptm_cb.reconnect_time, &ptm_cb.t_timer);
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_init_msg(ptm_hdl, 0, PTMLIB_MSG_TYPE_CMD, NULL, &out_ctxt);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s", ZEBRA_PTM_BFD_START_CMD);
|
2015-07-22 22:07:08 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_CMD_STR, tmp_buf);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s",
|
|
|
|
zebra_route_string(client->proto));
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_CLIENT_FIELD,
|
|
|
|
tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-03-07 00:08:37 +01:00
|
|
|
s = msg;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETL(s, pid);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", pid);
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_SEQID_FIELD,
|
|
|
|
tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETW(s, dst_p.family);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (dst_p.family == AF_INET)
|
|
|
|
dst_p.prefixlen = IPV4_MAX_BYTELEN;
|
|
|
|
else
|
|
|
|
dst_p.prefixlen = IPV6_MAX_BYTELEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GET(&dst_p.u.prefix, s, dst_p.prefixlen);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (dst_p.family == AF_INET) {
|
|
|
|
inet_ntop(AF_INET, &dst_p.u.prefix4, buf, sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_DST_IP_FIELD, buf);
|
|
|
|
} else {
|
|
|
|
inet_ntop(AF_INET6, &dst_p.u.prefix6, buf, sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_DST_IP_FIELD, buf);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETL(s, min_rx_timer);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", min_rx_timer);
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_MIN_RX_FIELD,
|
|
|
|
tmp_buf);
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETL(s, min_tx_timer);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", min_tx_timer);
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_MIN_TX_FIELD,
|
|
|
|
tmp_buf);
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETC(s, detect_mul);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", detect_mul);
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_DETECT_MULT_FIELD,
|
|
|
|
tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETC(s, multi_hop);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (multi_hop) {
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", 1);
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_MULTI_HOP_FIELD, tmp_buf);
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETW(s, src_p.family);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET)
|
|
|
|
src_p.prefixlen = IPV4_MAX_BYTELEN;
|
|
|
|
else
|
|
|
|
src_p.prefixlen = IPV6_MAX_BYTELEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GET(&src_p.u.prefix, s, src_p.prefixlen);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET) {
|
|
|
|
inet_ntop(AF_INET, &src_p.u.prefix4, buf, sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD, buf);
|
|
|
|
} else {
|
|
|
|
inet_ntop(AF_INET6, &src_p.u.prefix6, buf, sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD, buf);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETC(s, multi_hop_cnt);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", multi_hop_cnt);
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_MAX_HOP_CNT_FIELD, tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-10-30 22:50:26 +01:00
|
|
|
if (zvrf_id(zvrf) != VRF_DEFAULT)
|
2016-04-26 02:01:04 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_VRF_NAME_FIELD,
|
2016-10-30 22:50:26 +01:00
|
|
|
zvrf_name(zvrf));
|
2015-06-12 16:59:11 +02:00
|
|
|
} else {
|
|
|
|
if (dst_p.family == AF_INET6) {
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETW(s, src_p.family);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET)
|
|
|
|
src_p.prefixlen = IPV4_MAX_BYTELEN;
|
|
|
|
else
|
|
|
|
src_p.prefixlen = IPV6_MAX_BYTELEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GET(&src_p.u.prefix, s, src_p.prefixlen);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET) {
|
|
|
|
inet_ntop(AF_INET, &src_p.u.prefix4, buf,
|
|
|
|
sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD,
|
|
|
|
buf);
|
|
|
|
} else {
|
|
|
|
inet_ntop(AF_INET6, &src_p.u.prefix6, buf,
|
|
|
|
sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD,
|
|
|
|
buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
}
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETC(s, len);
|
|
|
|
STREAM_GET(if_name, s, len);
|
2015-06-12 16:59:11 +02:00
|
|
|
if_name[len] = '\0';
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_IFNAME_FIELD, if_name);
|
|
|
|
}
|
bfdd, lib, bgpd: add bfd cbit usage
bfd cbit is a value carried out in bfd messages, that permit to keep or
not, the independence between control plane and dataplane. In other
words, while most of the cases plan to flush entries, when bfd goes
down, there are some cases where that bfd event should be ignored. this
is the case with non stop forwarding mechanisms where entries may be
kept. this is the case for BGP, when graceful restart capability is
used. If BFD event down happens, and bgp is in graceful restart mode, it
is wished to ignore the BFD event while waiting for the remote router to
restart.
The changes take into account the following:
- add a config flag across zebra layer so that daemon can set or not the
cbit capability.
- ability for daemons to read the remote bfd capability associated to a bfd
notification.
- in bfdd, according to the value, the cbit value is set
- in bfdd, the received value is retrieved and stored in the bfd session
context.
- by default, the local cbit announced to remote is set to 1 while
preservation of the local path is not set.
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2019-04-15 17:20:25 +02:00
|
|
|
STREAM_GETC(s, cbit_set);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", cbit_set);
|
bfdd, lib, bgpd: add bfd cbit usage
bfd cbit is a value carried out in bfd messages, that permit to keep or
not, the independence between control plane and dataplane. In other
words, while most of the cases plan to flush entries, when bfd goes
down, there are some cases where that bfd event should be ignored. this
is the case with non stop forwarding mechanisms where entries may be
kept. this is the case for BGP, when graceful restart capability is
used. If BFD event down happens, and bgp is in graceful restart mode, it
is wished to ignore the BFD event while waiting for the remote router to
restart.
The changes take into account the following:
- add a config flag across zebra layer so that daemon can set or not the
cbit capability.
- ability for daemons to read the remote bfd capability associated to a bfd
notification.
- in bfdd, according to the value, the cbit value is set
- in bfdd, the received value is retrieved and stored in the bfd session
context.
- by default, the local cbit announced to remote is set to 1 while
preservation of the local path is not set.
Signed-off-by: Philippe Guibert <philippe.guibert@6wind.com>
2019-04-15 17:20:25 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_CBIT_FIELD, tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", 1);
|
Support of BFD status in Quagga
Ticket:CM-6802, CM-6952
Reviewed By: Donald, Kanna
Testing Done:
Double commit of b76943235e09472ec174edcf7204fc82d27fe966 from br2.5. But, manually resolved all the compilation errors. Also, modified the shows to support the json format which was not supported in br2.5.
CM-6802 – Currently, BFD session status can be monitored only through ptmctl. There is no way to check the BFD status of a peer/neighbor through Quagga. Debugging becomes easier if BFD status is shown in Quagga too. BFD status is relevant when it is shown against the BGP peer/OSPF neighbor. For, this following code changes have been done:
- Only down messages from PTM were being propagated from Zebra daemon to clients (bgpd, ospfd and ospf6d). Now, both up and down messages are redistributed to the clients from zebra. BFD status field has been added to the messaging. Handling of BFD session up messages has been added to the client code. BGP/OSPF neighbor is brought down only if the old BFD session status is ‘Up’ to handle extra/initial down messages.
- BFD status and last update timestamp fields have been added to the common BFD info structure. Also, common show functions for showing BFD information have been added to BFD lib.
- Modified the BGP neighbor show functions to call common BFD lib functions.
- For ospf and ospf6, BFD information was maintained only at interface level. To show BFD status per neighbor, BFD information has been added at neighbor level too. “show ip ospf interface”, “show ip ospf neighbor detail”, “show ipv6 ospf6 interface” and “show ipv6 ospf6 neighbor detail” output have been modified to show BFD information.
CM-6952 - IBGP peers were always assumed to be multi-hop since there was no easy way to determine whether an IBGP peer was single hop or multihop unlike EBGP. But, this is causing problem with IBGP link local peers since BFD doesn't allow multihop BFD session with link local IP addresses. Link local peers were discovered when the interface peering was enabled. Interface peering is always singlehop. So, added checks to treat all interface based peers as single hop irrespective of whether the peer is IBGP or EBGP.
2015-08-31 23:56:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_SEND_EVENT,
|
|
|
|
tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
ptm_lib_complete_msg(ptm_hdl, out_ctxt, ptm_cb.out_data, &data_len);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_SEND)
|
|
|
|
zlog_debug("%s: Sent message (%d) %s", __func__, data_len,
|
|
|
|
ptm_cb.out_data);
|
|
|
|
zebra_ptm_send_message(ptm_cb.out_data, data_len);
|
2017-11-10 14:51:34 +01:00
|
|
|
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
2018-01-25 02:53:40 +01:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
stream_failure:
|
2017-12-05 01:03:51 +01:00
|
|
|
ptm_lib_cleanup_msg(ptm_hdl, out_ctxt);
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* BFD peer/dst deregister */
|
2018-03-06 23:57:33 +01:00
|
|
|
void zebra_ptm_bfd_dst_deregister(ZAPI_HANDLER_ARGS)
|
2015-06-12 16:59:11 +02:00
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
struct prefix src_p;
|
|
|
|
struct prefix dst_p;
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t multi_hop;
|
2015-06-12 16:59:11 +02:00
|
|
|
char if_name[INTERFACE_NAMSIZ];
|
2018-03-27 21:13:34 +02:00
|
|
|
uint8_t len;
|
2015-06-12 16:59:11 +02:00
|
|
|
char buf[INET6_ADDRSTRLEN];
|
|
|
|
char tmp_buf[64];
|
|
|
|
int data_len = ZEBRA_PTM_SEND_MAX_SOCKBUF;
|
|
|
|
void *out_ctxt;
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
unsigned int pid;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
client->bfd_peer_del_cnt++;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
|
|
|
zlog_debug("bfd_dst_deregister msg from client %s: length=%d",
|
2018-03-06 23:57:33 +01:00
|
|
|
zebra_route_string(client->proto), hdr->length);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
if (ptm_cb.ptm_sock == -1) {
|
2017-05-05 23:22:25 +02:00
|
|
|
ptm_cb.t_timer = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
|
|
|
|
ptm_cb.reconnect_time, &ptm_cb.t_timer);
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
2015-06-12 16:59:11 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_init_msg(ptm_hdl, 0, PTMLIB_MSG_TYPE_CMD, NULL, &out_ctxt);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s", ZEBRA_PTM_BFD_STOP_CMD);
|
2015-07-22 22:07:08 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_CMD_STR, tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s",
|
|
|
|
zebra_route_string(client->proto));
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_CLIENT_FIELD,
|
|
|
|
tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2018-03-07 00:08:37 +01:00
|
|
|
s = msg;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETL(s, pid);
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", pid);
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_SEQID_FIELD,
|
|
|
|
tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETW(s, dst_p.family);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (dst_p.family == AF_INET)
|
|
|
|
dst_p.prefixlen = IPV4_MAX_BYTELEN;
|
|
|
|
else
|
|
|
|
dst_p.prefixlen = IPV6_MAX_BYTELEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GET(&dst_p.u.prefix, s, dst_p.prefixlen);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (dst_p.family == AF_INET)
|
2017-01-13 13:57:57 +01:00
|
|
|
inet_ntop(AF_INET, &dst_p.u.prefix4, buf, sizeof(buf));
|
2015-06-12 16:59:11 +02:00
|
|
|
else
|
2017-01-13 13:57:57 +01:00
|
|
|
inet_ntop(AF_INET6, &dst_p.u.prefix6, buf, sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_DST_IP_FIELD, buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETC(s, multi_hop);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (multi_hop) {
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", 1);
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_MULTI_HOP_FIELD, tmp_buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETW(s, src_p.family);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET)
|
|
|
|
src_p.prefixlen = IPV4_MAX_BYTELEN;
|
|
|
|
else
|
|
|
|
src_p.prefixlen = IPV6_MAX_BYTELEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GET(&src_p.u.prefix, s, src_p.prefixlen);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET)
|
2017-01-16 14:57:20 +01:00
|
|
|
inet_ntop(AF_INET, &src_p.u.prefix4, buf, sizeof(buf));
|
2015-06-12 16:59:11 +02:00
|
|
|
else
|
2017-01-16 14:57:20 +01:00
|
|
|
inet_ntop(AF_INET6, &src_p.u.prefix6, buf, sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD, buf);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2016-10-30 22:50:26 +01:00
|
|
|
if (zvrf_id(zvrf) != VRF_DEFAULT)
|
2016-04-26 02:01:04 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_VRF_NAME_FIELD,
|
2016-10-30 22:50:26 +01:00
|
|
|
zvrf_name(zvrf));
|
2015-06-12 16:59:11 +02:00
|
|
|
} else {
|
|
|
|
if (dst_p.family == AF_INET6) {
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETW(s, src_p.family);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET)
|
|
|
|
src_p.prefixlen = IPV4_MAX_BYTELEN;
|
|
|
|
else
|
|
|
|
src_p.prefixlen = IPV6_MAX_BYTELEN;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GET(&src_p.u.prefix, s, src_p.prefixlen);
|
2015-06-12 16:59:11 +02:00
|
|
|
if (src_p.family == AF_INET) {
|
|
|
|
inet_ntop(AF_INET, &src_p.u.prefix4, buf,
|
|
|
|
sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD,
|
|
|
|
buf);
|
|
|
|
} else {
|
|
|
|
inet_ntop(AF_INET6, &src_p.u.prefix6, buf,
|
|
|
|
sizeof(buf));
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_SRC_IP_FIELD,
|
|
|
|
buf);
|
|
|
|
}
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETC(s, len);
|
|
|
|
STREAM_GET(if_name, s, len);
|
2015-06-12 16:59:11 +02:00
|
|
|
if_name[len] = '\0';
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-06-12 16:59:11 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt,
|
|
|
|
ZEBRA_PTM_BFD_IFNAME_FIELD, if_name);
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
ptm_lib_complete_msg(ptm_hdl, out_ctxt, ptm_cb.out_data, &data_len);
|
|
|
|
if (IS_ZEBRA_DEBUG_SEND)
|
|
|
|
zlog_debug("%s: Sent message (%d) %s", __func__, data_len,
|
|
|
|
ptm_cb.out_data);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
zebra_ptm_send_message(ptm_cb.out_data, data_len);
|
2017-11-10 14:51:34 +01:00
|
|
|
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
2018-01-25 02:53:40 +01:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
stream_failure:
|
2017-12-05 01:03:51 +01:00
|
|
|
ptm_lib_cleanup_msg(ptm_hdl, out_ctxt);
|
2015-05-20 02:47:24 +02:00
|
|
|
}
|
2015-07-22 22:07:08 +02:00
|
|
|
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
/* BFD client register */
|
2018-03-06 23:57:33 +01:00
|
|
|
void zebra_ptm_bfd_client_register(ZAPI_HANDLER_ARGS)
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
{
|
|
|
|
struct stream *s;
|
|
|
|
unsigned int pid;
|
2018-01-25 02:53:40 +01:00
|
|
|
void *out_ctxt = NULL;
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
char tmp_buf[64];
|
|
|
|
int data_len = ZEBRA_PTM_SEND_MAX_SOCKBUF;
|
|
|
|
|
|
|
|
client->bfd_client_reg_cnt++;
|
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
|
|
|
zlog_debug("bfd_client_register msg from client %s: length=%d",
|
2018-03-06 23:57:33 +01:00
|
|
|
zebra_route_string(client->proto), hdr->length);
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
|
2018-03-07 00:08:37 +01:00
|
|
|
s = msg;
|
2017-11-10 14:51:34 +01:00
|
|
|
STREAM_GETL(s, pid);
|
2017-10-11 14:09:28 +02:00
|
|
|
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
if (ptm_cb.ptm_sock == -1) {
|
2017-05-05 23:22:25 +02:00
|
|
|
ptm_cb.t_timer = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
|
|
|
|
ptm_cb.reconnect_time, &ptm_cb.t_timer);
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
ptm_lib_init_msg(ptm_hdl, 0, PTMLIB_MSG_TYPE_CMD, NULL, &out_ctxt);
|
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s", ZEBRA_PTM_BFD_CLIENT_REG_CMD);
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_CMD_STR, tmp_buf);
|
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s",
|
|
|
|
zebra_route_string(client->proto));
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_CLIENT_FIELD,
|
|
|
|
tmp_buf);
|
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%d", pid);
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_SEQID_FIELD,
|
|
|
|
tmp_buf);
|
|
|
|
|
|
|
|
ptm_lib_complete_msg(ptm_hdl, out_ctxt, ptm_cb.out_data, &data_len);
|
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_SEND)
|
|
|
|
zlog_debug("%s: Sent message (%d) %s", __func__, data_len,
|
|
|
|
ptm_cb.out_data);
|
|
|
|
zebra_ptm_send_message(ptm_cb.out_data, data_len);
|
2016-06-21 12:39:58 +02:00
|
|
|
|
|
|
|
SET_FLAG(ptm_cb.client_flags[client->proto],
|
|
|
|
ZEBRA_PTM_BFD_CLIENT_FLAG_REG);
|
2018-01-25 02:53:40 +01:00
|
|
|
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
2018-01-25 02:53:40 +01:00
|
|
|
|
2017-11-10 14:51:34 +01:00
|
|
|
stream_failure:
|
2018-02-14 05:39:09 +01:00
|
|
|
/*
|
|
|
|
* IF we ever add more STREAM_GETXXX functions after the out_ctxt
|
|
|
|
* is allocated then we need to add this code back in
|
|
|
|
*
|
|
|
|
* if (out_ctxt)
|
|
|
|
* ptm_lib_cleanup_msg(ptm_hdl, out_ctxt);
|
|
|
|
*/
|
2018-03-06 23:01:42 +01:00
|
|
|
return;
|
Support for multi-client and client reg msg
Ticket: CM-7615, CM-7773
Reviewed By: CCR-3610, CCR-3708
Testing Done: Unit, BGP Smoke and OSPF Smoke
Changes (70790261926b17200c8c9377c4576cd3b486fcef) ported from 2.5
Issue (related to CM-7615): 1. CM-7615: There is mismatch in the client name between ptm display of client BFD sessions and the zebra logs. For example, if bgpd added BFD session, zebra logs will show the client as “bgp” but the ptm display will show it as “quagga”
2. Bigger problem is when 2 clients (for example OSPF and BGP) from Quagga register for same BFD session and only one client de-registers the BFD session. This results in BFD session deletion from PTM even though other client still has the BFD registration.
Root Cause: Even though BGP, OSPF and OSPF6 are 3 different clients from Quagga that are trying to register/deregister BFD sessions with PTM, all 3 are represented as one client “quagga” from zebra. This makes it hard for PTM/BFD to distinguish between all three when BFD peer registration/deregistration happens from the clients.
Fix: Send the actual client name bgp, ospf or ospf6 from zebra with BFD reg/dereg messages instead of one unified client name “quagga”
CM-7773: BFD sessions are not getting cleaned from PTM even though no BGP peering exists in Quagga.
Root Cause: PTM cleans up stale BFD sessions from a client when it finds a change in seq id advertised by the client. But, if PTM never detects a change in the seq id then the stale BFD sessions never get cleaned up. The test restarts the quagga without saving the configuration, which results in no BGP peering. No BGP peers are registered with PTM after restart and PTM does not detect a client seq id change resulting in stale BFD sessions.
Fix: New client registration message was added in PTM. Every client that is interested in BFD monitoring will register with PTM with the client seq id. Client will register with a different seq id (typically pid) every time it restarts. This will help in detecting the change in seq id and cleanup of stale BFD sessions for a client.
Code Changes: To support the new client registration message following changes have been made
- Added support for client registration messaging in zebra for sending messages to PTM.
- Added support for client registration messaging between zebra and clients (BGP, OSPF and OSPF6) in BFD library.
- Expanded the reg/de reg peer messaging between zebra and clients to support client specific seq id to distinguish between multiple clients registering for BFD peer rather than one “quagga” client.
- Changes in bgpd, ospfd and ospf6d to send client registrations at the time of daemon initialization and on receiving BFD peer replay message.
2016-03-09 08:31:32 +01:00
|
|
|
}
|
|
|
|
|
2016-05-09 05:11:18 +02:00
|
|
|
/* BFD client deregister */
|
2018-04-22 23:03:52 +02:00
|
|
|
int zebra_ptm_bfd_client_deregister(struct zserv *client)
|
2016-05-09 05:11:18 +02:00
|
|
|
{
|
2018-04-22 23:03:52 +02:00
|
|
|
uint8_t proto = client->proto;
|
2016-05-09 05:11:18 +02:00
|
|
|
void *out_ctxt;
|
|
|
|
char tmp_buf[64];
|
|
|
|
int data_len = ZEBRA_PTM_SEND_MAX_SOCKBUF;
|
|
|
|
|
2018-09-20 15:09:43 +02:00
|
|
|
if (!IS_BFD_ENABLED_PROTOCOL(proto))
|
2018-04-22 23:03:52 +02:00
|
|
|
return 0;
|
2016-05-09 05:11:18 +02:00
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
2018-08-16 22:10:32 +02:00
|
|
|
zlog_debug("bfd_client_deregister msg for client %s",
|
|
|
|
zebra_route_string(proto));
|
2016-05-09 05:11:18 +02:00
|
|
|
|
|
|
|
if (ptm_cb.ptm_sock == -1) {
|
2017-05-05 23:22:25 +02:00
|
|
|
ptm_cb.t_timer = NULL;
|
2022-05-20 20:19:08 +02:00
|
|
|
event_add_timer(zrouter.master, zebra_ptm_connect, NULL,
|
|
|
|
ptm_cb.reconnect_time, &ptm_cb.t_timer);
|
2018-04-22 23:03:52 +02:00
|
|
|
return 0;
|
2016-05-09 05:11:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ptm_lib_init_msg(ptm_hdl, 0, PTMLIB_MSG_TYPE_CMD, NULL, &out_ctxt);
|
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s",
|
|
|
|
ZEBRA_PTM_BFD_CLIENT_DEREG_CMD);
|
2016-05-09 05:11:18 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_CMD_STR, tmp_buf);
|
|
|
|
|
2020-04-20 20:12:38 +02:00
|
|
|
snprintf(tmp_buf, sizeof(tmp_buf), "%s", zebra_route_string(proto));
|
2016-05-09 05:11:18 +02:00
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_BFD_CLIENT_FIELD,
|
|
|
|
tmp_buf);
|
|
|
|
|
|
|
|
ptm_lib_complete_msg(ptm_hdl, out_ctxt, ptm_cb.out_data, &data_len);
|
|
|
|
|
|
|
|
if (IS_ZEBRA_DEBUG_SEND)
|
|
|
|
zlog_debug("%s: Sent message (%d) %s", __func__, data_len,
|
|
|
|
ptm_cb.out_data);
|
2016-06-21 12:39:58 +02:00
|
|
|
|
2016-05-09 05:11:18 +02:00
|
|
|
zebra_ptm_send_message(ptm_cb.out_data, data_len);
|
2016-06-21 12:39:58 +02:00
|
|
|
UNSET_FLAG(ptm_cb.client_flags[proto], ZEBRA_PTM_BFD_CLIENT_FLAG_REG);
|
2018-04-22 23:03:52 +02:00
|
|
|
|
|
|
|
return 0;
|
2016-05-09 05:11:18 +02:00
|
|
|
}
|
|
|
|
|
2015-07-22 22:07:08 +02:00
|
|
|
/* Report whether PTM integration is currently enabled (ptm_cb.ptm_enable). */
int zebra_ptm_get_enable_state(void)
{
	return ptm_cb.ptm_enable;
}
|
2015-10-09 20:18:09 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* zebra_ptm_get_status_str - Convert status to a display string.
|
|
|
|
*/
|
|
|
|
static const char *zebra_ptm_get_status_str(int status)
|
|
|
|
{
|
|
|
|
switch (status) {
|
|
|
|
case ZEBRA_PTM_STATUS_DOWN:
|
|
|
|
return "fail";
|
|
|
|
case ZEBRA_PTM_STATUS_UP:
|
|
|
|
return "pass";
|
|
|
|
case ZEBRA_PTM_STATUS_UNKNOWN:
|
|
|
|
default:
|
|
|
|
return "n/a";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-02 20:38:26 +02:00
|
|
|
/* Display the interface's PTM status, either as vty text or JSON. */
void zebra_ptm_show_status(struct vty *vty, json_object *json,
			   struct interface *ifp)
{
	/* An interface with PTM turned off shows "disabled" instead of a
	 * pass/fail/n-a status string.
	 */
	const char *state = ifp->ptm_enable
				    ? zebra_ptm_get_status_str(ifp->ptm_status)
				    : "disabled";

	if (json)
		json_object_string_add(json, "ptmStatus", state);
	else
		vty_out(vty, " PTM status: %s\n", state);
}
|
|
|
|
|
|
|
|
void zebra_ptm_send_status_req(void)
|
|
|
|
{
|
|
|
|
void *out_ctxt;
|
|
|
|
int len = ZEBRA_PTM_SEND_MAX_SOCKBUF;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-10-09 20:18:09 +02:00
|
|
|
if (ptm_cb.ptm_enable) {
|
|
|
|
ptm_lib_init_msg(ptm_hdl, 0, PTMLIB_MSG_TYPE_CMD, NULL,
|
|
|
|
&out_ctxt);
|
|
|
|
ptm_lib_append_msg(ptm_hdl, out_ctxt, ZEBRA_PTM_CMD_STR,
|
|
|
|
ZEBRA_PTM_GET_STATUS_CMD);
|
|
|
|
ptm_lib_complete_msg(ptm_hdl, out_ctxt, ptm_cb.out_data, &len);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2015-10-09 20:18:09 +02:00
|
|
|
zebra_ptm_send_message(ptm_cb.out_data, len);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void zebra_ptm_reset_status(int ptm_disable)
|
|
|
|
{
|
2016-10-29 18:37:11 +02:00
|
|
|
struct vrf *vrf;
|
2015-10-09 20:18:09 +02:00
|
|
|
struct interface *ifp;
|
|
|
|
int send_linkup;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-09-15 17:47:35 +02:00
|
|
|
RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id)
|
2017-10-06 20:25:58 +02:00
|
|
|
FOR_ALL_INTERFACES (vrf, ifp) {
|
2017-09-15 17:47:35 +02:00
|
|
|
send_linkup = 0;
|
|
|
|
if (ifp->ptm_enable) {
|
|
|
|
if (!if_is_operative(ifp))
|
|
|
|
send_linkup = 1;
|
|
|
|
|
|
|
|
if (ptm_disable)
|
|
|
|
ifp->ptm_enable =
|
|
|
|
ZEBRA_IF_PTM_ENABLE_OFF;
|
|
|
|
ifp->ptm_status = ZEBRA_PTM_STATUS_UNKNOWN;
|
|
|
|
|
|
|
|
if (if_is_operative(ifp) && send_linkup) {
|
|
|
|
if (IS_ZEBRA_DEBUG_EVENT)
|
|
|
|
zlog_debug(
|
|
|
|
"%s: Bringing up interface %s",
|
|
|
|
__func__, ifp->name);
|
2020-10-02 20:49:09 +02:00
|
|
|
if_up(ifp, true);
|
2017-09-15 17:47:35 +02:00
|
|
|
}
|
2016-04-08 15:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
2015-10-09 20:18:09 +02:00
|
|
|
}
|
2016-04-22 00:39:38 +02:00
|
|
|
|
|
|
|
/*
 * Initialize per-interface PTM state: no explicit enable/disable has
 * been configured yet.
 */
void zebra_ptm_if_init(struct zebra_if *zebra_ifp)
{
	zebra_ifp->ptm_enable = ZEBRA_IF_PTM_ENABLE_UNSPEC;
}
|
|
|
|
|
|
|
|
void zebra_ptm_if_set_ptm_state(struct interface *ifp,
|
|
|
|
struct zebra_if *zebra_ifp)
|
|
|
|
{
|
|
|
|
if (zebra_ifp && zebra_ifp->ptm_enable != ZEBRA_IF_PTM_ENABLE_UNSPEC)
|
|
|
|
ifp->ptm_enable = zebra_ifp->ptm_enable;
|
|
|
|
}
|
|
|
|
|
|
|
|
void zebra_ptm_if_write(struct vty *vty, struct zebra_if *zebra_ifp)
|
|
|
|
{
|
|
|
|
if (zebra_ifp->ptm_enable == ZEBRA_IF_PTM_ENABLE_OFF)
|
2017-07-13 17:49:13 +02:00
|
|
|
vty_out(vty, " no ptm-enable\n");
|
2016-04-22 00:39:38 +02:00
|
|
|
}
|
2018-06-27 18:40:50 +02:00
|
|
|
|
|
|
|
#else /* HAVE_BFDD */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Data structures.
|
|
|
|
*/
|
|
|
|
/*
 * Registration entry tying a client daemon's zebra connection to its
 * PID, so BFD sessions can be cleaned up when the daemon disconnects.
 */
struct ptm_process {
	/* Zebra connection of the registered client daemon. */
	struct zserv *pp_zs;
	/* PID the client reported at registration time. */
	pid_t pp_pid;

	TAILQ_ENTRY(ptm_process) pp_entry;
};
/* Head of the process registration queue. */
TAILQ_HEAD(ppqueue, ptm_process) ppqueue;

DEFINE_MTYPE_STATIC(ZEBRA, ZEBRA_PTM_BFD_PROCESS,
		    "PTM BFD process reg table");
|
2018-06-27 18:40:50 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prototypes.
|
|
|
|
*/
|
|
|
|
/* Process registration table maintenance. */
static struct ptm_process *pp_new(pid_t pid, struct zserv *zs);
static struct ptm_process *pp_lookup_byzs(struct zserv *zs);
static void pp_free(struct ptm_process *pp);
static void pp_free_all(void);

/* Message fan-out and client disconnect handling. */
static void zebra_ptm_send_bfdd(struct stream *msg);
static void zebra_ptm_send_clients(struct stream *msg);
static int _zebra_ptm_bfd_client_deregister(struct zserv *zs);
static void _zebra_ptm_reroute(struct zserv *zs, struct zebra_vrf *zvrf,
			       struct stream *msg, uint32_t command);
|
2018-06-27 18:40:50 +02:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process PID registration.
|
|
|
|
*/
|
|
|
|
/*
 * Register (or look up) the process entry for zebra connection @zs.
 *
 * Idempotent per connection: if @zs is already registered the existing
 * entry is returned unchanged; otherwise a new entry recording @pid is
 * allocated and pushed onto the queue head.
 */
static struct ptm_process *pp_new(pid_t pid, struct zserv *zs)
{
	struct ptm_process *pp;

#ifdef PTM_DEBUG
	/* Sanity check: more than one client can't have the same PID. */
	TAILQ_FOREACH(pp, &ppqueue, pp_entry) {
		if (pp->pp_pid == pid && pp->pp_zs != zs)
			zlog_err("%s:%d pid and client pointer doesn't match",
				 __FILE__, __LINE__);
	}
#endif /* PTM_DEBUG */

	/* Already registered: hand back the existing entry. */
	pp = pp_lookup_byzs(zs);
	if (pp != NULL)
		return pp;

	/* First registration for this connection. */
	pp = XCALLOC(MTYPE_ZEBRA_PTM_BFD_PROCESS, sizeof(*pp));
	pp->pp_zs = zs;
	pp->pp_pid = pid;
	TAILQ_INSERT_HEAD(&ppqueue, pp, pp_entry);

	return pp;
}
|
|
|
|
|
|
|
|
static struct ptm_process *pp_lookup_byzs(struct zserv *zs)
|
|
|
|
{
|
|
|
|
struct ptm_process *pp;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(pp, &ppqueue, pp_entry) {
|
|
|
|
if (pp->pp_zs != zs)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return pp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pp_free(struct ptm_process *pp)
|
|
|
|
{
|
|
|
|
if (pp == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
TAILQ_REMOVE(&ppqueue, pp, pp_entry);
|
|
|
|
XFREE(MTYPE_ZEBRA_PTM_BFD_PROCESS, pp);
|
|
|
|
}
|
|
|
|
|
2018-07-25 18:39:58 +02:00
|
|
|
static void pp_free_all(void)
|
|
|
|
{
|
|
|
|
struct ptm_process *pp;
|
|
|
|
|
|
|
|
while (!TAILQ_EMPTY(&ppqueue)) {
|
|
|
|
pp = TAILQ_FIRST(&ppqueue);
|
|
|
|
pp_free(pp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-27 18:40:50 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the FRR's internal daemon implementation.
|
|
|
|
*/
|
|
|
|
/*
 * Use the FRR's internal daemon implementation.
 *
 * Replicate @msg to every connected bfdd client.  Ownership of @msg is
 * taken by this function: each zserv_send_message() call consumes the
 * current stream, so a fresh duplicate is made from the pristine copy
 * (msgc) for the next client, and both leftover streams are freed at
 * the end.
 */
static void zebra_ptm_send_bfdd(struct stream *msg)
{
	struct listnode *node;
	struct zserv *client;
	struct stream *msgc;

	/* Create copy for replication. */
	msgc = stream_dup(msg);

	/* Send message to all running BFDd daemons. */
	for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client)) {
		if (client->proto != ZEBRA_ROUTE_BFD)
			continue;

		zserv_send_message(client, msg);

		/* Allocate more messages. */
		msg = stream_dup(msgc);
	}

	/* Free the template and the unsent trailing duplicate. */
	stream_free(msgc);
	stream_free(msg);
}
|
|
|
|
|
|
|
|
/*
 * Replicate @msg to every connected client daemon that participates in
 * BFD (per IS_BFD_ENABLED_PROTOCOL).  Same ownership contract as
 * zebra_ptm_send_bfdd(): @msg is consumed per send, re-duplicated from
 * the pristine copy, and the leftovers are freed before returning.
 */
static void zebra_ptm_send_clients(struct stream *msg)
{
	struct listnode *node;
	struct zserv *client;
	struct stream *msgc;

	/* Create copy for replication. */
	msgc = stream_dup(msg);

	/* Send message to all running client daemons. */
	for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client)) {
		if (!IS_BFD_ENABLED_PROTOCOL(client->proto))
			continue;

		zserv_send_message(client, msg);

		/* Allocate more messages. */
		msg = stream_dup(msgc);
	}

	/* Free the template and the unsent trailing duplicate. */
	stream_free(msgc);
	stream_free(msg);
}
|
|
|
|
|
|
|
|
/*
 * zserv_client_close hook: when a BFD-using client daemon disconnects,
 * tell bfdd to drop that daemon's registration (by PID) so it stops
 * sending notifications for sessions nobody listens to.
 *
 * Returns 0 on success or when there is nothing to do, -1 when the
 * connection had no recorded PID registration.
 */
static int _zebra_ptm_bfd_client_deregister(struct zserv *zs)
{
	struct stream *msg;
	struct ptm_process *pp;

	/* Daemons that never use BFD need no cleanup. */
	if (!IS_BFD_ENABLED_PROTOCOL(zs->proto))
		return 0;

	/* Find daemon pid by zebra connection pointer. */
	pp = pp_lookup_byzs(zs);
	if (pp == NULL) {
		zlog_err("%s:%d failed to find process pid registration",
			 __FILE__, __LINE__);
		return -1;
	}

	/* Generate, send message and free() daemon related data. */
	msg = stream_new(ZEBRA_MAX_PACKET_SIZ);
	if (msg == NULL) {
		zlog_debug("%s: not enough memory", __func__);
		return 0;
	}

	/*
	 * The message type will be ZEBRA_BFD_DEST_REPLAY so we can use only
	 * one callback at the `bfdd` side, however the real command
	 * number will be included right after the zebra header.
	 */
	zclient_create_header(msg, ZEBRA_BFD_DEST_REPLAY, 0);
	stream_putl(msg, ZEBRA_BFD_CLIENT_DEREGISTER);

	/* Put process PID. */
	stream_putl(msg, pp->pp_pid);

	/* Update the data pointers. */
	stream_putw_at(msg, 0, stream_get_endp(msg));

	/* Ownership of msg passes to the send routine. */
	zebra_ptm_send_bfdd(msg);

	pp_free(pp);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Set up the bfdd-backed PTM shim: empty process registration queue
 * plus a client-close hook for automatic deregistration.
 */
void zebra_ptm_init(void)
{
	/* Initialize the ptm process information list. */
	TAILQ_INIT(&ppqueue);

	/*
	 * Send deregistration messages to BFD daemon when some other
	 * daemon closes. This will help avoid sending daemons
	 * unnecessary notification messages.
	 */
	hook_register(zserv_client_close, _zebra_ptm_bfd_client_deregister);
}
|
|
|
|
|
2018-07-25 18:39:58 +02:00
|
|
|
/* Tear down the bfdd-backed PTM shim at shutdown. */
void zebra_ptm_finish(void)
{
	/* Remove the client disconnect hook and free all memory. */
	hook_unregister(zserv_client_close, _zebra_ptm_bfd_client_deregister);
	pp_free_all();
}
|
|
|
|
|
2018-06-27 18:40:50 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Message handling.
|
|
|
|
*/
|
2019-03-26 10:54:09 +01:00
|
|
|
/*
 * Message handling.
 *
 * Forward a client's BFD request to bfdd.  The original ZAPI payload is
 * wrapped in a new ZEBRA_BFD_DEST_REPLAY message with the real
 * @command stored right after the zebra header (bfdd demuxes on that
 * embedded command).  On success the client's PID — the first word of
 * the remaining payload — is recorded for the disconnect cleanup hook.
 */
static void _zebra_ptm_reroute(struct zserv *zs, struct zebra_vrf *zvrf,
			       struct stream *msg, uint32_t command)
{
	struct stream *msgc;
	char buf[ZEBRA_MAX_PACKET_SIZ];
	pid_t ppid;

	/* Create BFD header */
	msgc = stream_new(ZEBRA_MAX_PACKET_SIZ);
	zclient_create_header(msgc, ZEBRA_BFD_DEST_REPLAY, zvrf->vrf->vrf_id);
	stream_putl(msgc, command);

	/* Payload plus the extra command word must fit in one packet. */
	if (STREAM_READABLE(msg) > STREAM_WRITEABLE(msgc)) {
		zlog_warn("Cannot fit extended BFD header plus original message contents into ZAPI packet; dropping message");
		goto stream_failure;
	}

	/* Copy original message, excluding header, into new message */
	stream_get_from(buf, msg, stream_get_getp(msg), STREAM_READABLE(msg));
	stream_put(msgc, buf, STREAM_READABLE(msg));

	/* Update length field */
	stream_putw_at(msgc, 0, STREAM_READABLE(msgc));

	/* Ownership of msgc passes to the send routine. */
	zebra_ptm_send_bfdd(msgc);
	msgc = NULL;

	/* Registrate process PID for shutdown hook. */
	STREAM_GETL(msg, ppid);
	pp_new(ppid, zs);

	return;

	/* Reached on short payload (STREAM_GETL) or oversize message. */
stream_failure:
	if (msgc)
		stream_free(msgc);
	zlog_err("%s:%d failed to registrate client pid", __FILE__, __LINE__);
}
|
|
|
|
|
|
|
|
/*
 * ZAPI handler: forward a client's BFD destination registration to
 * bfdd (client/hdr/msg/zvrf come from ZAPI_HANDLER_ARGS).
 */
void zebra_ptm_bfd_dst_register(ZAPI_HANDLER_ARGS)
{
	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("bfd_dst_register msg from client %s: length=%d",
			   zebra_route_string(client->proto), hdr->length);

	_zebra_ptm_reroute(client, zvrf, msg, ZEBRA_BFD_DEST_REGISTER);
}
|
|
|
|
|
|
|
|
/*
 * ZAPI handler: forward a client's BFD destination deregistration to
 * bfdd (client/hdr/msg/zvrf come from ZAPI_HANDLER_ARGS).
 */
void zebra_ptm_bfd_dst_deregister(ZAPI_HANDLER_ARGS)
{
	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("bfd_dst_deregister msg from client %s: length=%d",
			   zebra_route_string(client->proto), hdr->length);

	_zebra_ptm_reroute(client, zvrf, msg, ZEBRA_BFD_DEST_DEREGISTER);
}
|
|
|
|
|
|
|
|
/*
 * ZAPI handler: forward a client daemon's BFD client registration to
 * bfdd (client/hdr/msg/zvrf come from ZAPI_HANDLER_ARGS).
 */
void zebra_ptm_bfd_client_register(ZAPI_HANDLER_ARGS)
{
	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("bfd_client_register msg from client %s: length=%d",
			   zebra_route_string(client->proto), hdr->length);

	_zebra_ptm_reroute(client, zvrf, msg, ZEBRA_BFD_CLIENT_REGISTER);
}
|
|
|
|
|
|
|
|
/*
 * ZAPI handler for ZEBRA_BFD_DEST_REPLAY traffic in both directions:
 * requests from client daemons are rerouted to bfdd, while messages
 * originating from bfdd are rebuilt (with the embedded real command
 * promoted into the zebra header) and fanned out to the clients.
 */
void zebra_ptm_bfd_dst_replay(ZAPI_HANDLER_ARGS)
{
	struct stream *msgc;
	size_t zmsglen, zhdrlen;
	uint32_t cmd;

	/*
	 * NOTE:
	 * Replay messages with HAVE_BFDD are meant to be replayed to
	 * the client daemons. These messages are composed and
	 * originated from the `bfdd` daemon.
	 */
	if (IS_ZEBRA_DEBUG_EVENT)
		zlog_debug("bfd_dst_update msg from client %s: length=%d",
			   zebra_route_string(client->proto), hdr->length);

	/*
	 * Client messages must be re-routed, otherwise do the `bfdd`
	 * special treatment.
	 */
	if (client->proto != ZEBRA_ROUTE_BFD) {
		_zebra_ptm_reroute(client, zvrf, msg, ZEBRA_BFD_DEST_REPLAY);
		return;
	}

	/* Figure out if this is an DEST_UPDATE or DEST_REPLAY. */
	if (stream_getl2(msg, &cmd) == false) {
		zlog_err("%s: expected at least 4 bytes (command)", __func__);
		return;
	}

	/*
	 * Don't modify message in the zebra API. In order to do that we
	 * need to allocate a new message stream and copy the message
	 * provided by zebra.
	 */
	msgc = stream_new(ZEBRA_MAX_PACKET_SIZ);
	if (msgc == NULL) {
		zlog_debug("%s: not enough memory", __func__);
		return;
	}

	/* Calculate our header size plus the message contents. */
	if (cmd != ZEBRA_BFD_DEST_REPLAY) {
		/* Copy the payload first, then lay the new header over
		 * the start of the buffer and fix the stream pointers
		 * by hand. */
		zhdrlen = ZEBRA_HEADER_SIZE;
		zmsglen = msg->endp - msg->getp;
		memcpy(msgc->data + zhdrlen, msg->data + msg->getp, zmsglen);

		zclient_create_header(msgc, cmd, zvrf_id(zvrf));

		msgc->getp = 0;
		msgc->endp = zhdrlen + zmsglen;
	} else
		/* Pure replay: header only, no payload to carry over. */
		zclient_create_header(msgc, cmd, zvrf_id(zvrf));

	/* Update the data pointers. */
	stream_putw_at(msgc, 0, stream_get_endp(msgc));

	/* Ownership of msgc passes to the send routine. */
	zebra_ptm_send_clients(msgc);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unused functions.
|
|
|
|
*/
|
|
|
|
/*
 * Unused functions.
 */
/* PTM per-interface init is a no-op in bfdd mode. */
void zebra_ptm_if_init(struct zebra_if *zifp __attribute__((__unused__)))
{
	/* NOTHING */
}
|
|
|
|
|
|
|
|
/* External PTM is never enabled when bfdd provides BFD. */
int zebra_ptm_get_enable_state(void)
{
	return 0;
}
|
|
|
|
|
|
|
|
/* No PTM status to display in bfdd mode. */
void zebra_ptm_show_status(struct vty *vty __attribute__((__unused__)),
			   json_object *json __attribute__((__unused__)),
			   struct interface *ifp __attribute__((__unused__)))
{
	/* NOTHING */
}
|
|
|
|
|
|
|
|
/* No global PTM configuration to write in bfdd mode. */
void zebra_ptm_write(struct vty *vty __attribute__((__unused__)))
{
	/* NOTHING */
}
|
|
|
|
|
|
|
|
/* No per-interface PTM configuration to write in bfdd mode. */
void zebra_ptm_if_write(struct vty *vty __attribute__((__unused__)),
			struct zebra_if *zifp __attribute__((__unused__)))
{
	/* NOTHING */
}
|
|
|
|
/* No PTM state to propagate onto interfaces in bfdd mode. */
void zebra_ptm_if_set_ptm_state(struct interface *i __attribute__((__unused__)),
				struct zebra_if *zi __attribute__((__unused__)))
{
	/* NOTHING */
}
|
|
|
|
|
|
|
|
#endif /* HAVE_BFDD */
|