bgpd: Convert bgp_io.c to take struct peer_connection
bgp_io.c is clearly connection-oriented, so let's convert it over to using `struct peer_connection`.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
commit ccb51e8266
parent 84d1abd3d9
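
The conversion follows a single mechanical pattern: each bgp_io entry point that used to take a `struct peer *` now takes a `struct peer_connection *`, recovers the owning peer through the `connection->peer` back-pointer, and callers pass `&peer->connection` instead of `peer`. The same applies to the I/O event callbacks, which now get the connection back from `EVENT_ARG()` and derive the peer from it. The self-contained sketch below is illustrative only — the `demo_*` types and function are hypothetical stand-ins, not FRR code — and shows the shape of that conversion:

#include <stdio.h>

/* Hypothetical, stripped-down stand-ins for FRR's structures (illustration only). */
struct demo_peer;

struct demo_connection {
	struct demo_peer *peer; /* back-pointer to the owning peer */
	int fd;                 /* socket descriptor the I/O thread services */
};

struct demo_peer {
	const char *host;
	struct demo_connection connection; /* embedded, like peer->connection */
};

/* Old shape:  void demo_writes_on(struct demo_peer *peer)
 * New shape:  the connection is the argument; the peer is derived from it. */
static void demo_writes_on(struct demo_connection *connection)
{
	struct demo_peer *peer = connection->peer;

	printf("register writes for %s on fd %d\n", peer->host, connection->fd);
}

int main(void)
{
	struct demo_peer peer = { .host = "192.0.2.1" };

	peer.connection.peer = &peer; /* wire up the back-pointer */
	peer.connection.fd = 42;

	/* Callers that used to pass the peer now pass its embedded connection. */
	demo_writes_on(&peer.connection);
	return 0;
}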

bgpd/bgp_fsm.c

@@ -149,10 +149,10 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
 			   from_peer->host, from_peer, from_peer->connection.fd,
 			   peer, peer->connection.fd);

-	bgp_writes_off(peer);
-	bgp_reads_off(peer);
-	bgp_writes_off(from_peer);
-	bgp_reads_off(from_peer);
+	bgp_writes_off(&peer->connection);
+	bgp_reads_off(&peer->connection);
+	bgp_writes_off(&from_peer->connection);
+	bgp_reads_off(&from_peer->connection);

 	/*
 	 * Before exchanging FD remove doppelganger from
@@ -338,9 +338,9 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
 	if (from_peer)
 		bgp_replace_nexthop_by_peer(from_peer, peer);

-	bgp_reads_on(peer);
-	bgp_writes_on(peer);
-	event_add_event(bm->master, bgp_process_packet, peer, 0,
+	bgp_reads_on(&peer->connection);
+	bgp_writes_on(&peer->connection);
+	event_add_event(bm->master, bgp_process_packet, &peer->connection, 0,
 			&peer->t_process_packet);

 	return (peer);
@@ -1500,8 +1500,8 @@ enum bgp_fsm_state_progress bgp_stop(struct peer *peer)
 	bgp_keepalives_off(peer);

 	/* Stop read and write threads. */
-	bgp_writes_off(peer);
-	bgp_reads_off(peer);
+	bgp_writes_off(&peer->connection);
+	bgp_reads_off(&peer->connection);

 	EVENT_OFF(peer->t_connect_check_r);
 	EVENT_OFF(peer->t_connect_check_w);
@@ -1710,7 +1710,7 @@ static enum bgp_fsm_state_progress bgp_connect_success(struct peer *peer)
 			 __func__, peer->host, peer->connection.fd);
 		bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR,
 				bgp_fsm_error_subcode(peer->status));
-		bgp_writes_on(peer);
+		bgp_writes_on(&peer->connection);
 		return BGP_FSM_FAILURE;
 	}

@@ -1720,7 +1720,7 @@ static enum bgp_fsm_state_progress bgp_connect_success(struct peer *peer)
 	 */
 	bgp_nht_interface_events(peer);

-	bgp_reads_on(peer);
+	bgp_reads_on(&peer->connection);

 	if (bgp_debug_neighbor_events(peer)) {
 		if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))
@@ -1754,7 +1754,7 @@ bgp_connect_success_w_delayopen(struct peer *peer)
 			 __func__, peer->host, peer->connection.fd);
 		bgp_notify_send(peer, BGP_NOTIFY_FSM_ERR,
 				bgp_fsm_error_subcode(peer->status));
-		bgp_writes_on(peer);
+		bgp_writes_on(&peer->connection);
 		return BGP_FSM_FAILURE;
 	}

@@ -1764,7 +1764,7 @@ bgp_connect_success_w_delayopen(struct peer *peer)
 	 */
 	bgp_nht_interface_events(peer);

-	bgp_reads_on(peer);
+	bgp_reads_on(&peer->connection);

 	if (bgp_debug_neighbor_events(peer)) {
 		if (!CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER))

bgpd/bgp_io.c (132 changed lines)

@@ -29,11 +29,11 @@
 /* clang-format on */

 /* forward declarations */
-static uint16_t bgp_write(struct peer *);
-static uint16_t bgp_read(struct peer *peer, int *code_p);
+static uint16_t bgp_write(struct peer_connection *connection);
+static uint16_t bgp_read(struct peer_connection *connection, int *code_p);
 static void bgp_process_writes(struct event *event);
 static void bgp_process_reads(struct event *event);
-static bool validate_header(struct peer *);
+static bool validate_header(struct peer_connection *connection);

 /* generic i/o status codes */
 #define BGP_IO_TRANS_ERR (1 << 0) /* EAGAIN or similar occurred */
@@ -42,26 +42,29 @@ static bool validate_header(struct peer *);

 /* Thread external API ----------------------------------------------------- */

-void bgp_writes_on(struct peer *peer)
+void bgp_writes_on(struct peer_connection *connection)
 {
 	struct frr_pthread *fpt = bgp_pth_io;
+	struct peer *peer = connection->peer;
+
 	assert(fpt->running);

 	assert(peer->status != Deleted);
-	assert(peer->connection.obuf);
-	assert(peer->connection.ibuf);
-	assert(peer->connection.ibuf_work);
+	assert(connection->obuf);
+	assert(connection->ibuf);
+	assert(connection->ibuf_work);
 	assert(!peer->t_connect_check_r);
 	assert(!peer->t_connect_check_w);
-	assert(peer->connection.fd);
+	assert(connection->fd);

-	event_add_write(fpt->master, bgp_process_writes, peer,
-			peer->connection.fd, &peer->t_write);
+	event_add_write(fpt->master, bgp_process_writes, connection,
+			connection->fd, &peer->t_write);
 	SET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
 }

-void bgp_writes_off(struct peer *peer)
+void bgp_writes_off(struct peer_connection *connection)
 {
+	struct peer *peer = connection->peer;
 	struct frr_pthread *fpt = bgp_pth_io;
 	assert(fpt->running);

@@ -71,28 +74,30 @@ void bgp_writes_off(struct peer *peer)
 	UNSET_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON);
 }

-void bgp_reads_on(struct peer *peer)
+void bgp_reads_on(struct peer_connection *connection)
 {
+	struct peer *peer = connection->peer;
 	struct frr_pthread *fpt = bgp_pth_io;
 	assert(fpt->running);

 	assert(peer->status != Deleted);
-	assert(peer->connection.ibuf);
-	assert(peer->connection.fd);
-	assert(peer->connection.ibuf_work);
-	assert(peer->connection.obuf);
+	assert(connection->ibuf);
+	assert(connection->fd);
+	assert(connection->ibuf_work);
+	assert(connection->obuf);
 	assert(!peer->t_connect_check_r);
 	assert(!peer->t_connect_check_w);
-	assert(peer->connection.fd);
+	assert(connection->fd);

-	event_add_read(fpt->master, bgp_process_reads, peer,
-		       peer->connection.fd, &peer->t_read);
+	event_add_read(fpt->master, bgp_process_reads, connection,
+		       connection->fd, &peer->t_read);

 	SET_FLAG(peer->thread_flags, PEER_THREAD_READS_ON);
 }

-void bgp_reads_off(struct peer *peer)
+void bgp_reads_off(struct peer_connection *connection)
 {
+	struct peer *peer = connection->peer;
 	struct frr_pthread *fpt = bgp_pth_io;
 	assert(fpt->running);

@@ -111,19 +116,21 @@ void bgp_reads_off(struct peer *peer)
 static void bgp_process_writes(struct event *thread)
 {
 	static struct peer *peer;
-	peer = EVENT_ARG(thread);
+	struct peer_connection *connection = EVENT_ARG(thread);
 	uint16_t status;
 	bool reschedule;
 	bool fatal = false;

-	if (peer->connection.fd < 0)
+	peer = connection->peer;
+
+	if (connection->fd < 0)
 		return;

 	struct frr_pthread *fpt = bgp_pth_io;

-	frr_with_mutex (&peer->connection.io_mtx) {
-		status = bgp_write(peer);
-		reschedule = (stream_fifo_head(peer->connection.obuf) != NULL);
+	frr_with_mutex (&connection->io_mtx) {
+		status = bgp_write(connection);
+		reschedule = (stream_fifo_head(connection->obuf) != NULL);
 	}

 	/* no problem */
@@ -142,26 +149,26 @@ static void bgp_process_writes(struct event *thread)
 	 * sent in the update message
 	 */
 	if (reschedule) {
-		event_add_write(fpt->master, bgp_process_writes, peer,
-				peer->connection.fd, &peer->t_write);
+		event_add_write(fpt->master, bgp_process_writes, connection,
+				connection->fd, &peer->t_write);
 	} else if (!fatal) {
 		BGP_UPDATE_GROUP_TIMER_ON(&peer->t_generate_updgrp_packets,
 					  bgp_generate_updgrp_packets);
 	}
 }

-static int read_ibuf_work(struct peer *peer)
+static int read_ibuf_work(struct peer_connection *connection)
 {
 	/* static buffer for transferring packets */
 	/* shorter alias to peer's input buffer */
-	struct ringbuf *ibw = peer->connection.ibuf_work;
+	struct ringbuf *ibw = connection->ibuf_work;
 	/* packet size as given by header */
 	uint16_t pktsize = 0;
 	struct stream *pkt;

 	/* ============================================== */
-	frr_with_mutex (&peer->connection.io_mtx) {
-		if (peer->connection.ibuf->count >= bm->inq_limit)
+	frr_with_mutex (&connection->io_mtx) {
+		if (connection->ibuf->count >= bm->inq_limit)
 			return -ENOMEM;
 	}

@@ -170,7 +177,7 @@ static int read_ibuf_work(struct peer *peer)
 		return 0;

 	/* check that header is valid */
-	if (!validate_header(peer))
+	if (!validate_header(connection))
 		return -EBADMSG;

 	/* header is valid; retrieve packet size */
@@ -179,7 +186,7 @@ static int read_ibuf_work(struct peer *peer)
 	pktsize = ntohs(pktsize);

 	/* if this fails we are seriously screwed */
-	assert(pktsize <= peer->max_packet_size);
+	assert(pktsize <= connection->peer->max_packet_size);

 	/*
 	 * If we have that much data, chuck it into its own
@@ -195,9 +202,9 @@ static int read_ibuf_work(struct peer *peer)
 	assert(ringbuf_get(ibw, pkt->data, pktsize) == pktsize);
 	stream_set_endp(pkt, pktsize);

-	frrtrace(2, frr_bgp, packet_read, peer, pkt);
-	frr_with_mutex (&peer->connection.io_mtx) {
-		stream_fifo_push(peer->connection.ibuf, pkt);
+	frrtrace(2, frr_bgp, packet_read, connection->peer, pkt);
+	frr_with_mutex (&connection->io_mtx) {
+		stream_fifo_push(connection->ibuf, pkt);
 	}

 	return pktsize;
@@ -214,6 +221,7 @@ static int read_ibuf_work(struct peer *peer)
 static void bgp_process_reads(struct event *thread)
 {
 	/* clang-format off */
+	struct peer_connection *connection = EVENT_ARG(thread);
 	static struct peer *peer; /* peer to read from */
 	uint16_t status; /* bgp_read status code */
 	bool fatal = false; /* whether fatal error occurred */
@@ -223,15 +231,15 @@ static void bgp_process_reads(struct event *thread)
 	int ret = 1;
 	/* clang-format on */

-	peer = EVENT_ARG(thread);
+	peer = connection->peer;

-	if (bm->terminating || peer->connection.fd < 0)
+	if (bm->terminating || connection->fd < 0)
 		return;

 	struct frr_pthread *fpt = bgp_pth_io;

-	frr_with_mutex (&peer->connection.io_mtx) {
-		status = bgp_read(peer, &code);
+	frr_with_mutex (&connection->io_mtx) {
+		status = bgp_read(connection, &code);
 	}

 	/* error checking phase */
@@ -247,13 +255,13 @@ static void bgp_process_reads(struct event *thread)
 		/* Handle the error in the main pthread, include the
 		 * specific state change from 'bgp_read'.
 		 */
-		event_add_event(bm->master, bgp_packet_process_error, peer,
-				code, &peer->t_process_packet_error);
+		event_add_event(bm->master, bgp_packet_process_error,
+				connection, code, &peer->t_process_packet_error);
 		goto done;
 	}

 	while (true) {
-		ret = read_ibuf_work(peer);
+		ret = read_ibuf_work(connection);
 		if (ret <= 0)
 			break;

@@ -283,14 +291,14 @@ done:
 	/* handle invalid header */
 	if (fatal) {
 		/* wipe buffer just in case someone screwed up */
-		ringbuf_wipe(peer->connection.ibuf_work);
+		ringbuf_wipe(connection->ibuf_work);
 		return;
 	}

-	event_add_read(fpt->master, bgp_process_reads, peer,
-		       peer->connection.fd, &peer->t_read);
+	event_add_read(fpt->master, bgp_process_reads, peer, connection->fd,
+		       &peer->t_read);
 	if (added_pkt)
-		event_add_event(bm->master, bgp_process_packet, peer, 0,
+		event_add_event(bm->master, bgp_process_packet, connection, 0,
 				&peer->t_process_packet);
 }

@@ -307,8 +315,9 @@ done:
  * The return value is equal to the number of packets written
  * (which may be zero).
  */
-static uint16_t bgp_write(struct peer *peer)
+static uint16_t bgp_write(struct peer_connection *connection)
 {
+	struct peer *peer = connection->peer;
 	uint8_t type;
 	struct stream *s;
 	int update_last_write = 0;
@@ -330,7 +339,7 @@ static uint16_t bgp_write(struct peer *peer)
 	struct stream **streams = ostreams;
 	struct iovec iov[wpkt_quanta_old];

-	s = stream_fifo_head(peer->connection.obuf);
+	s = stream_fifo_head(connection->obuf);

 	if (!s)
 		goto done;
@@ -350,7 +359,7 @@ static uint16_t bgp_write(struct peer *peer)
 	total_written = 0;

 	do {
-		num = writev(peer->connection.fd, iov, iovsz);
+		num = writev(connection->fd, iov, iovsz);

 		if (num < 0) {
 			if (!ERRNO_IO_RETRY(errno)) {
@@ -399,7 +408,7 @@ static uint16_t bgp_write(struct peer *peer)

 	/* Handle statistics */
 	for (unsigned int i = 0; i < total_written; i++) {
-		s = stream_fifo_pop(peer->connection.obuf);
+		s = stream_fifo_pop(connection->obuf);

 		assert(s == ostreams[i]);

@@ -490,14 +499,14 @@ uint8_t ibuf_scratch[BGP_EXTENDED_MESSAGE_MAX_PACKET_SIZE * BGP_READ_PACKET_MAX]
  * per peer then we need to rethink the global ibuf_scratch
  * data structure above.
  */
-static uint16_t bgp_read(struct peer *peer, int *code_p)
+static uint16_t bgp_read(struct peer_connection *connection, int *code_p)
 {
 	size_t readsize; /* how many bytes we want to read */
 	ssize_t nbytes; /* how many bytes we actually read */
 	size_t ibuf_work_space; /* space we can read into the work buf */
 	uint16_t status = 0;

-	ibuf_work_space = ringbuf_space(peer->connection.ibuf_work);
+	ibuf_work_space = ringbuf_space(connection->ibuf_work);

 	if (ibuf_work_space == 0) {
 		SET_FLAG(status, BGP_IO_WORK_FULL_ERR);
@@ -506,7 +515,7 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)

 	readsize = MIN(ibuf_work_space, sizeof(ibuf_scratch));

-	nbytes = read(peer->connection.fd, ibuf_scratch, readsize);
+	nbytes = read(connection->fd, ibuf_scratch, readsize);

 	/* EAGAIN or EWOULDBLOCK; come back later */
 	if (nbytes < 0 && ERRNO_IO_RETRY(errno)) {
@@ -514,8 +523,8 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
 	} else if (nbytes < 0) {
 		/* Fatal error; tear down session */
 		flog_err(EC_BGP_UPDATE_RCV,
-			 "%s [Error] bgp_read_packet error: %s", peer->host,
-			 safe_strerror(errno));
+			 "%s [Error] bgp_read_packet error: %s",
+			 connection->peer->host, safe_strerror(errno));

 		/* Handle the error in the main pthread. */
 		if (code_p)
@@ -525,9 +534,9 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)

 	} else if (nbytes == 0) {
 		/* Received EOF / TCP session closed */
-		if (bgp_debug_neighbor_events(peer))
+		if (bgp_debug_neighbor_events(connection->peer))
 			zlog_debug("%s [Event] BGP connection closed fd %d",
-				   peer->host, peer->connection.fd);
+				   connection->peer->host, connection->fd);

 		/* Handle the error in the main pthread. */
 		if (code_p)
@@ -535,7 +544,7 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)

 		SET_FLAG(status, BGP_IO_FATAL_ERR);
 	} else {
-		assert(ringbuf_put(peer->connection.ibuf_work, ibuf_scratch,
+		assert(ringbuf_put(connection->ibuf_work, ibuf_scratch,
 				   nbytes) == (size_t)nbytes);
 	}

@@ -549,11 +558,12 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
 * Assumes that there are at least BGP_HEADER_SIZE readable bytes in the input
 * buffer.
 */
-static bool validate_header(struct peer *peer)
+static bool validate_header(struct peer_connection *connection)
 {
+	struct peer *peer = connection->peer;
 	uint16_t size;
 	uint8_t type;
-	struct ringbuf *pkt = peer->connection.ibuf_work;
+	struct ringbuf *pkt = connection->ibuf_work;

 	static const uint8_t m_correct[BGP_MARKER_SIZE] = {
 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,

bgpd/bgp_io.h

@@ -14,6 +14,8 @@
 #include "bgpd/bgpd.h"
 #include "frr_pthread.h"

+struct peer_connection;
+
 /**
  * Start function for write thread.
  *
@@ -33,57 +35,57 @@ extern int bgp_io_stop(void **result, struct frr_pthread *fpt);
 /**
  * Turns on packet writing for a peer.
  *
- * After this function is called, any packets placed on peer->obuf will be
- * written to peer->fd until no more packets remain.
+ * After this function is called, any packets placed on connection->obuf will be
+ * written to connection->fd until no more packets remain.
  *
- * Additionally, it becomes unsafe to perform socket actions on peer->fd.
+ * Additionally, it becomes unsafe to perform socket actions on connection->fd.
  *
  * @param peer - peer to register
  */
-extern void bgp_writes_on(struct peer *peer);
+extern void bgp_writes_on(struct peer_connection *peer);

 /**
  * Turns off packet writing for a peer.
  *
- * After this function returns, packets placed on peer->obuf will not be
- * written to peer->fd by the I/O thread.
+ * After this function returns, packets placed on connection->obuf will not be
+ * written to connection->fd by the I/O thread.
  *
  * After this function returns it becomes safe to perform socket actions on
- * peer->fd.
+ * connection->fd.
  *
- * @param peer - peer to deregister
+ * @param connection - connection to deregister
  * @param flush - as described
  */
-extern void bgp_writes_off(struct peer *peer);
+extern void bgp_writes_off(struct peer_connection *connection);

 /**
  * Turns on packet reading for a peer.
  *
- * After this function is called, any packets received on peer->fd will be read
- * and copied into the FIFO queue peer->ibuf.
+ * After this function is called, any packets received on connection->fd
+ * will be read and copied into the FIFO queue connection->ibuf.
  *
- * Additionally, it becomes unsafe to perform socket actions on peer->fd.
+ * Additionally, it becomes unsafe to perform socket actions on connection->fd.
  *
- * Whenever one or more packets are placed onto peer->ibuf, a task of type
+ * Whenever one or more packets are placed onto connection->ibuf, a task of type
  * THREAD_EVENT will be placed on the main thread whose handler is
  *
  *   bgp_packet.c:bgp_process_packet()
  *
- * @param peer - peer to register
+ * @param connection - The connection to register
  */
-extern void bgp_reads_on(struct peer *peer);
+extern void bgp_reads_on(struct peer_connection *connection);

 /**
  * Turns off packet reading for a peer.
  *
- * After this function is called, any packets received on peer->fd will not be
- * read by the I/O thread.
+ * After this function is called, any packets received on connection->fd
+ * will not be read by the I/O thread.
  *
  * After this function returns it becomes safe to perform socket actions on
- * peer->fd.
+ * connection->fd.
  *
- * @param peer - peer to deregister
+ * @param connection - The connection to register for
  */
-extern void bgp_reads_off(struct peer *peer);
+extern void bgp_reads_off(struct peer_connection *connection);

 #endif /* _FRR_BGP_IO_H */

bgpd/bgp_packet.c

@@ -608,7 +608,7 @@ void bgp_generate_updgrp_packets(struct event *thread)
 			    (peer->connection.obuf->count <= bm->outq_limit));

 		if (generated)
-			bgp_writes_on(peer);
+			bgp_writes_on(&peer->connection);

 		bgp_write_proceed_actions(peer);
 	}
@@ -637,7 +637,7 @@ void bgp_keepalive_send(struct peer *peer)
 	/* Add packet to the peer. */
 	bgp_packet_add(peer, s);

-	bgp_writes_on(peer);
+	bgp_writes_on(&peer->connection);
 }

 /*
@@ -706,7 +706,7 @@ void bgp_open_send(struct peer *peer)
 	/* Add packet to the peer. */
 	bgp_packet_add(peer, s);

-	bgp_writes_on(peer);
+	bgp_writes_on(&peer->connection);
 }

 /*
@@ -1181,7 +1181,7 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi,
 	/* Add packet to the peer. */
 	bgp_packet_add(peer, s);

-	bgp_writes_on(peer);
+	bgp_writes_on(&peer->connection);
 }

 /*
@@ -1299,7 +1299,7 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi,
 	/* Add packet to the peer. */
 	bgp_packet_add(peer, s);

-	bgp_writes_on(peer);
+	bgp_writes_on(&peer->connection);
 }

 /* RFC1771 6.8 Connection collision detection. */
@@ -2989,11 +2989,13 @@ void bgp_process_packet(struct event *thread)
 {
 	/* Yes first of all get peer pointer. */
 	struct peer *peer; // peer
+	struct peer_connection *connection;
 	uint32_t rpkt_quanta_old; // how many packets to read
 	int fsm_update_result; // return code of bgp_event_update()
 	int mprc; // message processing return code

-	peer = EVENT_ARG(thread);
+	connection = EVENT_ARG(thread);
+	peer = connection->peer;
 	rpkt_quanta_old = atomic_load_explicit(&peer->bgp->rpkt_quanta,
 					       memory_order_relaxed);
 	fsm_update_result = 0;
@@ -3009,8 +3011,8 @@ void bgp_process_packet(struct event *thread)
 		bgp_size_t size;
 		char notify_data_length[2];

-		frr_with_mutex (&peer->connection.io_mtx) {
-			peer->curr = stream_fifo_pop(peer->connection.ibuf);
+		frr_with_mutex (&connection->io_mtx) {
+			peer->curr = stream_fifo_pop(connection->ibuf);
 		}

 		if (peer->curr == NULL) // no packets to process, hmm...
@@ -3136,11 +3138,11 @@ void bgp_process_packet(struct event *thread)

 	if (fsm_update_result != FSM_PEER_TRANSFERRED
 	    && fsm_update_result != FSM_PEER_STOPPED) {
-		frr_with_mutex (&peer->connection.io_mtx) {
+		frr_with_mutex (&connection->io_mtx) {
 			// more work to do, come back later
-			if (peer->connection.ibuf->count > 0)
+			if (connection->ibuf->count > 0)
 				event_add_event(bm->master, bgp_process_packet,
-						peer, 0,
+						connection, 0,
 						&peer->t_process_packet);
 		}
 	}
@@ -3164,15 +3166,17 @@ void bgp_send_delayed_eor(struct bgp *bgp)
  */
 void bgp_packet_process_error(struct event *thread)
 {
+	struct peer_connection *connection;
 	struct peer *peer;
 	int code;

-	peer = EVENT_ARG(thread);
+	connection = EVENT_ARG(thread);
+	peer = connection->peer;
 	code = EVENT_VAL(thread);

 	if (bgp_debug_neighbor_events(peer))
 		zlog_debug("%s [Event] BGP error %d on fd %d", peer->host, code,
-			   peer->connection.fd);
+			   connection->fd);

 	/* Closed connection or error on the socket */
 	if (peer_established(peer)) {

bgpd/bgp_trace.h

@@ -54,9 +54,9 @@ PKT_PROCESS_TRACEPOINT_INSTANCE(refresh_process)
 TRACEPOINT_EVENT(
 	frr_bgp,
 	packet_read,
-	TP_ARGS(struct peer *, peer, struct stream *, pkt),
+	TP_ARGS(struct peer_connection *, connection, struct stream *, pkt),
 	TP_FIELDS(
-		ctf_string(peer, PEER_HOSTNAME(peer))
+		ctf_string(peer, PEER_HOSTNAME(connection->peer))
 		ctf_sequence_hex(uint8_t, packet, pkt->data, size_t,
 				 STREAM_READABLE(pkt))
 	)

bgpd/bgpd.c

@@ -1179,8 +1179,8 @@ static void peer_free(struct peer *peer)
 	 * but just to be sure..
 	 */
 	bgp_timer_set(peer);
-	bgp_reads_off(peer);
-	bgp_writes_off(peer);
+	bgp_reads_off(&peer->connection);
+	bgp_writes_off(&peer->connection);
 	event_cancel_event_ready(bm->master, peer);
 	FOREACH_AFI_SAFI (afi, safi)
 		EVENT_OFF(peer->t_revalidate_all[afi][safi]);
@@ -2562,8 +2562,8 @@ int peer_delete(struct peer *peer)
 	bgp_soft_reconfig_table_task_cancel(bgp, NULL, peer);

 	bgp_keepalives_off(peer);
-	bgp_reads_off(peer);
-	bgp_writes_off(peer);
+	bgp_reads_off(&peer->connection);
+	bgp_writes_off(&peer->connection);
 	event_cancel_event_ready(bm->master, peer);
 	FOREACH_AFI_SAFI (afi, safi)
 		EVENT_OFF(peer->t_revalidate_all[afi][safi]);