forked from Mirror/frr
bgpd: improve bgp thread startup characteristics
Replace atomic spinlock with condition variable.

Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
This commit is contained in:
parent 0bbb9e72f2
commit f09a656d24
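To make the change easier to follow, here is a minimal standalone sketch of the two startup-wait patterns involved (illustrative names and plain pthreads only, not the FRR code or its thread library): the old scheme spins on an atomic flag that the child thread sets once it is up, while the new scheme blocks on a condition variable that the child signals under a mutex.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Old pattern: busy-wait (spin) on an atomic flag. */
static _Atomic bool started;

static void *worker_spin(void *arg)
{
	atomic_store(&started, true);	/* child announces readiness */
	return NULL;			/* ... event loop would follow */
}

static void wait_spin(void)
{
	while (!atomic_load(&started))	/* parent burns CPU until then */
		sched_yield();
}

/* New pattern: block on a condition variable. The readiness flag is
 * signalled under a mutex and the waiter re-checks it in a loop, so
 * spurious wakeups are harmless and the waiter does not spin. */
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool running;

static void *worker_cond(void *arg)
{
	pthread_mutex_lock(&mtx);
	running = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&mtx);
	return NULL;			/* ... event loop would follow */
}

static void wait_cond(void)
{
	pthread_mutex_lock(&mtx);
	while (!running)
		pthread_cond_wait(&cond, &mtx);
	pthread_mutex_unlock(&mtx);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker_cond, NULL);
	wait_cond();			/* returns only once the worker is up */
	pthread_join(t, NULL);
	return 0;
}

In the diff below, the same roles are played by bgp_io_init() and bgp_io_wait_running() on the main thread and by bgp_io_start() on the I/O thread, with running_cond and running_cond_mtx allocated dynamically and torn down in bgp_io_stop().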
@@ -50,20 +50,37 @@ static bool validate_header(struct peer *);
 #define BGP_IO_TRANS_ERR (1 << 0) // EAGAIN or similar occurred
 #define BGP_IO_FATAL_ERR (1 << 1) // some kind of fatal TCP error
 
-/* Start and stop routines for I/O pthread + control variables
+/* Plumbing & control variables for thread lifecycle
  * ------------------------------------------------------------------------ */
-_Atomic bool bgp_io_thread_run;
-_Atomic bool bgp_io_thread_started;
-
-void bgp_io_init()
-{
-	bgp_io_thread_run = false;
-	bgp_io_thread_started = false;
-}
+bool bgp_io_thread_run;
+pthread_mutex_t *running_cond_mtx;
+pthread_cond_t *running_cond;
 
 /* Unused callback for thread_add_read() */
 static int bgp_io_dummy(struct thread *thread) { return 0; }
 
+/* Poison pill task */
+static int bgp_io_finish(struct thread *thread)
+{
+	bgp_io_thread_run = false;
+	return 0;
+}
+
+/* Extern lifecycle control functions. init -> start -> stop
+ * ------------------------------------------------------------------------ */
+void bgp_io_init()
+{
+	bgp_io_thread_run = false;
+
+	running_cond_mtx = XCALLOC(MTYPE_PTHREAD_PRIM, sizeof(pthread_mutex_t));
+	running_cond = XCALLOC(MTYPE_PTHREAD_PRIM, sizeof(pthread_cond_t));
+
+	pthread_mutex_init(running_cond_mtx, NULL);
+	pthread_cond_init(running_cond, NULL);
+
+	pthread_mutex_lock(running_cond_mtx);
+}
 
 void *bgp_io_start(void *arg)
 {
 	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
@@ -79,9 +96,12 @@ void *bgp_io_start(void *arg)
 
 	struct thread task;
 
-	atomic_store_explicit(&bgp_io_thread_run, true, memory_order_seq_cst);
-	atomic_store_explicit(&bgp_io_thread_started, true,
-			      memory_order_seq_cst);
+	bgp_io_thread_run = true;
+	pthread_mutex_lock(running_cond_mtx);
+	{
+		pthread_cond_signal(running_cond);
+	}
+	pthread_mutex_unlock(running_cond_mtx);
 
 	while (bgp_io_thread_run) {
 		if (thread_fetch(fpt->master, &task)) {
@@ -95,29 +115,33 @@ void *bgp_io_start(void *arg)
 	return NULL;
 }
 
-static int bgp_io_finish(struct thread *thread)
+void bgp_io_wait_running()
 {
-	atomic_store_explicit(&bgp_io_thread_run, false, memory_order_seq_cst);
-	return 0;
+	while (!bgp_io_thread_run)
+		pthread_cond_wait(running_cond, running_cond_mtx);
 }
 
 int bgp_io_stop(void **result, struct frr_pthread *fpt)
 {
 	thread_add_event(fpt->master, &bgp_io_finish, NULL, 0, NULL);
 	pthread_join(fpt->thread, result);
+
+	pthread_mutex_unlock(running_cond_mtx);
+	pthread_mutex_destroy(running_cond_mtx);
+	pthread_cond_destroy(running_cond);
+
+	XFREE(MTYPE_PTHREAD_PRIM, running_cond_mtx);
+	XFREE(MTYPE_PTHREAD_PRIM, running_cond);
+
 	return 0;
 }
 
-/* Extern API -------------------------------------------------------------- */
-void bgp_io_running(void)
-{
-	while (!atomic_load_explicit(&bgp_io_thread_started,
-				     memory_order_seq_cst))
-		frr_pthread_yield();
-}
-
 void bgp_writes_on(struct peer *peer)
 {
+	assert(bgp_io_thread_run);
+
 	assert(peer->status != Deleted);
 	assert(peer->obuf);
 	assert(peer->ibuf);
@@ -135,6 +159,8 @@ void bgp_writes_on(struct peer *peer)
 
 void bgp_writes_off(struct peer *peer)
 {
+	assert(bgp_io_thread_run);
+
 	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
 
 	thread_cancel_async(fpt->master, &peer->t_write, NULL);
@@ -145,6 +171,8 @@ void bgp_writes_off(struct peer *peer)
 
 void bgp_reads_on(struct peer *peer)
 {
+	assert(bgp_io_thread_run);
+
 	assert(peer->status != Deleted);
 	assert(peer->ibuf);
 	assert(peer->fd);
@@ -164,6 +192,8 @@ void bgp_reads_on(struct peer *peer)
 
 void bgp_reads_off(struct peer *peer)
 {
+	assert(bgp_io_thread_run);
+
 	struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
 
 	thread_cancel_async(fpt->master, &peer->t_read, NULL);
@@ -31,21 +31,11 @@
 /**
  * Initializes data structures and flags for the write thread.
  *
- * This function should be called from the main thread before
+ * This function must be called from the main thread before
  * bgp_writes_start() is invoked.
  */
 extern void bgp_io_init(void);
 
-/**
- * Ensure that the BGP IO thread is actually up and running
- *
- * This function must be called immediately after the thread
- * has been created for running. This is because we want
- * to make sure that the io thread is ready before other
- * threads start attempting to use it.
- */
-extern void bgp_io_running(void);
-
 /**
  * Start function for write thread.
  *
@@ -53,6 +43,15 @@ extern void bgp_io_running(void);
  */
 extern void *bgp_io_start(void *arg);
 
+/**
+ * Wait until the IO thread is ready to accept jobs.
+ *
+ * This function must be called immediately after the thread has been created
+ * for running. Use of other functions before calling this one will result in
+ * undefined behavior.
+ */
+extern void bgp_io_wait_running(void);
+
 /**
  * Start function for write thread.
  *
@@ -7438,13 +7438,11 @@ void bgp_pthreads_run()
 	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
 
 	/*
-	 * Please ensure that the io thread is running
-	 * by calling bgp_io_running. The BGP threads
-	 * depend on it being running when we start
-	 * looking for it.
+	 * I/O related code assumes the thread is ready for work at all times,
+	 * so we wait until it is.
 	 */
 	frr_pthread_run(PTHREAD_IO, &attr, NULL);
-	bgp_io_running();
+	bgp_io_wait_running();
 
 	frr_pthread_run(PTHREAD_KEEPALIVES, &attr, NULL);
 }
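The shutdown half of the lifecycle, visible in the hunks above, uses a "poison pill": bgp_io_stop() posts bgp_io_finish() onto the I/O thread's own event queue with thread_add_event(), the I/O thread eventually runs it, bgp_io_thread_run goes false, the while loop in bgp_io_start() exits, and only after pthread_join() returns does bgp_io_stop() destroy and free the mutex and condition variable. A condensed, illustrative sketch of that shutdown pattern follows, using a hypothetical one-slot task queue in place of FRR's thread_* library; every name in it is made up for the example.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

typedef void (*task_fn)(void);

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static task_fn pending;		/* one-slot stand-in for an event queue */

static bool worker_run = true;	/* touched only from the worker thread */

static void post_task(task_fn fn)
{
	pthread_mutex_lock(&qlock);
	pending = fn;
	pthread_mutex_unlock(&qlock);
}

/* The poison pill: it executes on the worker and flips its loop flag. */
static void finish_task(void)
{
	worker_run = false;
}

static void *worker(void *arg)
{
	while (worker_run) {
		task_fn fn;

		pthread_mutex_lock(&qlock);
		fn = pending;
		pending = NULL;
		pthread_mutex_unlock(&qlock);

		if (fn)
			fn();		/* run queued work, possibly the pill */
		else
			usleep(1000);	/* idle; FRR instead blocks in thread_fetch() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	post_task(finish_task);	/* ask the worker to stop itself ... */
	pthread_join(t, NULL);	/* ... then wait for it to exit */
	return 0;
}

Because the pill runs on the worker thread itself, the loop flag never needs cross-thread synchronization; the same holds for bgp_io_thread_run once the startup handshake is done.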