Mirror of https://github.com/FRRouting/frr.git
lib: add frr_with_mutex() block-wrapper
frr_with_mutex(...) { ... } locks and automatically unlocks the listed mutex(es) when the block is exited. This adds a bit of safety against forgetting the unlock in error paths & co. and makes the code a slight bit more readable.

Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
This commit is contained in:
parent 48373d46f1
commit 00dffa8cde
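Before the diff itself, a minimal usage sketch of the new wrapper (assuming only a plain pthread_mutex_t plus the lib/frr_pthread.h additions shown further down; counter_bump() and its names are hypothetical, for illustration only). The same guarantee also covers early return from inside the block, which several converted call sites below rely on.

#include <pthread.h>
#include "frr_pthread.h"	/* provides frr_with_mutex() after this commit */

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* hypothetical helper, not part of this commit */
static int counter_bump(int max)
{
	frr_with_mutex(&mtx) {
		if (counter >= max)
			break;		/* leaves the block; mtx is unlocked */
		counter++;
	}
	return counter;			/* mtx already released here */
}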
@@ -28,6 +28,8 @@ ForEachMacros:
   - frr_each
   - frr_each_safe
   - frr_each_from
+  - frr_with_mutex
+  - frr_elevate_privs
   - LIST_FOREACH
   - LIST_FOREACH_SAFE
   - SLIST_FOREACH
@@ -179,9 +179,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
	 * on various buffers. Those need to be transferred or dropped,
	 * otherwise we'll get spurious failures during session establishment.
	 */
-	pthread_mutex_lock(&peer->io_mtx);
-	pthread_mutex_lock(&from_peer->io_mtx);
-	{
+	frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
		fd = peer->fd;
		peer->fd = from_peer->fd;
		from_peer->fd = fd;
@@ -222,8 +220,6 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
		ringbuf_copy(peer->ibuf_work, from_peer->ibuf_work,
			     ringbuf_remain(from_peer->ibuf_work));
	}
-	pthread_mutex_unlock(&from_peer->io_mtx);
-	pthread_mutex_unlock(&peer->io_mtx);

	peer->as = from_peer->as;
	peer->v_holdtime = from_peer->v_holdtime;
@@ -1166,8 +1162,7 @@ int bgp_stop(struct peer *peer)
	BGP_TIMER_OFF(peer->t_routeadv);

	/* Clear input and output buffer. */
-	pthread_mutex_lock(&peer->io_mtx);
-	{
+	frr_with_mutex(&peer->io_mtx) {
		if (peer->ibuf)
			stream_fifo_clean(peer->ibuf);
		if (peer->obuf)
@@ -1183,7 +1178,6 @@ int bgp_stop(struct peer *peer)
			peer->curr = NULL;
		}
	}
-	pthread_mutex_unlock(&peer->io_mtx);

	/* Close of file descriptor. */
	if (peer->fd >= 0) {
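The peer_xfer_conn() hunk above uses the multi-mutex form. A standalone sketch of that shape with hypothetical names (a_mtx/b_mtx and swap_values() are illustrations, not FRR code): every listed mutex is taken on entry and released however the block is left.

#include <pthread.h>
#include "frr_pthread.h"

static pthread_mutex_t a_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b_mtx = PTHREAD_MUTEX_INITIALIZER;
static int a_val, b_val;

/* hypothetical example, for illustration only */
static void swap_values(void)
{
	frr_with_mutex(&a_mtx, &b_mtx) {
		int tmp = a_val;

		a_val = b_val;
		b_val = tmp;
	}	/* both a_mtx and b_mtx are unlocked here */
}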
@@ -132,12 +132,10 @@ static int bgp_process_writes(struct thread *thread)

	struct frr_pthread *fpt = bgp_pth_io;

-	pthread_mutex_lock(&peer->io_mtx);
-	{
+	frr_with_mutex(&peer->io_mtx) {
		status = bgp_write(peer);
		reschedule = (stream_fifo_head(peer->obuf) != NULL);
	}
-	pthread_mutex_unlock(&peer->io_mtx);

	/* no problem */
	if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -184,11 +182,9 @@ static int bgp_process_reads(struct thread *thread)

	struct frr_pthread *fpt = bgp_pth_io;

-	pthread_mutex_lock(&peer->io_mtx);
-	{
+	frr_with_mutex(&peer->io_mtx) {
		status = bgp_read(peer);
	}
-	pthread_mutex_unlock(&peer->io_mtx);

	/* error checking phase */
	if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -237,11 +233,9 @@ static int bgp_process_reads(struct thread *thread)
			assert(ringbuf_get(ibw, pktbuf, pktsize) == pktsize);
			stream_put(pkt, pktbuf, pktsize);

-			pthread_mutex_lock(&peer->io_mtx);
-			{
+			frr_with_mutex(&peer->io_mtx) {
				stream_fifo_push(peer->ibuf, pkt);
			}
-			pthread_mutex_unlock(&peer->io_mtx);

			added_pkt = true;
		} else
@@ -245,8 +245,7 @@ void bgp_keepalives_on(struct peer *peer)
	 */
	assert(peerhash_mtx);

-	pthread_mutex_lock(peerhash_mtx);
-	{
+	frr_with_mutex(peerhash_mtx) {
		holder.peer = peer;
		if (!hash_lookup(peerhash, &holder)) {
			struct pkat *pkat = pkat_new(peer);
@@ -255,7 +254,6 @@ void bgp_keepalives_on(struct peer *peer)
		}
		SET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
	}
-	pthread_mutex_unlock(peerhash_mtx);
	bgp_keepalives_wake();
}

@@ -275,8 +273,7 @@ void bgp_keepalives_off(struct peer *peer)
	 */
	assert(peerhash_mtx);

-	pthread_mutex_lock(peerhash_mtx);
-	{
+	frr_with_mutex(peerhash_mtx) {
		holder.peer = peer;
		struct pkat *res = hash_release(peerhash, &holder);
		if (res) {
@@ -285,16 +282,13 @@ void bgp_keepalives_off(struct peer *peer)
		}
		UNSET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
	}
-	pthread_mutex_unlock(peerhash_mtx);
}

void bgp_keepalives_wake(void)
{
-	pthread_mutex_lock(peerhash_mtx);
-	{
+	frr_with_mutex(peerhash_mtx) {
		pthread_cond_signal(peerhash_cond);
	}
-	pthread_mutex_unlock(peerhash_mtx);
}

int bgp_keepalives_stop(struct frr_pthread *fpt, void **result)
@@ -124,9 +124,9 @@ int bgp_packet_set_size(struct stream *s)
 */
static void bgp_packet_add(struct peer *peer, struct stream *s)
{
-	pthread_mutex_lock(&peer->io_mtx);
-	stream_fifo_push(peer->obuf, s);
-	pthread_mutex_unlock(&peer->io_mtx);
+	frr_with_mutex(&peer->io_mtx) {
+		stream_fifo_push(peer->obuf, s);
+	}
}

static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,
@@ -665,7 +665,7 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
	struct stream *s;

	/* Lock I/O mutex to prevent other threads from pushing packets */
-	pthread_mutex_lock(&peer->io_mtx);
+	frr_mutex_lock_autounlock(&peer->io_mtx);
	/* ============================================== */

	/* Allocate new stream. */
@@ -756,9 +756,6 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
	stream_fifo_push(peer->obuf, s);

	bgp_write_notify(peer);
-
-	/* ============================================== */
-	pthread_mutex_unlock(&peer->io_mtx);
}

/*
@@ -2237,11 +2234,9 @@ int bgp_process_packet(struct thread *thread)
	bgp_size_t size;
	char notify_data_length[2];

-	pthread_mutex_lock(&peer->io_mtx);
-	{
+	frr_with_mutex(&peer->io_mtx) {
		peer->curr = stream_fifo_pop(peer->ibuf);
	}
-	pthread_mutex_unlock(&peer->io_mtx);

	if (peer->curr == NULL) // no packets to process, hmm...
		return 0;
@@ -2360,15 +2355,13 @@ int bgp_process_packet(struct thread *thread)

	if (fsm_update_result != FSM_PEER_TRANSFERRED
	    && fsm_update_result != FSM_PEER_STOPPED) {
-		pthread_mutex_lock(&peer->io_mtx);
-		{
+		frr_with_mutex(&peer->io_mtx) {
			// more work to do, come back later
			if (peer->ibuf->count > 0)
				thread_add_timer_msec(
					bm->master, bgp_process_packet, peer, 0,
					&peer->t_process_packet);
		}
-		pthread_mutex_unlock(&peer->io_mtx);
	}

	return 0;
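bgp_notify_send_with_data() above is converted to the second variant, frr_mutex_lock_autounlock(), which keeps the mutex held until the enclosing function returns instead of scoping it to a braced block. A minimal sketch under hypothetical names (stats_mtx, tx_count and record_tx() are illustrations, not FRR symbols):

#include <pthread.h>
#include "frr_pthread.h"

static pthread_mutex_t stats_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned int tx_count;

/* hypothetical helper, for illustration only */
static int record_tx(int ok)
{
	frr_mutex_lock_autounlock(&stats_mtx);

	if (!ok)
		return -1;	/* stats_mtx released by the cleanup handler */

	tx_count++;
	return 0;		/* released here as well, at function exit */
}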
@@ -1282,8 +1282,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
	 * since this peer is not on the I/O thread, this lock is not strictly
	 * necessary, but serves as a reminder to those who may meddle...
	 */
-	pthread_mutex_lock(&rfd->peer->io_mtx);
-	{
+	frr_with_mutex(&rfd->peer->io_mtx) {
		// we don't need any I/O related facilities
		if (rfd->peer->ibuf)
			stream_fifo_free(rfd->peer->ibuf);
@@ -1300,7 +1299,6 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
		rfd->peer->obuf_work = NULL;
		rfd->peer->ibuf_work = NULL;
	}
-	pthread_mutex_unlock(&rfd->peer->io_mtx);

	{ /* base code assumes have valid host pointer */
		char buf[BUFSIZ];
@@ -191,8 +191,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
		 * is not strictly necessary, but serves as a reminder
		 * to those who may meddle...
		 */
-		pthread_mutex_lock(&vncHD1VR.peer->io_mtx);
-		{
+		frr_with_mutex(&vncHD1VR.peer->io_mtx) {
			// we don't need any I/O related facilities
			if (vncHD1VR.peer->ibuf)
				stream_fifo_free(vncHD1VR.peer->ibuf);
@@ -209,7 +208,6 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
			vncHD1VR.peer->obuf_work = NULL;
			vncHD1VR.peer->ibuf_work = NULL;
		}
-		pthread_mutex_unlock(&vncHD1VR.peer->io_mtx);

		/* base code assumes have valid host pointer */
		vncHD1VR.peer->host =
lib/ferr.c (21 changed lines)
@@ -33,6 +33,7 @@
 #include "command.h"
 #include "json.h"
 #include "linklist.h"
+#include "frr_pthread.h"

 DEFINE_MTYPE_STATIC(LIB, ERRINFO, "error information")

@@ -83,14 +84,12 @@ void log_ref_add(struct log_ref *ref)
{
	uint32_t i = 0;

-	pthread_mutex_lock(&refs_mtx);
-	{
+	frr_with_mutex(&refs_mtx) {
		while (ref[i].code != END_FERR) {
			hash_get(refs, &ref[i], hash_alloc_intern);
			i++;
		}
	}
-	pthread_mutex_unlock(&refs_mtx);
}

struct log_ref *log_ref_get(uint32_t code)
@@ -99,11 +98,9 @@ struct log_ref *log_ref_get(uint32_t code)
	struct log_ref *ref;

	holder.code = code;
-	pthread_mutex_lock(&refs_mtx);
-	{
+	frr_with_mutex(&refs_mtx) {
		ref = hash_lookup(refs, &holder);
	}
-	pthread_mutex_unlock(&refs_mtx);

	return ref;
}
@@ -118,11 +115,9 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
	if (json)
		top = json_object_new_object();

-	pthread_mutex_lock(&refs_mtx);
-	{
+	frr_with_mutex(&refs_mtx) {
		errlist = code ? list_new() : hash_to_list(refs);
	}
-	pthread_mutex_unlock(&refs_mtx);

	if (code) {
		ref = log_ref_get(code);
@@ -189,23 +184,19 @@ DEFUN_NOSH(show_error_code,

void log_ref_init(void)
{
-	pthread_mutex_lock(&refs_mtx);
-	{
+	frr_with_mutex(&refs_mtx) {
		refs = hash_create(ferr_hash_key, ferr_hash_cmp,
				   "Error Reference Texts");
	}
-	pthread_mutex_unlock(&refs_mtx);
}

void log_ref_fini(void)
{
-	pthread_mutex_lock(&refs_mtx);
-	{
+	frr_with_mutex(&refs_mtx) {
		hash_clean(refs, NULL);
		hash_free(refs);
		refs = NULL;
	}
-	pthread_mutex_unlock(&refs_mtx);
}

void log_ref_vty_init(void)
@@ -49,21 +49,17 @@ static struct list *frr_pthread_list;

void frr_pthread_init(void)
{
-	pthread_mutex_lock(&frr_pthread_list_mtx);
-	{
+	frr_with_mutex(&frr_pthread_list_mtx) {
		frr_pthread_list = list_new();
		frr_pthread_list->del = (void (*)(void *))&frr_pthread_destroy;
	}
-	pthread_mutex_unlock(&frr_pthread_list_mtx);
}

void frr_pthread_finish(void)
{
-	pthread_mutex_lock(&frr_pthread_list_mtx);
-	{
+	frr_with_mutex(&frr_pthread_list_mtx) {
		list_delete(&frr_pthread_list);
	}
-	pthread_mutex_unlock(&frr_pthread_list_mtx);
}

struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
@@ -94,11 +90,9 @@ struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
	pthread_mutex_init(fpt->running_cond_mtx, NULL);
	pthread_cond_init(fpt->running_cond, NULL);

-	pthread_mutex_lock(&frr_pthread_list_mtx);
-	{
+	frr_with_mutex(&frr_pthread_list_mtx) {
		listnode_add(frr_pthread_list, fpt);
	}
-	pthread_mutex_unlock(&frr_pthread_list_mtx);

	return fpt;
}
@@ -162,23 +156,19 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)

void frr_pthread_wait_running(struct frr_pthread *fpt)
{
-	pthread_mutex_lock(fpt->running_cond_mtx);
-	{
+	frr_with_mutex(fpt->running_cond_mtx) {
		while (!fpt->running)
			pthread_cond_wait(fpt->running_cond,
					  fpt->running_cond_mtx);
	}
-	pthread_mutex_unlock(fpt->running_cond_mtx);
}

void frr_pthread_notify_running(struct frr_pthread *fpt)
{
-	pthread_mutex_lock(fpt->running_cond_mtx);
-	{
+	frr_with_mutex(fpt->running_cond_mtx) {
		fpt->running = true;
		pthread_cond_signal(fpt->running_cond);
	}
-	pthread_mutex_unlock(fpt->running_cond_mtx);
}

int frr_pthread_stop(struct frr_pthread *fpt, void **result)
@@ -190,14 +180,12 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)

void frr_pthread_stop_all(void)
{
-	pthread_mutex_lock(&frr_pthread_list_mtx);
-	{
+	frr_with_mutex(&frr_pthread_list_mtx) {
		struct listnode *n;
		struct frr_pthread *fpt;
		for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt))
			frr_pthread_stop(fpt, NULL);
	}
-	pthread_mutex_unlock(&frr_pthread_list_mtx);
}

/*
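frr_pthread_wait_running() above keeps its pthread_cond_wait() inside the block; the wrapper hands the condition variable the very same mutex, so the usual wait/signal pattern carries over unchanged. A reduced standalone sketch (ready_mtx/ready_cond and both helpers are hypothetical, not FRR code):

#include <pthread.h>
#include <stdbool.h>
#include "frr_pthread.h"

static pthread_mutex_t ready_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ready_cond = PTHREAD_COND_INITIALIZER;
static bool ready;

/* hypothetical waiter, mirroring frr_pthread_wait_running() */
static void wait_until_ready(void)
{
	frr_with_mutex(&ready_mtx) {
		while (!ready)
			pthread_cond_wait(&ready_cond, &ready_mtx);
	}
}

/* hypothetical signaller, mirroring frr_pthread_notify_running() */
static void mark_ready(void)
{
	frr_with_mutex(&ready_mtx) {
		ready = true;
		pthread_cond_signal(&ready_cond);
	}
}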
@@ -215,6 +215,54 @@ void frr_pthread_stop_all(void);
 #define pthread_condattr_setclock(A, B)
 #endif

+/* mutex auto-lock/unlock */
+
+/* variant 1:
+ * (for short blocks, multiple mutexes supported)
+ * break & return can be used for aborting the block
+ *
+ * frr_with_mutex(&mtx, &mtx2) {
+ *    if (error)
+ *       break;
+ *    ...
+ * }
+ */
+#define _frr_with_mutex(mutex)                                                 \
+	*NAMECTR(_mtx_) __attribute__((                                        \
+		unused, cleanup(_frr_mtx_unlock))) = _frr_mtx_lock(mutex),     \
+	/* end */
+
+#define frr_with_mutex(...)                                                    \
+	for (pthread_mutex_t MACRO_REPEAT(_frr_with_mutex, ##__VA_ARGS__)      \
+	     *_once = NULL; _once == NULL; _once = (void *)1)                  \
+	/* end */
+
+/* variant 2:
+ * (more suitable for long blocks, no extra indentation)
+ *
+ * frr_mutex_lock_autounlock(&mtx);
+ * ...
+ */
+#define frr_mutex_lock_autounlock(mutex)                                       \
+	pthread_mutex_t *NAMECTR(_mtx_)                                        \
+		__attribute__((unused, cleanup(_frr_mtx_unlock))) =            \
+			_frr_mtx_lock(mutex)                                   \
+	/* end */
+
+static inline pthread_mutex_t *_frr_mtx_lock(pthread_mutex_t *mutex)
+{
+	pthread_mutex_lock(mutex);
+	return mutex;
+}
+
+static inline void _frr_mtx_unlock(pthread_mutex_t **mutex)
+{
+	if (!*mutex)
+		return;
+	pthread_mutex_unlock(*mutex);
+	*mutex = NULL;
+}
+
 #ifdef __cplusplus
 }
 #endif
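The macros above lean on the GCC/Clang cleanup variable attribute; NAMECTR() and MACRO_REPEAT() are referenced but not added here, so they are assumed to be pre-existing FRR preprocessor helpers. A standalone sketch of just the underlying mechanism, without any FRR macros:

/* Illustration of __attribute__((cleanup)) on its own; not FRR code. */
#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **mtx)
{
	if (*mtx)
		pthread_mutex_unlock(*mtx);
}

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	{
		/* unlock_cleanup() runs automatically when 'held' goes out
		 * of scope - by falling off the end, break or return.
		 */
		pthread_mutex_t *held __attribute__((cleanup(unlock_cleanup)))
			= (pthread_mutex_lock(&demo_mtx), &demo_mtx);

		printf("holding demo_mtx\n");
	}	/* demo_mtx released here */

	return 0;
}

The frr_with_mutex() for-loop wrapper adds a one-shot _once pointer on top of this so the body runs exactly once while still giving break a well-defined meaning.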
@@ -28,6 +28,7 @@
 #include "vty.h"
 #include "command.h"
 #include "libfrr.h"
+#include "frr_pthread.h"

 DEFINE_MTYPE_STATIC(LIB, HASH, "Hash")
 DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket")
@@ -54,14 +55,12 @@ struct hash *hash_create_size(unsigned int size,
	hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
	hash->stats.empty = hash->size;

-	pthread_mutex_lock(&_hashes_mtx);
-	{
+	frr_with_mutex(&_hashes_mtx) {
		if (!_hashes)
			_hashes = list_new();

		listnode_add(_hashes, hash);
	}
-	pthread_mutex_unlock(&_hashes_mtx);

	return hash;
}
@@ -311,8 +310,7 @@ struct list *hash_to_list(struct hash *hash)

void hash_free(struct hash *hash)
{
-	pthread_mutex_lock(&_hashes_mtx);
-	{
+	frr_with_mutex(&_hashes_mtx) {
		if (_hashes) {
			listnode_delete(_hashes, hash);
			if (_hashes->count == 0) {
@@ -320,7 +318,6 @@ void hash_free(struct hash *hash)
			}
		}
	}
-	pthread_mutex_unlock(&_hashes_mtx);

	XFREE(MTYPE_HASH, hash->name);

lib/log.c (107 changed lines)
@@ -31,6 +31,7 @@
 #include "lib_errors.h"
 #include "lib/hook.h"
 #include "printfrr.h"
+#include "frr_pthread.h"

 #ifndef SUNOS_5
 #include <sys/un.h>
@@ -83,89 +84,70 @@ static int zlog_filter_lookup(const char *lookup)

void zlog_filter_clear(void)
{
-	pthread_mutex_lock(&loglock);
-	zlog_filter_count = 0;
-	pthread_mutex_unlock(&loglock);
+	frr_with_mutex(&loglock) {
+		zlog_filter_count = 0;
+	}
}

int zlog_filter_add(const char *filter)
{
-	pthread_mutex_lock(&loglock);
-
-	int ret = 0;
-
-	if (zlog_filter_count >= ZLOG_FILTERS_MAX) {
-		ret = 1;
-		goto done;
-	}
-
-	if (zlog_filter_lookup(filter) != -1) {
-		/* Filter already present */
-		ret = -1;
-		goto done;
-	}
-
-	strlcpy(zlog_filters[zlog_filter_count], filter,
-		sizeof(zlog_filters[0]));
-
-	if (zlog_filters[zlog_filter_count][0] == '\0') {
-		/* Filter was either empty or didn't get copied correctly */
-		ret = -1;
-		goto done;
-	}
-
-	zlog_filter_count++;
-
-done:
-	pthread_mutex_unlock(&loglock);
-	return ret;
+	frr_with_mutex(&loglock) {
+		if (zlog_filter_count >= ZLOG_FILTERS_MAX)
+			return 1;
+
+		if (zlog_filter_lookup(filter) != -1)
+			/* Filter already present */
+			return -1;
+
+		strlcpy(zlog_filters[zlog_filter_count], filter,
+			sizeof(zlog_filters[0]));
+
+		if (zlog_filters[zlog_filter_count][0] == '\0')
+			/* Filter was either empty or didn't get copied
+			 * correctly
+			 */
+			return -1;
+
+		zlog_filter_count++;
+	}
+	return 0;
}

int zlog_filter_del(const char *filter)
{
-	pthread_mutex_lock(&loglock);
-
-	int found_idx = zlog_filter_lookup(filter);
-	int last_idx = zlog_filter_count - 1;
-	int ret = 0;
-
-	if (found_idx == -1) {
-		/* Didn't find the filter to delete */
-		ret = -1;
-		goto done;
-	}
-
-	/* Adjust the filter array */
-	memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
-		(last_idx - found_idx) * sizeof(zlog_filters[0]));
-
-	zlog_filter_count--;
-
-done:
-	pthread_mutex_unlock(&loglock);
-	return ret;
+	frr_with_mutex(&loglock) {
+		int found_idx = zlog_filter_lookup(filter);
+		int last_idx = zlog_filter_count - 1;
+
+		if (found_idx == -1)
+			/* Didn't find the filter to delete */
+			return -1;
+
+		/* Adjust the filter array */
+		memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
+			(last_idx - found_idx) * sizeof(zlog_filters[0]));
+
+		zlog_filter_count--;
+	}
+	return 0;
}

/* Dump all filters to buffer, delimited by new line */
int zlog_filter_dump(char *buf, size_t max_size)
{
-	pthread_mutex_lock(&loglock);
-
-	int ret = 0;
	int len = 0;

-	for (int i = 0; i < zlog_filter_count; i++) {
-		ret = snprintf(buf + len, max_size - len, " %s\n",
-			       zlog_filters[i]);
-		len += ret;
-		if ((ret < 0) || ((size_t)len >= max_size)) {
-			len = -1;
-			goto done;
+	frr_with_mutex(&loglock) {
+		for (int i = 0; i < zlog_filter_count; i++) {
+			int ret;
+			ret = snprintf(buf + len, max_size - len, " %s\n",
+				       zlog_filters[i]);
+			len += ret;
+			if ((ret < 0) || ((size_t)len >= max_size))
+				return -1;
		}
	}

-done:
-	pthread_mutex_unlock(&loglock);
	return len;
}
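The zlog_filter_* rewrites above replace the ret/goto done/unlock pattern with plain return statements inside the block, which only works because the cleanup handler releases the mutex on any function exit. A reduced sketch of the same idiom (tbl_mtx, table[] and table_find() are hypothetical):

#include <pthread.h>
#include "frr_pthread.h"

static pthread_mutex_t tbl_mtx = PTHREAD_MUTEX_INITIALIZER;
static int table[8];
static int table_len;

/* hypothetical lookup, for illustration only */
static int table_find(int key)
{
	frr_with_mutex(&tbl_mtx) {
		for (int i = 0; i < table_len; i++)
			if (table[i] == key)
				return i;	/* tbl_mtx released on return */
	}
	return -1;				/* and released here as well */
}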
@@ -363,7 +345,7 @@ search:
/* va_list version of zlog. */
void vzlog(int priority, const char *format, va_list args)
{
-	pthread_mutex_lock(&loglock);
+	frr_mutex_lock_autounlock(&loglock);

	char proto_str[32] = "";
	int original_errno = errno;
@@ -430,36 +412,31 @@ out:
	if (msg != buf)
		XFREE(MTYPE_TMP, msg);
	errno = original_errno;
-	pthread_mutex_unlock(&loglock);
}

int vzlog_test(int priority)
{
-	pthread_mutex_lock(&loglock);
-
-	int ret = 0;
+	frr_mutex_lock_autounlock(&loglock);

	struct zlog *zl = zlog_default;

	/* When zlog_default is also NULL, use stderr for logging. */
	if (zl == NULL)
-		ret = 1;
+		return 1;
	/* Syslog output */
	else if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG])
-		ret = 1;
+		return 1;
	/* File output. */
	else if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp)
-		ret = 1;
+		return 1;
	/* stdout output. */
	else if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT])
-		ret = 1;
+		return 1;
	/* Terminal monitor. */
	else if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR])
-		ret = 1;
+		return 1;

-	pthread_mutex_unlock(&loglock);
-
-	return ret;
+	return 0;
}

/*
@@ -870,9 +847,9 @@ void openzlog(const char *progname, const char *protoname,

	openlog(progname, syslog_flags, zl->facility);

-	pthread_mutex_lock(&loglock);
-	zlog_default = zl;
-	pthread_mutex_unlock(&loglock);
+	frr_with_mutex(&loglock) {
+		zlog_default = zl;
+	}

 #ifdef HAVE_GLIBC_BACKTRACE
	/* work around backtrace() using lazily resolved dynamically linked
@@ -889,7 +866,8 @@ void openzlog(const char *progname, const char *protoname,

void closezlog(void)
{
-	pthread_mutex_lock(&loglock);
+	frr_mutex_lock_autounlock(&loglock);
+
	struct zlog *zl = zlog_default;

	closelog();
@@ -901,15 +879,14 @@ void closezlog(void)

	XFREE(MTYPE_ZLOG, zl);
	zlog_default = NULL;
-	pthread_mutex_unlock(&loglock);
}

/* Called from command.c. */
void zlog_set_level(zlog_dest_t dest, int log_level)
{
-	pthread_mutex_lock(&loglock);
-	zlog_default->maxlvl[dest] = log_level;
-	pthread_mutex_unlock(&loglock);
+	frr_with_mutex(&loglock) {
+		zlog_default->maxlvl[dest] = log_level;
+	}
}

int zlog_set_file(const char *filename, int log_level)
@@ -929,7 +906,7 @@ int zlog_set_file(const char *filename, int log_level)
	if (fp == NULL) {
		ret = 0;
	} else {
-		pthread_mutex_lock(&loglock);
+		frr_with_mutex(&loglock) {
		zl = zlog_default;

		/* Set flags. */
@@ -937,7 +914,7 @@ int zlog_set_file(const char *filename, int log_level)
		zl->maxlvl[ZLOG_DEST_FILE] = log_level;
		zl->fp = fp;
		logfile_fd = fileno(fp);
-		pthread_mutex_unlock(&loglock);
+		}
	}

	return ret;
@@ -946,7 +923,7 @@ int zlog_set_file(const char *filename, int log_level)
/* Reset opend file. */
int zlog_reset_file(void)
{
-	pthread_mutex_lock(&loglock);
+	frr_mutex_lock_autounlock(&loglock);

	struct zlog *zl = zlog_default;

@@ -959,8 +936,6 @@ int zlog_reset_file(void)
	XFREE(MTYPE_ZLOG, zl->filename);
	zl->filename = NULL;

-	pthread_mutex_unlock(&loglock);
-
	return 1;
}
@@ -26,6 +26,7 @@
 #include "command.h"
 #include "debug.h"
 #include "db.h"
+#include "frr_pthread.h"
 #include "northbound.h"
 #include "northbound_cli.h"
 #include "northbound_db.h"
@@ -723,8 +724,7 @@ int nb_running_lock(enum nb_client client, const void *user)
{
	int ret = -1;

-	pthread_mutex_lock(&running_config_mgmt_lock.mtx);
-	{
+	frr_with_mutex(&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked) {
			running_config_mgmt_lock.locked = true;
			running_config_mgmt_lock.owner_client = client;
@@ -732,7 +732,6 @@ int nb_running_lock(enum nb_client client, const void *user)
			ret = 0;
		}
	}
-	pthread_mutex_unlock(&running_config_mgmt_lock.mtx);

	return ret;
}
@@ -741,8 +740,7 @@ int nb_running_unlock(enum nb_client client, const void *user)
{
	int ret = -1;

-	pthread_mutex_lock(&running_config_mgmt_lock.mtx);
-	{
+	frr_with_mutex(&running_config_mgmt_lock.mtx) {
		if (running_config_mgmt_lock.locked
		    && running_config_mgmt_lock.owner_client == client
		    && running_config_mgmt_lock.owner_user == user) {
@@ -752,7 +750,6 @@ int nb_running_unlock(enum nb_client client, const void *user)
			ret = 0;
		}
	}
-	pthread_mutex_unlock(&running_config_mgmt_lock.mtx);

	return ret;
}
@@ -761,14 +758,12 @@ int nb_running_lock_check(enum nb_client client, const void *user)
{
	int ret = -1;

-	pthread_mutex_lock(&running_config_mgmt_lock.mtx);
-	{
+	frr_with_mutex(&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked
		    || (running_config_mgmt_lock.owner_client == client
			&& running_config_mgmt_lock.owner_user == user))
			ret = 0;
	}
-	pthread_mutex_unlock(&running_config_mgmt_lock.mtx);

	return ret;
}
@@ -24,6 +24,7 @@
 #include "log.h"
 #include "privs.h"
 #include "memory.h"
+#include "frr_pthread.h"
 #include "lib_errors.h"
 #include "lib/queue.h"

@@ -760,8 +761,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
	 * Serialize 'raise' operations; particularly important for
	 * OSes where privs are process-wide.
	 */
-	pthread_mutex_lock(&(privs->mutex));
-	{
+	frr_with_mutex(&(privs->mutex)) {
		/* Locate ref-counting object to use */
		refs = get_privs_refs(privs);

@@ -775,7 +775,6 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
			refs->raised_in_funcname = funcname;
		}
	}
-	pthread_mutex_unlock(&(privs->mutex));

	return privs;
}
@@ -791,8 +790,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
	/* Serialize 'lower privs' operation - particularly important
	 * when OS privs are process-wide.
	 */
-	pthread_mutex_lock(&(*privs)->mutex);
-	{
+	frr_with_mutex(&(*privs)->mutex) {
		refs = get_privs_refs(*privs);

		if (--(refs->refcount) == 0) {
@@ -806,7 +804,6 @@ void _zprivs_lower(struct zebra_privs_t **privs)
			refs->raised_in_funcname = NULL;
		}
	}
-	pthread_mutex_unlock(&(*privs)->mutex);

	*privs = NULL;
}
lib/stream.c (17 changed lines)
@@ -28,6 +28,7 @@
 #include "network.h"
 #include "prefix.h"
 #include "log.h"
+#include "frr_pthread.h"
 #include "lib_errors.h"

 DEFINE_MTYPE_STATIC(LIB, STREAM, "Stream")
@@ -1136,11 +1137,9 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)

void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
{
-	pthread_mutex_lock(&fifo->mtx);
-	{
+	frr_with_mutex(&fifo->mtx) {
		stream_fifo_push(fifo, s);
	}
-	pthread_mutex_unlock(&fifo->mtx);
}

/* Delete first stream from fifo. */
@@ -1170,11 +1169,9 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
{
	struct stream *ret;

-	pthread_mutex_lock(&fifo->mtx);
-	{
+	frr_with_mutex(&fifo->mtx) {
		ret = stream_fifo_pop(fifo);
	}
-	pthread_mutex_unlock(&fifo->mtx);

	return ret;
}
@@ -1188,11 +1185,9 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
{
	struct stream *ret;

-	pthread_mutex_lock(&fifo->mtx);
-	{
+	frr_with_mutex(&fifo->mtx) {
		ret = stream_fifo_head(fifo);
	}
-	pthread_mutex_unlock(&fifo->mtx);

	return ret;
}
@@ -1212,11 +1207,9 @@ void stream_fifo_clean(struct stream_fifo *fifo)

void stream_fifo_clean_safe(struct stream_fifo *fifo)
{
-	pthread_mutex_lock(&fifo->mtx);
-	{
+	frr_with_mutex(&fifo->mtx) {
		stream_fifo_clean(fifo);
	}
-	pthread_mutex_unlock(&fifo->mtx);
}

size_t stream_fifo_count_safe(struct stream_fifo *fifo)
lib/thread.c (120 changed lines)
@@ -33,6 +33,7 @@
 #include "network.h"
 #include "jhash.h"
 #include "frratomic.h"
+#include "frr_pthread.h"
 #include "lib_errors.h"

 DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
@@ -173,8 +174,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
	tmp.funcname = "TOTAL";
	tmp.types = filter;

-	pthread_mutex_lock(&masters_mtx);
-	{
+	frr_with_mutex(&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			const char *name = m->name ? m->name : "main";

@@ -206,7 +206,6 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
			vty_out(vty, "\n");
		}
	}
-	pthread_mutex_unlock(&masters_mtx);

	vty_out(vty, "\n");
	vty_out(vty, "Total thread statistics\n");
@@ -240,11 +239,9 @@ static void cpu_record_clear(uint8_t filter)
	struct thread_master *m;
	struct listnode *ln;

-	pthread_mutex_lock(&masters_mtx);
-	{
+	frr_with_mutex(&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
-			pthread_mutex_lock(&m->mtx);
-			{
+			frr_with_mutex(&m->mtx) {
				void *args[2] = {tmp, m->cpu_record};
				hash_iterate(
					m->cpu_record,
@@ -252,10 +249,8 @@ static void cpu_record_clear(uint8_t filter)
						void *))cpu_record_hash_clear,
					args);
			}
-			pthread_mutex_unlock(&m->mtx);
		}
	}
-	pthread_mutex_unlock(&masters_mtx);
}

static uint8_t parse_filter(const char *filterstr)
@@ -370,13 +365,11 @@ DEFUN (show_thread_poll,
	struct listnode *node;
	struct thread_master *m;

-	pthread_mutex_lock(&masters_mtx);
-	{
+	frr_with_mutex(&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
			show_thread_poll_helper(vty, m);
		}
	}
-	pthread_mutex_unlock(&masters_mtx);

	return CMD_SUCCESS;
}
@@ -487,26 +480,22 @@ struct thread_master *thread_master_create(const char *name)
		 sizeof(struct pollfd) * rv->handler.pfdsize);

	/* add to list of threadmasters */
-	pthread_mutex_lock(&masters_mtx);
-	{
+	frr_with_mutex(&masters_mtx) {
		if (!masters)
			masters = list_new();

		listnode_add(masters, rv);
	}
-	pthread_mutex_unlock(&masters_mtx);

	return rv;
}

void thread_master_set_name(struct thread_master *master, const char *name)
{
-	pthread_mutex_lock(&master->mtx);
-	{
+	frr_with_mutex(&master->mtx) {
		XFREE(MTYPE_THREAD_MASTER, master->name);
		master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
	}
-	pthread_mutex_unlock(&master->mtx);
}

 #define THREAD_UNUSED_DEPTH 10
@@ -569,13 +558,11 @@ static void thread_array_free(struct thread_master *m,
 */
void thread_master_free_unused(struct thread_master *m)
{
-	pthread_mutex_lock(&m->mtx);
-	{
+	frr_with_mutex(&m->mtx) {
		struct thread *t;
		while ((t = thread_list_pop(&m->unuse)))
			thread_free(m, t);
	}
-	pthread_mutex_unlock(&m->mtx);
}

/* Stop thread scheduler. */
@@ -583,14 +570,12 @@ void thread_master_free(struct thread_master *m)
{
	struct thread *t;

-	pthread_mutex_lock(&masters_mtx);
-	{
+	frr_with_mutex(&masters_mtx) {
		listnode_delete(masters, m);
		if (masters->count == 0) {
			list_delete(&masters);
		}
	}
-	pthread_mutex_unlock(&masters_mtx);

	thread_array_free(m, m->read);
	thread_array_free(m, m->write);
@@ -621,11 +606,9 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
{
	int64_t remain;

-	pthread_mutex_lock(&thread->mtx);
-	{
+	frr_with_mutex(&thread->mtx) {
		remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
	}
-	pthread_mutex_unlock(&thread->mtx);

	return remain < 0 ? 0 : remain;
}
@@ -642,11 +625,9 @@ unsigned long thread_timer_remain_second(struct thread *thread)
struct timeval thread_timer_remain(struct thread *thread)
{
	struct timeval remain;
-	pthread_mutex_lock(&thread->mtx);
-	{
+	frr_with_mutex(&thread->mtx) {
		monotime_until(&thread->u.sands, &remain);
	}
-	pthread_mutex_unlock(&thread->mtx);
	return remain;
}

@@ -770,14 +751,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
	struct thread **thread_array;

	assert(fd >= 0 && fd < m->fd_limit);
-	pthread_mutex_lock(&m->mtx);
-	{
-		if (t_ptr
-		    && *t_ptr) // thread is already scheduled; don't reschedule
-		{
-			pthread_mutex_unlock(&m->mtx);
-			return NULL;
-		}
+	frr_with_mutex(&m->mtx) {
+		if (t_ptr && *t_ptr)
+			// thread is already scheduled; don't reschedule
+			break;

		/* default to a new pollfd */
		nfds_t queuepos = m->handler.pfdcount;
@@ -817,12 +794,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
		m->handler.pfdcount++;

		if (thread) {
-			pthread_mutex_lock(&thread->mtx);
-			{
+			frr_with_mutex(&thread->mtx) {
				thread->u.fd = fd;
				thread_array[thread->u.fd] = thread;
			}
-			pthread_mutex_unlock(&thread->mtx);

			if (t_ptr) {
				*t_ptr = thread;
@@ -832,7 +807,6 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,

		AWAKEN(m);
	}
-	pthread_mutex_unlock(&m->mtx);

	return thread;
}
@@ -850,19 +824,14 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
	assert(type == THREAD_TIMER);
	assert(time_relative);

-	pthread_mutex_lock(&m->mtx);
-	{
-		if (t_ptr
-		    && *t_ptr) // thread is already scheduled; don't reschedule
-		{
-			pthread_mutex_unlock(&m->mtx);
+	frr_with_mutex(&m->mtx) {
+		if (t_ptr && *t_ptr)
+			// thread is already scheduled; don't reschedule
			return NULL;
-		}

		thread = thread_get(m, type, func, arg, debugargpass);

-		pthread_mutex_lock(&thread->mtx);
-		{
+		frr_with_mutex(&thread->mtx) {
			monotime(&thread->u.sands);
			timeradd(&thread->u.sands, time_relative,
				 &thread->u.sands);
@@ -872,11 +841,9 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
				thread->ref = t_ptr;
			}
		}
-		pthread_mutex_unlock(&thread->mtx);

		AWAKEN(m);
	}
-	pthread_mutex_unlock(&m->mtx);

	return thread;
}
@@ -933,26 +900,20 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
					 void *arg, int val,
					 struct thread **t_ptr, debugargdef)
{
-	struct thread *thread;
+	struct thread *thread = NULL;

	assert(m != NULL);

-	pthread_mutex_lock(&m->mtx);
-	{
-		if (t_ptr
-		    && *t_ptr) // thread is already scheduled; don't reschedule
-		{
-			pthread_mutex_unlock(&m->mtx);
-			return NULL;
-		}
+	frr_with_mutex(&m->mtx) {
+		if (t_ptr && *t_ptr)
+			// thread is already scheduled; don't reschedule
+			break;

		thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
-		pthread_mutex_lock(&thread->mtx);
-		{
+		frr_with_mutex(&thread->mtx) {
			thread->u.val = val;
			thread_list_add_tail(&m->event, thread);
		}
-		pthread_mutex_unlock(&thread->mtx);

		if (t_ptr) {
			*t_ptr = thread;
@@ -961,7 +922,6 @@ struct thread *funcname_thread_add_event(struct thread_master *m,

		AWAKEN(m);
	}
-	pthread_mutex_unlock(&m->mtx);

	return thread;
}
@@ -1143,15 +1103,13 @@ void thread_cancel_event(struct thread_master *master, void *arg)
{
	assert(master->owner == pthread_self());

-	pthread_mutex_lock(&master->mtx);
-	{
+	frr_with_mutex(&master->mtx) {
		struct cancel_req *cr =
			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
		cr->eventobj = arg;
		listnode_add(master->cancel_req, cr);
		do_thread_cancel(master);
	}
-	pthread_mutex_unlock(&master->mtx);
}

/**
@@ -1167,15 +1125,13 @@ void thread_cancel(struct thread *thread)

	assert(master->owner == pthread_self());

-	pthread_mutex_lock(&master->mtx);
-	{
+	frr_with_mutex(&master->mtx) {
		struct cancel_req *cr =
			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
		cr->thread = thread;
		listnode_add(master->cancel_req, cr);
		do_thread_cancel(master);
	}
-	pthread_mutex_unlock(&master->mtx);
}

/**
@@ -1208,8 +1164,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
	assert(!(thread && eventobj) && (thread || eventobj));
	assert(master->owner != pthread_self());

-	pthread_mutex_lock(&master->mtx);
-	{
+	frr_with_mutex(&master->mtx) {
		master->canceled = false;

		if (thread) {
@@ -1228,7 +1183,6 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
		while (!master->canceled)
			pthread_cond_wait(&master->cancel_cond, &master->mtx);
	}
-	pthread_mutex_unlock(&master->mtx);
}
/* ------------------------------------------------------------------------- */

@@ -1527,22 +1481,18 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
int thread_should_yield(struct thread *thread)
{
	int result;
-	pthread_mutex_lock(&thread->mtx);
-	{
+	frr_with_mutex(&thread->mtx) {
		result = monotime_since(&thread->real, NULL)
			 > (int64_t)thread->yield;
	}
-	pthread_mutex_unlock(&thread->mtx);
	return result;
}

void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
-	pthread_mutex_lock(&thread->mtx);
-	{
+	frr_with_mutex(&thread->mtx) {
		thread->yield = yield_time;
	}
-	pthread_mutex_unlock(&thread->mtx);
}

void thread_getrusage(RUSAGE_T *r)
@@ -1637,20 +1587,16 @@ void funcname_thread_execute(struct thread_master *m,
	struct thread *thread;

	/* Get or allocate new thread to execute. */
-	pthread_mutex_lock(&m->mtx);
-	{
+	frr_with_mutex(&m->mtx) {
		thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);

		/* Set its event value. */
-		pthread_mutex_lock(&thread->mtx);
-		{
+		frr_with_mutex(&thread->mtx) {
			thread->add_type = THREAD_EXECUTE;
			thread->u.val = val;
			thread->ref = &thread;
		}
-		pthread_mutex_unlock(&thread->mtx);
	}
-	pthread_mutex_unlock(&m->mtx);

	/* Execute thread doing all accounting. */
	thread_call(thread);
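In funcname_thread_add_event() above the local becomes struct thread *thread = NULL, because break leaves the block before the assignment and the function then returns NULL for the already-scheduled case. A reduced sketch of that control flow, with hypothetical names (struct job, sched_mtx, schedule_job()):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include "frr_pthread.h"

struct job {
	int val;
};

static pthread_mutex_t sched_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct job pending;		/* hypothetical single-slot queue */
static bool pending_used;

/* hypothetical scheduler: returns NULL when a job is already queued */
static struct job *schedule_job(int val)
{
	struct job *j = NULL;		/* stays NULL if we break early */

	frr_with_mutex(&sched_mtx) {
		if (pending_used)
			break;		/* already scheduled; don't reschedule */

		pending.val = val;
		pending_used = true;
		j = &pending;
	}
	return j;			/* sched_mtx released either way */
}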
tools/coccinelle/frr_with_mutex.cocci (new file, 23 lines)
@@ -0,0 +1,23 @@
@@
expression E;
iterator name frr_with_mutex;
@@

- pthread_mutex_lock(E);
+ frr_with_mutex(E) {
- {
  ...
- }
- pthread_mutex_unlock(E);
+ }


@@
expression E;
@@

- pthread_mutex_lock(E);
+ frr_with_mutex(E) {
  ...
- pthread_mutex_unlock(E);
+ }
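The semantic patch above encodes the mechanical rewrite applied throughout this commit (it is meant to be replayed with coccinelle's spatch tool; the exact invocation is not given here). A minimal before/after illustration of the shape the second rule matches, using hypothetical q_mtx/q_depth stand-ins:

#include <pthread.h>
#include "frr_pthread.h"

static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static int q_depth;

/* hypothetical code in the shape the second rule matches (before) */
static void enqueue_before(void)
{
	pthread_mutex_lock(&q_mtx);
	q_depth++;
	pthread_mutex_unlock(&q_mtx);
}

/* the same function as the rule would leave it (after) */
static void enqueue_after(void)
{
	frr_with_mutex(&q_mtx) {
		q_depth++;
	}
}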
@@ -37,6 +37,7 @@
 #include "vrf.h"
 #include "workqueue.h"
 #include "nexthop_group_private.h"
+#include "frr_pthread.h"

 #include "zebra/zebra_router.h"
 #include "zebra/connected.h"
@@ -3204,12 +3205,10 @@ static int rib_process_dplane_results(struct thread *thread)
		TAILQ_INIT(&ctxlist);

		/* Take lock controlling queue of results */
-		pthread_mutex_lock(&dplane_mutex);
-		{
+		frr_with_mutex(&dplane_mutex) {
			/* Dequeue list of context structs */
			dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
		}
-		pthread_mutex_unlock(&dplane_mutex);

		/* Dequeue context block */
		ctx = dplane_ctx_dequeue(&ctxlist);
@@ -3300,12 +3299,10 @@ static int rib_process_dplane_results(struct thread *thread)
static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
{
	/* Take lock controlling queue of results */
-	pthread_mutex_lock(&dplane_mutex);
-	{
+	frr_with_mutex(&dplane_mutex) {
		/* Enqueue context blocks */
		dplane_ctx_list_append(&rib_dplane_q, ctxlist);
	}
-	pthread_mutex_unlock(&dplane_mutex);

	/* Ensure event is signalled to zebra main pthread */
	thread_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
@@ -231,13 +231,11 @@ static int zserv_write(struct thread *thread)

	cache = stream_fifo_new();

-	pthread_mutex_lock(&client->obuf_mtx);
-	{
+	frr_with_mutex(&client->obuf_mtx) {
		while (stream_fifo_head(client->obuf_fifo))
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
	}
-	pthread_mutex_unlock(&client->obuf_mtx);

	if (cache->tail) {
		msg = cache->tail;
@@ -427,13 +425,11 @@ static int zserv_read(struct thread *thread)
				  memory_order_relaxed);

	/* publish read packets on client's input queue */
-	pthread_mutex_lock(&client->ibuf_mtx);
-	{
+	frr_with_mutex(&client->ibuf_mtx) {
		while (cache->head)
			stream_fifo_push(client->ibuf_fifo,
					 stream_fifo_pop(cache));
	}
-	pthread_mutex_unlock(&client->ibuf_mtx);

	/* Schedule job to process those packets */
	zserv_event(client, ZSERV_PROCESS_MESSAGES);
@@ -499,8 +495,7 @@ static int zserv_process_messages(struct thread *thread)
	uint32_t p2p = zrouter.packets_to_process;
	bool need_resched = false;

-	pthread_mutex_lock(&client->ibuf_mtx);
-	{
+	frr_with_mutex(&client->ibuf_mtx) {
		uint32_t i;
		for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
		     ++i) {
@@ -516,7 +511,6 @@ static int zserv_process_messages(struct thread *thread)
		if (stream_fifo_head(client->ibuf_fifo))
			need_resched = true;
	}
-	pthread_mutex_unlock(&client->ibuf_mtx);

	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
@@ -535,11 +529,9 @@ static int zserv_process_messages(struct thread *thread)

int zserv_send_message(struct zserv *client, struct stream *msg)
{
-	pthread_mutex_lock(&client->obuf_mtx);
-	{
+	frr_with_mutex(&client->obuf_mtx) {
		stream_fifo_push(client->obuf_fifo, msg);
	}
-	pthread_mutex_unlock(&client->obuf_mtx);

	zserv_client_event(client, ZSERV_CLIENT_WRITE);