// SPDX-License-Identifier: GPL-2.0-or-later

/* Thread management routine
 * Copyright (C) 1998, 2000 Kunihiro Ishiguro <kunihiro@zebra.org>
 */

/* #define DEBUG */

#include <zebra.h>

#include <signal.h>
#include <sys/resource.h>

#include "frrevent.h"
#include "memory.h"
#include "frrcu.h"
#include "log.h"
#include "hash.h"
#include "command.h"
#include "sigevent.h"
#include "network.h"
#include "jhash.h"
#include "frratomic.h"
#include "frr_pthread.h"
#include "lib_errors.h"
#include "libfrr_trace.h"
#include "libfrr.h"

DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread");
DEFINE_MTYPE_STATIC(LIB, EVENT_MASTER, "Thread master");
DEFINE_MTYPE_STATIC(LIB, EVENT_POLL, "Thread Poll Info");
DEFINE_MTYPE_STATIC(LIB, EVENT_STATS, "Thread stats");

DECLARE_LIST(event_list, struct event, eventitem);

struct cancel_req {
	int flags;
	struct event *thread;
	void *eventobj;
	struct event **threadref;
};

/* Flags for task cancellation */
#define EVENT_CANCEL_FLAG_READY 0x01
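
/* Timer heap comparator: orders events by absolute expiry time, seconds
 * first, then microseconds, so the soonest deadline surfaces first.
 */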
static int event_timer_cmp(const struct event *a, const struct event *b)
{
	if (a->u.sands.tv_sec < b->u.sands.tv_sec)
		return -1;
	if (a->u.sands.tv_sec > b->u.sands.tv_sec)
		return 1;
	if (a->u.sands.tv_usec < b->u.sands.tv_usec)
		return -1;
	if (a->u.sands.tv_usec > b->u.sands.tv_usec)
		return 1;
	return 0;
}

DECLARE_HEAP(event_timer_list, struct event, timeritem, event_timer_cmp);
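
/* Poke the event loop out of a blocking poll() by writing one byte into its
 * internal self-pipe (the classic self-pipe wakeup trick).
 */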
#define AWAKEN(m)                                                              \
	do {                                                                   \
		const unsigned char wakebyte = 0x01;                           \
		write(m->io_pipe[1], &wakebyte, 1);                            \
	} while (0)

/* control variable for initializer */
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
pthread_key_t thread_current;

static pthread_mutex_t masters_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *masters;

static void thread_free(struct event_loop *master, struct event *thread);

bool cputime_enabled = true;
unsigned long cputime_threshold = CONSUMED_TIME_CHECK;
unsigned long walltime_threshold = CONSUMED_TIME_CHECK;

/* CLI start ---------------------------------------------------------------- */
#include "lib/event_clippy.c"
static uint32_t cpu_record_hash_key(const struct cpu_event_history *a)
{
	int size = sizeof(a->func);

	return jhash(&a->func, size, 0);
}

static int cpu_record_hash_cmp(const struct cpu_event_history *a,
			       const struct cpu_event_history *b)
{
	return numcmp((uintptr_t)a->func, (uintptr_t)b->func);
}

DECLARE_HASH(cpu_records, struct cpu_event_history, item, cpu_record_hash_cmp,
	     cpu_record_hash_key);
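
/* Find the statistics record for the given callback, or allocate, label and
 * register a zeroed entry on first use.
 */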
static struct cpu_event_history *cpu_records_get(struct event_loop *loop,
						 void (*func)(struct event *e),
						 const char *funcname)
{
	struct cpu_event_history ref = { .func = func }, *res;

	res = cpu_records_find(loop->cpu_records, &ref);
	if (!res) {
		res = XCALLOC(MTYPE_EVENT_STATS, sizeof(*res));
		res->func = func;
		res->funcname = funcname;
		cpu_records_add(loop->cpu_records, res);
	}
	return res;
}
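
/* Release one per-callback statistics entry. */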
static void cpu_records_free(struct cpu_event_history **p)
{
	XFREE(MTYPE_EVENT_STATS, *p);
}
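
/* Print one row of the CPU statistics table: active/call counts, cumulative
 * and maximum CPU and wall-clock times, warning counters, and the RWTEX
 * type flags (Read, Write, Timer, Event, eXecute).
 */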
static void vty_out_cpu_event_history(struct vty *vty,
				      struct cpu_event_history *a)
{
	vty_out(vty,
		"%5zu %10zu.%03zu %9zu %8zu %9zu %8zu %9zu %9zu %9zu %10zu",
		a->total_active, a->cpu.total / 1000, a->cpu.total % 1000,
		a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max,
		(a->real.total / a->total_calls), a->real.max,
		a->total_cpu_warn, a->total_wall_warn, a->total_starv_warn);
	vty_out(vty, " %c%c%c%c%c %s\n",
		a->types & (1 << EVENT_READ) ? 'R' : ' ',
		a->types & (1 << EVENT_WRITE) ? 'W' : ' ',
		a->types & (1 << EVENT_TIMER) ? 'T' : ' ',
		a->types & (1 << EVENT_EVENT) ? 'E' : ' ',
		a->types & (1 << EVENT_EXECUTE) ? 'X' : ' ', a->funcname);
}
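
/* Take a local snapshot of the live counters with seq_cst atomic loads
 * before printing, since worker pthreads may still be updating the entry
 * concurrently.
 */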
|
|
|
|
|
2023-09-07 11:48:22 +02:00
|
|
|
static void cpu_record_print_one(struct vty *vty, uint8_t filter,
|
|
|
|
struct cpu_event_history *totals,
|
|
|
|
const struct cpu_event_history *a)
|
{
	struct cpu_event_history copy;

	/* Counters are updated concurrently from other pthreads, so take an
	 * atomic snapshot of each field before printing.
	 */
	copy.total_active =
		atomic_load_explicit(&a->total_active, memory_order_seq_cst);
	copy.total_calls =
		atomic_load_explicit(&a->total_calls, memory_order_seq_cst);
	copy.total_cpu_warn =
		atomic_load_explicit(&a->total_cpu_warn, memory_order_seq_cst);
	copy.total_wall_warn =
		atomic_load_explicit(&a->total_wall_warn, memory_order_seq_cst);
	copy.total_starv_warn = atomic_load_explicit(&a->total_starv_warn,
						     memory_order_seq_cst);
	copy.cpu.total =
		atomic_load_explicit(&a->cpu.total, memory_order_seq_cst);
	copy.cpu.max = atomic_load_explicit(&a->cpu.max, memory_order_seq_cst);
	copy.real.total =
		atomic_load_explicit(&a->real.total, memory_order_seq_cst);
	copy.real.max =
		atomic_load_explicit(&a->real.max, memory_order_seq_cst);
	copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
	copy.funcname = a->funcname;

	if (!(copy.types & filter))
		return;

	vty_out_cpu_event_history(vty, &copy);
	totals->total_active += copy.total_active;
	totals->total_calls += copy.total_calls;
	totals->total_cpu_warn += copy.total_cpu_warn;
	totals->total_wall_warn += copy.total_wall_warn;
	totals->total_starv_warn += copy.total_starv_warn;
	totals->real.total += copy.real.total;
	if (totals->real.max < copy.real.max)
		totals->real.max = copy.real.max;
	totals->cpu.total += copy.cpu.total;
	if (totals->cpu.max < copy.cpu.max)
		totals->cpu.max = copy.cpu.max;
}
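
/* Minimal usage sketch (illustrative, not part of this file): feeding every
 * record of one event loop through this function builds the running TOTAL
 * row, much as cpu_record_print() below does.  `loop` stands in for a
 * struct event_loop pointer.
 *
 *	struct cpu_event_history total = { .funcname = "TOTAL",
 *					   .types = (uint8_t)-1U };
 *	struct cpu_event_history *rec;
 *
 *	frr_each (cpu_records, loop->cpu_records, rec)
 *		cpu_record_print_one(vty, total.types, &total, rec);
 */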

static void cpu_record_print(struct vty *vty, uint8_t filter)
{
	struct cpu_event_history tmp;
	struct event_loop *m;
	struct listnode *ln;

	if (!cputime_enabled)
		vty_out(vty,
			"\n"
			"Collecting CPU time statistics is currently disabled. Following statistics\n"
			"will be zero or may display data from when collection was enabled. Use the\n"
			" \"service cputime-stats\" command to start collecting data.\n"
			"\nCounters and wallclock times are always maintained and should be accurate.\n");

	memset(&tmp, 0, sizeof(tmp));
	tmp.funcname = "TOTAL";
	tmp.types = filter;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			const char *name = m->name ? m->name : "main";
			char underline[strlen(name) + 1];

			memset(underline, '-', sizeof(underline));
			underline[sizeof(underline) - 1] = '\0';

			vty_out(vty, "\n");
			vty_out(vty, "Showing statistics for pthread %s\n",
				name);
			vty_out(vty, "-------------------------------%s\n",
				underline);
			vty_out(vty, "%30s %18s %18s\n", "",
				"CPU (user+system):", "Real (wall-clock):");
			vty_out(vty,
				"Active Runtime(ms) Invoked Avg uSec Max uSecs");
			vty_out(vty, " Avg uSec Max uSecs");
			vty_out(vty,
				" CPU_Warn Wall_Warn Starv_Warn Type Event\n");

			if (cpu_records_count(m->cpu_records)) {
				struct cpu_event_history *rec;

				frr_each (cpu_records, m->cpu_records, rec)
					cpu_record_print_one(vty, filter, &tmp,
							     rec);
			} else
				vty_out(vty, "No data to display yet.\n");

			vty_out(vty, "\n");
		}
	}

	vty_out(vty, "\n");
	vty_out(vty, "Total Event statistics\n");
	vty_out(vty, "-------------------------\n");
	vty_out(vty, "%30s %18s %18s\n", "",
		"CPU (user+system):", "Real (wall-clock):");
	vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs");
	vty_out(vty, " Avg uSec Max uSecs CPU_Warn Wall_Warn Starv_Warn");
	vty_out(vty, " Type Event\n");

	if (tmp.total_calls > 0)
		vty_out_cpu_event_history(vty, &tmp);
}

/* Clear the cpu records matching filter.  The whole record set is swapped
 * out under the owning loop's mutex; entries whose type matches the filter
 * are freed and the rest are pushed back.
 */
static void cpu_record_clear(uint8_t filter)
{
	struct event_loop *m;
	struct listnode *ln;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
			frr_with_mutex (&m->mtx) {
				struct cpu_event_history *item;
				struct cpu_records_head old[1];

				cpu_records_init(old);
				cpu_records_swap_all(old, m->cpu_records);

				while ((item = cpu_records_pop(old))) {
					if (item->types & filter)
						cpu_records_free(&item);
					else
						cpu_records_add(m->cpu_records,
								item);
				}

				cpu_records_fini(old);
			}
		}
	}
}
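
/* Illustrative effect (not a call site in this file): `clear thread cpu t`
 * reaches this function with filter == (1 << EVENT_TIMER), so only timer
 * records are freed; read/write/event/execute history survives the clear.
 */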

static uint8_t parse_filter(const char *filterstr)
{
	int i = 0;
	int filter = 0;

	while (filterstr[i] != '\0') {
		switch (filterstr[i]) {
		case 'r':
		case 'R':
			filter |= (1 << EVENT_READ);
			break;
		case 'w':
		case 'W':
			filter |= (1 << EVENT_WRITE);
			break;
		case 't':
		case 'T':
			filter |= (1 << EVENT_TIMER);
			break;
		case 'e':
		case 'E':
			filter |= (1 << EVENT_EVENT);
			break;
		case 'x':
		case 'X':
			filter |= (1 << EVENT_EXECUTE);
			break;
		default:
			break;
		}
		++i;
	}

	return filter;
}
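
/* Illustrative sketch (not part of the library): mapping a filter string to
 * the bitmask consumed by cpu_record_print()/cpu_record_clear(), assuming
 * the EVENT_* type values from frrevent.h.
 *
 *	uint8_t f = parse_filter("rw");
 *	// f == (1 << EVENT_READ) | (1 << EVENT_WRITE); timer, event and
 *	// execute records are filtered out.
 *	cpu_record_print(vty, f);
 */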

#if CONFDATE > 20240707
CPP_NOTICE("Remove `show thread ...` commands")
#endif
DEFUN_NOSH (show_event_cpu,
	    show_event_cpu_cmd,
	    "show event cpu [FILTER]",
	    SHOW_STR
	    "Event information\n"
	    "Event CPU usage\n"
	    "Display filter (rwtexb)\n")
{
	uint8_t filter = (uint8_t)-1U;
	int idx = 0;

	if (argv_find(argv, argc, "FILTER", &idx)) {
		filter = parse_filter(argv[idx]->arg);
		if (!filter) {
			vty_out(vty,
				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEXB'\n",
				argv[idx]->arg);
			return CMD_WARNING;
		}
	}

	cpu_record_print(vty, filter);
	return CMD_SUCCESS;
}

ALIAS(show_event_cpu,
      show_thread_cpu_cmd,
      "show thread cpu [FILTER]",
      SHOW_STR
      "Thread information\n"
      "Thread CPU usage\n"
      "Display filter (rwtex)\n")

DEFPY (service_cputime_stats,
       service_cputime_stats_cmd,
       "[no] service cputime-stats",
       NO_STR
       "Set up miscellaneous service\n"
       "Collect CPU usage statistics\n")
{
	cputime_enabled = !no;
	return CMD_SUCCESS;
}

DEFPY (service_cputime_warning,
       service_cputime_warning_cmd,
       "[no] service cputime-warning ![(1-4294967295)]",
       NO_STR
       "Set up miscellaneous service\n"
       "Warn for tasks exceeding CPU usage threshold\n"
       "Warning threshold in milliseconds\n")
{
	if (no)
		cputime_threshold = 0;
	else
		cputime_threshold = cputime_warning * 1000;
	return CMD_SUCCESS;
}

DEFPY (service_walltime_warning,
       service_walltime_warning_cmd,
       "[no] service walltime-warning ![(1-4294967295)]",
       NO_STR
       "Set up miscellaneous service\n"
       "Warn for tasks exceeding total wallclock threshold\n"
       "Warning threshold in milliseconds\n")
{
	if (no)
		walltime_threshold = 0;
	else
		walltime_threshold = walltime_warning * 1000;
	return CMD_SUCCESS;
}
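
/* Example vtysh configuration (illustrative): enable collection and warn on
 * tasks that burn more than 100ms of CPU or 750ms of wallclock per
 * invocation.  Thresholds are entered in milliseconds and stored in
 * microseconds by the handlers above.
 *
 *	configure terminal
 *	 service cputime-stats
 *	 service cputime-warning 100
 *	 service walltime-warning 750
 */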

static void show_event_poll_helper(struct vty *vty, struct event_loop *m)
{
	const char *name = m->name ? m->name : "main";
	char underline[strlen(name) + 1];
	struct event *thread;
	uint32_t i;

	memset(underline, '-', sizeof(underline));
	underline[sizeof(underline) - 1] = '\0';

	vty_out(vty, "\nShowing poll FD's for %s\n", name);
	vty_out(vty, "----------------------%s\n", underline);
	vty_out(vty, "Count: %u/%d\n", (uint32_t)m->handler.pfdcount,
		m->fd_limit);
	for (i = 0; i < m->handler.pfdcount; i++) {
		vty_out(vty, "\t%6d fd:%6d events:%2d revents:%2d\t\t", i,
			m->handler.pfds[i].fd, m->handler.pfds[i].events,
			m->handler.pfds[i].revents);

		if (m->handler.pfds[i].events & POLLIN) {
			thread = m->read[m->handler.pfds[i].fd];

			if (!thread)
				vty_out(vty, "ERROR ");
			else
				vty_out(vty, "%s ", thread->xref->funcname);
		} else
			vty_out(vty, " ");

		if (m->handler.pfds[i].events & POLLOUT) {
			thread = m->write[m->handler.pfds[i].fd];

			if (!thread)
				vty_out(vty, "ERROR\n");
			else
				vty_out(vty, "%s\n", thread->xref->funcname);
		} else
			vty_out(vty, "\n");
	}
}
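
/* Sample output (illustrative values; the exact columns come from the format
 * strings above):
 *
 *	Showing poll FD's for main
 *	--------------------------
 *	Count: 2/1024
 *	     0 fd:    12 events: 1 revents: 0		vty_accept
 *	     1 fd:    15 events: 1 revents: 0		zclient_read
 */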

DEFUN_NOSH (show_event_poll,
	    show_event_poll_cmd,
	    "show event poll",
	    SHOW_STR
	    "Event information\n"
	    "Event Poll Information\n")
{
	struct listnode *node;
	struct event_loop *m;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m))
			show_event_poll_helper(vty, m);
	}

	return CMD_SUCCESS;
}

ALIAS(show_event_poll,
      show_thread_poll_cmd,
      "show thread poll",
      SHOW_STR
      "Thread information\n"
      "Show poll FD's and information\n")

DEFUN (clear_thread_cpu,
       clear_thread_cpu_cmd,
       "clear thread cpu [FILTER]",
       "Clear stored data in all pthreads\n"
       "Thread information\n"
       "Thread CPU usage\n"
       "Display filter (rwtexb)\n")
{
	uint8_t filter = (uint8_t)-1U;
	int idx = 0;

	if (argv_find(argv, argc, "FILTER", &idx)) {
		filter = parse_filter(argv[idx]->arg);
		if (!filter) {
			vty_out(vty,
				"Invalid filter \"%s\" specified; must contain at least one of 'RWTEXB'\n",
				argv[idx]->arg);
			return CMD_WARNING;
		}
	}

	cpu_record_clear(filter);
	return CMD_SUCCESS;
}

static void show_event_timers_helper(struct vty *vty, struct event_loop *m)
{
	const char *name = m->name ? m->name : "main";
	char underline[strlen(name) + 1];
	struct event *thread;

	memset(underline, '-', sizeof(underline));
	underline[sizeof(underline) - 1] = '\0';

	vty_out(vty, "\nShowing timers for %s\n", name);
	vty_out(vty, "-------------------%s\n", underline);

	frr_each (event_timer_list, &m->timer, thread) {
		vty_out(vty, " %-50s%pTH\n", thread->hist->funcname, thread);
	}
}

DEFPY_NOSH (show_event_timers,
	    show_event_timers_cmd,
	    "show event timers",
	    SHOW_STR
	    "Event information\n"
	    "Show all timers and how long they have remaining in the system\n")
{
	struct listnode *node;
	struct event_loop *m;

	frr_with_mutex (&masters_mtx) {
		for (ALL_LIST_ELEMENTS_RO(masters, node, m))
			show_event_timers_helper(vty, m);
	}

	return CMD_SUCCESS;
}

ALIAS(show_event_timers,
      show_thread_timers_cmd,
      "show thread timers",
      SHOW_STR
      "Thread information\n"
      "Show all timers and how long they have remaining in the system\n")

void event_cmd_init(void)
{
	install_element(VIEW_NODE, &show_thread_cpu_cmd);
	install_element(VIEW_NODE, &show_event_cpu_cmd);
	install_element(VIEW_NODE, &show_thread_poll_cmd);
	install_element(VIEW_NODE, &show_event_poll_cmd);
	install_element(ENABLE_NODE, &clear_thread_cpu_cmd);

	install_element(CONFIG_NODE, &service_cputime_stats_cmd);
	install_element(CONFIG_NODE, &service_cputime_warning_cmd);
	install_element(CONFIG_NODE, &service_walltime_warning_cmd);

	install_element(VIEW_NODE, &show_thread_timers_cmd);
	install_element(VIEW_NODE, &show_event_timers_cmd);
}
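
/* Example vtysh session (illustrative), once event_cmd_init() has run inside
 * a daemon:
 *
 *	frr# show event cpu t          (timer records only)
 *	frr# show event poll           (poll FDs per event loop)
 *	frr# clear thread cpu          (drop all accumulated records)
 */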

/* CLI end ------------------------------------------------------------------ */


static void cancelreq_del(void *cr)
{
	XFREE(MTYPE_TMP, cr);
}

/* initializer, only ever called once */
static void initializer(void)
{
	pthread_key_create(&thread_current, NULL);
}

struct event_loop *event_master_create(const char *name)
{
	struct event_loop *rv;
	struct rlimit limit;

	pthread_once(&init_once, &initializer);

	rv = XCALLOC(MTYPE_EVENT_MASTER, sizeof(struct event_loop));

	/* Initialize master mutex */
	pthread_mutex_init(&rv->mtx, NULL);
	pthread_cond_init(&rv->cancel_cond, NULL);

	/* Set name */
	name = name ? name : "default";
	rv->name = XSTRDUP(MTYPE_EVENT_MASTER, name);

	/* Initialize I/O task data structures */

	/* Use configured limit if present, ulimit otherwise. */
	rv->fd_limit = frr_get_fd_limit();
	if (rv->fd_limit == 0) {
		getrlimit(RLIMIT_NOFILE, &limit);
		rv->fd_limit = (int)limit.rlim_cur;
	}

	rv->read = XCALLOC(MTYPE_EVENT_POLL,
			   sizeof(struct event *) * rv->fd_limit);

	rv->write = XCALLOC(MTYPE_EVENT_POLL,
			    sizeof(struct event *) * rv->fd_limit);

	char tmhashname[strlen(name) + 32];

	snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
		 name);
	cpu_records_init(rv->cpu_records);

	event_list_init(&rv->event);
	event_list_init(&rv->ready);
	event_list_init(&rv->unuse);
	event_timer_list_init(&rv->timer);

	/* Initialize event_fetch() settings */
	rv->spin = true;
	rv->handle_signals = true;

	/* Set pthread owner, should be updated by actual owner */
	rv->owner = pthread_self();
	rv->cancel_req = list_new();
	rv->cancel_req->del = cancelreq_del;
	rv->canceled = true;

	/* Initialize pipe poker */
	pipe(rv->io_pipe);
	set_nonblocking(rv->io_pipe[0]);
	set_nonblocking(rv->io_pipe[1]);

	/* Initialize data structures for poll() */
	rv->handler.pfdsize = rv->fd_limit;
	rv->handler.pfdcount = 0;
	rv->handler.pfds = XCALLOC(MTYPE_EVENT_MASTER,
				   sizeof(struct pollfd) * rv->handler.pfdsize);
	rv->handler.copy = XCALLOC(MTYPE_EVENT_MASTER,
				   sizeof(struct pollfd) * rv->handler.pfdsize);

	/* add to list of threadmasters */
	frr_with_mutex (&masters_mtx) {
		if (!masters)
			masters = list_new();

		listnode_add(masters, rv);
	}

	return rv;
}
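
/* Minimal usage sketch (illustrative; `my_timer_cb` is a hypothetical
 * handler): a daemon creates one loop, schedules work on it, and spins
 * fetch/call until shutdown, which is what frr_run() does for real daemons.
 *
 *	struct event_loop *loop = event_master_create("main");
 *	struct event ev;
 *
 *	event_add_timer(loop, my_timer_cb, NULL, 5, NULL);
 *	while (event_fetch(loop, &ev))
 *		event_call(&ev);
 *	event_master_free(loop);
 */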

void event_master_set_name(struct event_loop *master, const char *name)
{
	frr_with_mutex (&master->mtx) {
		XFREE(MTYPE_EVENT_MASTER, master->name);
		master->name = XSTRDUP(MTYPE_EVENT_MASTER, name);
	}
}

#define EVENT_UNUSED_DEPTH 10

/* Move thread to unuse list. */
static void thread_add_unuse(struct event_loop *m, struct event *thread)
{
	pthread_mutex_t mtxc = thread->mtx;

	assert(m != NULL && thread != NULL);

	thread->hist->total_active--;
	memset(thread, 0, sizeof(struct event));
	thread->type = EVENT_UNUSED;

	/* Restore the thread mutex context. */
	thread->mtx = mtxc;

	if (event_list_count(&m->unuse) < EVENT_UNUSED_DEPTH) {
		event_list_add_tail(&m->unuse, thread);
		return;
	}

	thread_free(m, thread);
}
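
/* Recycling note: with EVENT_UNUSED_DEPTH == 10, at most ten struct event
 * objects are pooled per loop; thread_get() further below pops from this
 * unuse list before falling back to XCALLOC.
 */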

/* Free all unused threads. */
static void thread_list_free(struct event_loop *m, struct event_list_head *list)
{
	struct event *t;

	while ((t = event_list_pop(list)))
		thread_free(m, t);
}

static void thread_array_free(struct event_loop *m, struct event **thread_array)
{
	struct event *t;
	int index;

	for (index = 0; index < m->fd_limit; ++index) {
		t = thread_array[index];
		if (t) {
			thread_array[index] = NULL;
			thread_free(m, t);
		}
	}
	XFREE(MTYPE_EVENT_POLL, thread_array);
}

/*
 * event_master_free_unused
 *
 * As threads are finished with, they are put on the unuse list for later
 * reuse.  When shutting down, free the unused threads so that leaks of
 * still-active threads become visible.
 */
void event_master_free_unused(struct event_loop *m)
{
	frr_with_mutex (&m->mtx) {
		struct event *t;

		while ((t = event_list_pop(&m->unuse)))
			thread_free(m, t);
	}
}

/* Stop thread scheduler. */
void event_master_free(struct event_loop *m)
{
	struct cpu_event_history *record;
	struct event *t;

	frr_with_mutex (&masters_mtx) {
		listnode_delete(masters, m);
		if (masters->count == 0)
			list_delete(&masters);
	}

	thread_array_free(m, m->read);
	thread_array_free(m, m->write);
	while ((t = event_timer_list_pop(&m->timer)))
		thread_free(m, t);
	thread_list_free(m, &m->event);
	thread_list_free(m, &m->ready);
	thread_list_free(m, &m->unuse);
	pthread_mutex_destroy(&m->mtx);
	pthread_cond_destroy(&m->cancel_cond);
	close(m->io_pipe[0]);
	close(m->io_pipe[1]);
	list_delete(&m->cancel_req);
	m->cancel_req = NULL;

	while ((record = cpu_records_pop(m->cpu_records)))
		cpu_records_free(&record);
	cpu_records_fini(m->cpu_records);

	XFREE(MTYPE_EVENT_MASTER, m->name);
	XFREE(MTYPE_EVENT_MASTER, m->handler.pfds);
	XFREE(MTYPE_EVENT_MASTER, m->handler.copy);
	XFREE(MTYPE_EVENT_MASTER, m);
}

/* Return remaining time in milliseconds. */
unsigned long event_timer_remain_msec(struct event *thread)
{
	int64_t remain;

	if (!event_is_scheduled(thread))
		return 0;

	frr_with_mutex (&thread->mtx) {
		remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
	}

	return remain < 0 ? 0 : remain;
}

/* Return remaining time in seconds. */
unsigned long event_timer_remain_second(struct event *thread)
{
	return event_timer_remain_msec(thread) / 1000LL;
}

struct timeval event_timer_remain(struct event *thread)
{
	struct timeval remain;

	frr_with_mutex (&thread->mtx) {
		monotime_until(&thread->u.sands, &remain);
	}
	return remain;
}

static int time_hhmmss(char *buf, int buf_size, long sec)
{
	long hh;
	long mm;
	int wr;

	assert(buf_size >= 8);

	hh = sec / 3600;
	sec %= 3600;
	mm = sec / 60;
	sec %= 60;

	wr = snprintf(buf, buf_size, "%02ld:%02ld:%02ld", hh, mm, sec);

	return wr != 8;
}
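
/* Illustrative call (not a call site in this file): 3725 seconds formats as
 * "01:02:05".  The result is 8 characters, so the buffer must hold at least
 * 9 bytes including the terminating NUL; the return value is 0 on the
 * expected 8-character write.
 *
 *	char buf[9];
 *	time_hhmmss(buf, sizeof(buf), 3725);
 */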

char *event_timer_to_hhmmss(char *buf, int buf_size, struct event *t_timer)
{
	if (t_timer)
		time_hhmmss(buf, buf_size, event_timer_remain_second(t_timer));
	else
		snprintf(buf, buf_size, "--:--:--");

	return buf;
}
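
/* Illustrative caller (hypothetical timer reference `peer->t_holdtime`):
 * printing a countdown in "HH:MM:SS" form, or "--:--:--" when no timer is
 * armed.
 *
 *	char buf[9];
 *	vty_out(vty, "Holdtime: %s\n",
 *		event_timer_to_hhmmss(buf, sizeof(buf), peer->t_holdtime));
 */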

/* Get new thread. */
static struct event *thread_get(struct event_loop *m, uint8_t type,
				void (*func)(struct event *), void *arg,
				const struct xref_eventsched *xref)
{
	struct event *thread = event_list_pop(&m->unuse);

	if (!thread) {
		thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
		/* mutex only needs to be initialized at struct creation. */
		pthread_mutex_init(&thread->mtx, NULL);
		m->alloc++;
	}

	thread->type = type;
2003-01-18 00:47:00 +01:00
|
|
|
	thread->add_type = type;
	thread->master = m;
	thread->arg = arg;
	thread->yield = EVENT_YIELD_TIME_SLOT; /* default */
	/* thread->ref is zeroed either by XCALLOC above or by memset before
	 * being put on the "unuse" list by thread_add_unuse().
	 * Setting it here again makes coverity complain about a missing
	 * lock :(
	 */
	/* thread->ref = NULL; */
	thread->ignore_timer_late = false;

	/*
	 * If the passed-in funcname is not what we have stored, then
	 * thread->hist needs to be updated. We keep the last one around
	 * in unuse under the assumption that we are probably going to
	 * immediately allocate the same type of thread; this hopefully
	 * saves us some serious hash_get lookups.
	 */
	if ((thread->xref && thread->xref->funcname != xref->funcname)
	    || thread->func != func)
		thread->hist = cpu_records_get(m, func, xref->funcname);

	thread->hist->total_active++;
	thread->func = func;
	thread->xref = xref;

	return thread;
}

static void thread_free(struct event_loop *master, struct event *thread)
{
	/* Update statistics. */
	assert(master->alloc > 0);
	master->alloc--;

	/* Free allocated resources. */
	pthread_mutex_destroy(&thread->mtx);
	XFREE(MTYPE_THREAD, thread);
}

static int fd_poll(struct event_loop *m, const struct timeval *timer_wait,
		   bool *eintr_p)
{
	sigset_t origsigs;
	unsigned char trash[64];
	nfds_t count = m->handler.copycount;

	/*
	 * If timer_wait is null here, that means poll() should block
	 * indefinitely, unless the event_master has overridden it by setting
	 * ->selectpoll_timeout.
	 *
	 * If the value is positive, it specifies the maximum number of
	 * milliseconds to wait. If the timeout is -1, it specifies that
	 * we should never wait and always return immediately even if no
	 * event is detected. If the value is zero, the behavior is default.
	 */
	int timeout = -1;

	/* number of file descriptors with events */
	int num;

	if (timer_wait != NULL && m->selectpoll_timeout == 0) {
		/* use the default value */
		timeout = (timer_wait->tv_sec * 1000)
			  + (timer_wait->tv_usec / 1000);
	} else if (m->selectpoll_timeout > 0) {
		/* use the user's timeout */
		timeout = m->selectpoll_timeout;
	} else if (m->selectpoll_timeout < 0) {
		/* effect a poll (return immediately) */
		timeout = 0;
	}

	zlog_tls_buffer_flush();
	rcu_read_unlock();
	rcu_assert_read_unlocked();

	/* add poll pipe poker */
	assert(count + 1 < m->handler.pfdsize);
	m->handler.copy[count].fd = m->io_pipe[0];
	m->handler.copy[count].events = POLLIN;
	m->handler.copy[count].revents = 0x00;

	/* We need to deal with a signal-handling race here: we
	 * don't want to miss a crucial signal, such as SIGTERM or SIGINT,
	 * that may arrive just before we enter poll(). We will block the
	 * key signals, then check whether any have arrived - if so, we return
	 * before calling poll(). If not, we'll re-enable the signals
	 * in the ppoll() call.
	 */

	sigemptyset(&origsigs);
	if (m->handle_signals) {
		/* Main pthread that handles the app signals */
		if (frr_sigevent_check(&origsigs)) {
			/* Signal to process - restore signal mask and return */
			pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
			num = -1;
			*eintr_p = true;
			goto done;
		}
	} else {
		/* Don't make any changes for the non-main pthreads */
		pthread_sigmask(SIG_SETMASK, NULL, &origsigs);
	}

#if defined(HAVE_PPOLL)
	struct timespec ts, *tsp;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	num = ppoll(m->handler.copy, count + 1, tsp, &origsigs);
	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
#else
	/* Not ideal - there is a race after we restore the signal mask */
	pthread_sigmask(SIG_SETMASK, &origsigs, NULL);
	num = poll(m->handler.copy, count + 1, timeout);
#endif

done:

	if (num < 0 && errno == EINTR)
		*eintr_p = true;

	if (num > 0 && m->handler.copy[count].revents != 0 && num--)
		while (read(m->io_pipe[0], &trash, sizeof(trash)) > 0)
			;

	rcu_read_lock();

	return num;
}
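
/*
 * A minimal sketch of why the ppoll() path above exists (illustrative
 * only). With plain poll(), unblocking signals and then sleeping is two
 * separate steps:
 *
 *	pthread_sigmask(SIG_SETMASK, &unblocked, NULL);
 *	(a signal delivered here is only noticed after the full timeout)
 *	poll(fds, nfds, timeout);
 *
 * ppoll(fds, nfds, tsp, &unblocked) installs the mask and sleeps in one
 * atomic step, so a pending SIGTERM/SIGINT reliably interrupts it with
 * EINTR instead of being lost for the duration of the poll.
 */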

/* Add new read or write thread. */
void _event_add_read_write(const struct xref_eventsched *xref,
			   struct event_loop *m, void (*func)(struct event *),
			   void *arg, int fd, struct event **t_ptr)
{
	int dir = xref->event_type;
	struct event *thread = NULL;
	struct event **thread_array;

	if (dir == EVENT_READ)
		frrtrace(9, frr_libfrr, schedule_read, m, xref->funcname,
			 xref->xref.file, xref->xref.line, t_ptr, fd, 0, arg,
			 0);
	else
		frrtrace(9, frr_libfrr, schedule_write, m, xref->funcname,
			 xref->xref.file, xref->xref.line, t_ptr, fd, 0, arg,
			 0);

	assert(fd >= 0);
	if (fd >= m->fd_limit)
		assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");

	frr_with_mutex (&m->mtx) {
		/* Thread is already scheduled; don't reschedule */
		if (t_ptr && *t_ptr)
			break;

		/* default to a new pollfd */
		nfds_t queuepos = m->handler.pfdcount;

		if (dir == EVENT_READ)
			thread_array = m->read;
		else
			thread_array = m->write;

		/*
		 * if we already have a pollfd for our file descriptor, find
		 * and use it
		 */
		for (nfds_t i = 0; i < m->handler.pfdcount; i++)
			if (m->handler.pfds[i].fd == fd) {
				queuepos = i;

#ifdef DEV_BUILD
				/*
				 * What happens if we have a thread already
				 * created for this event?
				 */
				if (thread_array[fd])
					assert(!"Thread already scheduled for file descriptor");
#endif
				break;
			}

		/* make sure we have room for this fd + pipe poker fd */
		assert(queuepos + 1 < m->handler.pfdsize);

		thread = thread_get(m, dir, func, arg, xref);

		m->handler.pfds[queuepos].fd = fd;
		m->handler.pfds[queuepos].events |=
			(dir == EVENT_READ ? POLLIN : POLLOUT);

		if (queuepos == m->handler.pfdcount)
			m->handler.pfdcount++;

		if (thread) {
			frr_with_mutex (&thread->mtx) {
				thread->u.fd = fd;
				thread_array[thread->u.fd] = thread;
			}

			if (t_ptr) {
				*t_ptr = thread;
				thread->ref = t_ptr;
			}
		}

		AWAKEN(m);
	}
}
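
/*
 * Callers normally reach this through the event_add_read()/event_add_write()
 * wrapper macros (frrevent.h), which generate the xref. A sketch of typical
 * use - "vty_read", "vty" and "vty->t_read" are placeholder names:
 *
 *	static void vty_read(struct event *thread)
 *	{
 *		struct vty *vty = EVENT_ARG(thread);
 *		...
 *	}
 *
 *	event_add_read(master, vty_read, vty, vty->fd, &vty->t_read);
 *
 * Keeping the &vty->t_read back-reference lets the library null it when the
 * task runs or is cancelled, which is what the "already scheduled" check at
 * the top of this function relies on.
 */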

static void _event_add_timer_timeval(const struct xref_eventsched *xref,
				     struct event_loop *m,
				     void (*func)(struct event *), void *arg,
				     struct timeval *time_relative,
				     struct event **t_ptr)
{
	struct event *thread;
	struct timeval t;

	assert(m != NULL);
	assert(time_relative);

	frrtrace(9, frr_libfrr, schedule_timer, m, xref->funcname,
		 xref->xref.file, xref->xref.line, t_ptr, 0, 0, arg,
		 (long)time_relative->tv_sec);

	/* Compute expiration/deadline time. */
	monotime(&t);
	timeradd(&t, time_relative, &t);

	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			return;

		thread = thread_get(m, EVENT_TIMER, func, arg, xref);

		frr_with_mutex (&thread->mtx) {
			thread->u.sands = t;
			event_timer_list_add(&m->timer, thread);
			if (t_ptr) {
				*t_ptr = thread;
				thread->ref = t_ptr;
			}
		}

		/* The timer list is sorted - if this new timer
		 * might change the time we'll wait for, give the pthread
		 * a chance to re-compute.
		 */
		if (event_timer_list_first(&m->timer) == thread)
			AWAKEN(m);
	}
#define ONEYEAR2SEC (60 * 60 * 24 * 365)
	if (time_relative->tv_sec > ONEYEAR2SEC)
		flog_err(
			EC_LIB_TIMER_TOO_LONG,
			"Timer: %pTHD is created with an expiration that is greater than 1 year",
			thread);
}

/* Add timer event thread. */
void _event_add_timer(const struct xref_eventsched *xref, struct event_loop *m,
		      void (*func)(struct event *), void *arg, long timer,
		      struct event **t_ptr)
{
	struct timeval trel;

	assert(m != NULL);

	trel.tv_sec = timer;
	trel.tv_usec = 0;

	_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "millisecond" resolution */
void _event_add_timer_msec(const struct xref_eventsched *xref,
			   struct event_loop *m, void (*func)(struct event *),
			   void *arg, long timer, struct event **t_ptr)
{
	struct timeval trel;

	assert(m != NULL);

	trel.tv_sec = timer / 1000;
	trel.tv_usec = 1000 * (timer % 1000);

	_event_add_timer_timeval(xref, m, func, arg, &trel, t_ptr);
}

/* Add timer event thread with "timeval" resolution */
void _event_add_timer_tv(const struct xref_eventsched *xref,
			 struct event_loop *m, void (*func)(struct event *),
			 void *arg, struct timeval *tv, struct event **t_ptr)
{
	_event_add_timer_timeval(xref, m, func, arg, tv, t_ptr);
}
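
/*
 * Illustrative use of the three timer flavors above, via their wrapper
 * macros (callback and argument names are placeholders):
 *
 *	event_add_timer(master, hello_expire, circuit, 5, &t_hello);
 *	event_add_timer_msec(master, hello_expire, circuit, 250, &t_fast);
 *
 * All of them funnel into _event_add_timer_timeval(); the msec variant just
 * splits the interval into tv_sec/tv_usec before the common path converts
 * the relative time into an absolute monotonic deadline.
 */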

/* Add simple event thread. */
void _event_add_event(const struct xref_eventsched *xref, struct event_loop *m,
		      void (*func)(struct event *), void *arg, int val,
		      struct event **t_ptr)
{
	struct event *thread = NULL;

	frrtrace(9, frr_libfrr, schedule_event, m, xref->funcname,
		 xref->xref.file, xref->xref.line, t_ptr, 0, val, arg, 0);

	assert(m != NULL);

	frr_with_mutex (&m->mtx) {
		if (t_ptr && *t_ptr)
			/* thread is already scheduled; don't reschedule */
			break;

		thread = thread_get(m, EVENT_EVENT, func, arg, xref);
		frr_with_mutex (&thread->mtx) {
			thread->u.val = val;
			event_list_add_tail(&m->event, thread);
		}

		if (t_ptr) {
			*t_ptr = thread;
			thread->ref = t_ptr;
		}

		AWAKEN(m);
	}
}
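
/*
 * Illustrative use: an EVENT_EVENT task has no fd or timer attached and
 * runs as soon as the loop drains the event list; "val" rides along in
 * thread->u.val. Placeholder names:
 *
 *	event_add_event(master, process_packet_queue, peer, 0,
 *			&peer->t_process);
 */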

/* Thread cancellation ------------------------------------------------------ */

/**
 * NOT's out the .events field of pollfd corresponding to the given file
 * descriptor. The event to be NOT'd is passed in the 'state' parameter.
 *
 * This needs to happen for both copies of pollfd's. See 'event_fetch'
 * implementation for details.
 *
 * @param master
 * @param fd
 * @param state the event to cancel. One or more (OR'd together) of the
 * following:
 *   - POLLIN
 *   - POLLOUT
 */
static void event_cancel_rw(struct event_loop *master, int fd, short state,
			    int idx_hint)
{
	bool found = false;

	/* find the index of corresponding pollfd */
	nfds_t i;

	/* Cancel POLLHUP too just in case some bozo set it */
	state |= POLLHUP;

	/* Some callers know the index of the pfd already */
	if (idx_hint >= 0) {
		i = idx_hint;
		found = true;
	} else {
		/* Have to look for the fd in the pfd array */
		for (i = 0; i < master->handler.pfdcount; i++)
			if (master->handler.pfds[i].fd == fd) {
				found = true;
				break;
			}
	}

	if (!found) {
		zlog_debug(
			"[!] Received cancellation request for nonexistent rw job");
		zlog_debug("[!] threadmaster: %s | fd: %d",
			   master->name ? master->name : "", fd);
		return;
	}

	/* NOT out event. */
	master->handler.pfds[i].events &= ~(state);

	/* If all events are canceled, delete / resize the pollfd array. */
	if (master->handler.pfds[i].events == 0) {
		memmove(master->handler.pfds + i, master->handler.pfds + i + 1,
			(master->handler.pfdcount - i - 1)
				* sizeof(struct pollfd));
		master->handler.pfdcount--;
		master->handler.pfds[master->handler.pfdcount].fd = 0;
		master->handler.pfds[master->handler.pfdcount].events = 0;
	}

	/*
	 * If we have the same pollfd in the copy, perform the same operations,
	 * otherwise return.
	 */
	if (i >= master->handler.copycount)
		return;

	master->handler.copy[i].events &= ~(state);

	if (master->handler.copy[i].events == 0) {
		memmove(master->handler.copy + i, master->handler.copy + i + 1,
			(master->handler.copycount - i - 1)
				* sizeof(struct pollfd));
		master->handler.copycount--;
		master->handler.copy[master->handler.copycount].fd = 0;
		master->handler.copy[master->handler.copycount].events = 0;
	}
}

/*
 * Process task cancellation given a task argument: iterate through the
 * various lists of tasks, looking for any that match the argument.
 */
static void cancel_arg_helper(struct event_loop *master,
			      const struct cancel_req *cr)
{
	struct event *t;
	nfds_t i;
	int fd;
	struct pollfd *pfd;

	/* We're only processing arg-based cancellations here. */
	if (cr->eventobj == NULL)
		return;

	/* First process the ready lists. */
	frr_each_safe (event_list, &master->event, t) {
		if (t->arg != cr->eventobj)
			continue;
		event_list_del(&master->event, t);
		if (t->ref)
			*t->ref = NULL;
		thread_add_unuse(master, t);
	}

	frr_each_safe (event_list, &master->ready, t) {
		if (t->arg != cr->eventobj)
			continue;
		event_list_del(&master->ready, t);
		if (t->ref)
			*t->ref = NULL;
		thread_add_unuse(master, t);
	}

	/* If requested, stop here and ignore io and timers */
	if (CHECK_FLAG(cr->flags, EVENT_CANCEL_FLAG_READY))
		return;

	/* Check the io tasks */
	for (i = 0; i < master->handler.pfdcount;) {
		pfd = master->handler.pfds + i;

		if (pfd->events & POLLIN)
			t = master->read[pfd->fd];
		else
			t = master->write[pfd->fd];

		if (t && t->arg == cr->eventobj) {
			fd = pfd->fd;

			/* Found a match to cancel: clean up fd arrays */
			event_cancel_rw(master, pfd->fd, pfd->events, i);

			/* Clean up thread arrays */
			master->read[fd] = NULL;
			master->write[fd] = NULL;

			/* Clear caller's ref */
			if (t->ref)
				*t->ref = NULL;

			thread_add_unuse(master, t);

			/* Don't increment 'i' since the cancellation will have
			 * removed the entry from the pfd array
			 */
		} else
			i++;
	}

	/* Check the timer tasks */
	t = event_timer_list_first(&master->timer);
	while (t) {
		struct event *t_next;

		t_next = event_timer_list_next(&master->timer, t);

		if (t->arg == cr->eventobj) {
			event_timer_list_del(&master->timer, t);
			if (t->ref)
				*t->ref = NULL;
			thread_add_unuse(master, t);
		}

		t = t_next;
	}
}

/**
 * Process cancellation requests.
 *
 * This may only be run from the pthread which owns the event_master.
 *
 * @param master the thread master to process
 * @REQUIRE master->mtx
 */
static void do_event_cancel(struct event_loop *master)
{
	struct event_list_head *list = NULL;
	struct event **thread_array = NULL;
	struct event *thread;
	struct cancel_req *cr;
	struct listnode *ln;

	for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) {
		/*
		 * If this is an event object cancellation, search
		 * through task lists deleting any tasks which have the
		 * specified argument - use this handy helper function.
		 */
		if (cr->eventobj) {
			cancel_arg_helper(master, cr);
			continue;
		}

		/*
		 * The pointer varies depending on whether the cancellation
		 * request was made asynchronously or not. If it was, we
		 * need to check whether the thread even exists anymore
		 * before cancelling it.
		 */
		thread = (cr->thread) ? cr->thread : *cr->threadref;

		if (!thread)
			continue;

		list = NULL;
		thread_array = NULL;

		/* Determine the appropriate queue to cancel the thread from */
		switch (thread->type) {
		case EVENT_READ:
			event_cancel_rw(master, thread->u.fd, POLLIN, -1);
			thread_array = master->read;
			break;
		case EVENT_WRITE:
			event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
			thread_array = master->write;
			break;
		case EVENT_TIMER:
			event_timer_list_del(&master->timer, thread);
			break;
		case EVENT_EVENT:
			list = &master->event;
			break;
		case EVENT_READY:
			list = &master->ready;
			break;
		case EVENT_UNUSED:
		case EVENT_EXECUTE:
			continue;
		}

		if (list)
			event_list_del(list, thread);
		else if (thread_array)
			thread_array[thread->u.fd] = NULL;

		if (thread->ref)
			*thread->ref = NULL;

		thread_add_unuse(thread->master, thread);
	}

	/* Delete and free all cancellation requests */
	if (master->cancel_req)
		list_delete_all_node(master->cancel_req);

	/* Wake up any threads which may be blocked in event_cancel_async() */
	master->canceled = true;
	pthread_cond_broadcast(&master->cancel_cond);
}

/*
 * Helper function used for multiple flavors of arg-based cancellation.
 */
static void cancel_event_helper(struct event_loop *m, void *arg, int flags)
{
	struct cancel_req *cr;

	assert(m->owner == pthread_self());

	/* Only worth anything if caller supplies an arg. */
	if (arg == NULL)
		return;

	cr = XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));

	cr->flags = flags;

	frr_with_mutex (&m->mtx) {
		cr->eventobj = arg;
		listnode_add(m->cancel_req, cr);
		do_event_cancel(m);
	}
}

/**
 * Cancel any events which have the specified argument.
 *
 * MT-Unsafe
 *
 * @param m the event_master to cancel from
 * @param arg the argument passed when creating the event
 */
void event_cancel_event(struct event_loop *master, void *arg)
{
	cancel_event_helper(master, arg, 0);
}

/*
 * Cancel ready tasks with an arg matching 'arg'
 *
 * MT-Unsafe
 *
 * @param m the event_master to cancel from
 * @param arg the argument passed when creating the event
 */
void event_cancel_event_ready(struct event_loop *m, void *arg)
{
	/* Only cancel ready/event tasks */
	cancel_event_helper(m, arg, EVENT_CANCEL_FLAG_READY);
}
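
/*
 * Illustrative use of arg-based cancellation: when tearing down an object,
 * a single call kills every io, timer and ready task still pointing at it
 * (placeholder names):
 *
 *	event_cancel_event(master, vty);
 *	vty_free(vty);
 *
 * event_cancel_event_ready() is the narrower variant for callers that must
 * leave scheduled io/timer tasks intact and only flush already-queued work.
 */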

/**
 * Cancel a specific task.
 *
 * MT-Unsafe
 *
 * @param thread task to cancel
 */
void event_cancel(struct event **thread)
{
	struct event_loop *master;

	if (thread == NULL || *thread == NULL)
		return;

	master = (*thread)->master;

	frrtrace(9, frr_libfrr, event_cancel, master, (*thread)->xref->funcname,
		 (*thread)->xref->xref.file, (*thread)->xref->xref.line, NULL,
		 (*thread)->u.fd, (*thread)->u.val, (*thread)->arg,
		 (*thread)->u.sands.tv_sec);

	assert(master->owner == pthread_self());

	frr_with_mutex (&master->mtx) {
		struct cancel_req *cr =
			XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
		cr->thread = *thread;
		listnode_add(master->cancel_req, cr);
		do_event_cancel(master);

		*thread = NULL;
	}
}
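
/*
 * Illustrative use: because event_cancel() takes a struct event ** and
 * nulls it under the master's mutex, callers may cancel unconditionally
 * without first checking whether the task is still scheduled (placeholder
 * name):
 *
 *	event_cancel(&peer->t_connect);
 */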

/**
 * Asynchronous cancellation.
 *
 * Called with either a struct event ** or void * to an event argument,
 * this function posts the correct cancellation request and blocks until it is
 * serviced.
 *
 * If the thread is currently running, execution blocks until it completes.
 *
 * The last two parameters are mutually exclusive, i.e. if you pass one the
 * other must be NULL.
 *
 * When the cancellation procedure executes on the target event_master, the
 * thread * provided is checked for nullity. If it is null, the thread is
 * assumed to no longer exist and the cancellation request is a no-op. Thus
 * users of this API must pass a back-reference when scheduling the original
 * task.
 *
 * MT-Safe
 *
 * @param master the thread master with the relevant event / task
 * @param thread pointer to thread to cancel
 * @param eventobj the event
 */
void event_cancel_async(struct event_loop *master, struct event **thread,
			void *eventobj)
{
	assert(!(thread && eventobj) && (thread || eventobj));

	if (thread && *thread)
		frrtrace(9, frr_libfrr, event_cancel_async, master,
			 (*thread)->xref->funcname, (*thread)->xref->xref.file,
			 (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
			 (*thread)->u.val, (*thread)->arg,
			 (*thread)->u.sands.tv_sec);
	else
		frrtrace(9, frr_libfrr, event_cancel_async, master, NULL, NULL,
			 0, NULL, 0, 0, eventobj, 0);

	assert(master->owner != pthread_self());

	frr_with_mutex (&master->mtx) {
		master->canceled = false;

		if (thread) {
			struct cancel_req *cr =
				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
			cr->threadref = thread;
			listnode_add(master->cancel_req, cr);
		} else if (eventobj) {
			struct cancel_req *cr =
				XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
			cr->eventobj = eventobj;
			listnode_add(master->cancel_req, cr);
		}
		AWAKEN(master);

		while (!master->canceled)
			pthread_cond_wait(&master->cancel_cond, &master->mtx);
	}

	if (thread)
		*thread = NULL;
}
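
/*
 * Illustrative use from a helper pthread (placeholder names). Note the
 * assert above: this must not be called from the pthread that owns the
 * loop, since it blocks until that pthread services the request:
 *
 *	event_cancel_async(master, &obj->t_task, NULL);
 */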

/* ------------------------------------------------------------------------- */

static struct timeval *thread_timer_wait(struct event_timer_list_head *timers,
					 struct timeval *timer_val)
{
	if (!event_timer_list_count(timers))
		return NULL;

	struct event *next_timer = event_timer_list_first(timers);

	monotime_until(&next_timer->u.sands, timer_val);
	return timer_val;
}

static struct event *thread_run(struct event_loop *m, struct event *thread,
				struct event *fetch)
{
	*fetch = *thread;
	thread_add_unuse(m, thread);
	return fetch;
}

static int thread_process_io_helper(struct event_loop *m, struct event *thread,
				    short state, short actual_state, int pos)
{
	struct event **thread_array;

	/*
	 * poll() clears the .events field, but the pollfd array we
	 * pass to poll() is a copy of the one used to schedule threads.
	 * We need to synchronize state between the two here by applying
	 * the same changes poll() made on the copy of the "real" pollfd
	 * array.
	 *
	 * This cleans up a possible infinite loop where we refuse
	 * to respond to a poll event but poll is insistent that
	 * we should.
	 */
	m->handler.pfds[pos].events &= ~(state);

	if (!thread) {
		if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP)
			flog_err(EC_LIB_NO_THREAD,
				 "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!",
				 m->handler.pfds[pos].fd, actual_state);
		return 0;
	}

	if (thread->type == EVENT_READ)
		thread_array = m->read;
	else
		thread_array = m->write;

	thread_array[thread->u.fd] = NULL;
	event_list_add_tail(&m->ready, thread);
	thread->type = EVENT_READY;

	return 1;
}

static inline void thread_process_io_inner_loop(struct event_loop *m,
						unsigned int num,
						struct pollfd *pfds, nfds_t *i,
						uint32_t *ready)
{
	/* no event for current fd? immediately continue */
	if (pfds[*i].revents == 0)
		return;

	*ready = *ready + 1;

	/*
	 * Unless someone has called event_cancel from another
	 * pthread, the only thing that could have changed in
	 * m->handler.pfds while we were asleep is the .events
	 * field in a given pollfd. Barring event_cancel() that
	 * value should be a superset of the values we have in our
	 * copy, so there's no need to update it. Similarly,
	 * barring deletion, the fd should still be a valid index
	 * into the master's pfds.
	 *
	 * We are including POLLERR here to do a READ event
	 * this is because the read should fail and the
	 * read function should handle it appropriately
	 */
	if (pfds[*i].revents & (POLLIN | POLLHUP | POLLERR)) {
		thread_process_io_helper(m, m->read[pfds[*i].fd], POLLIN,
					 pfds[*i].revents, *i);
	}
	if (pfds[*i].revents & POLLOUT)
		thread_process_io_helper(m, m->write[pfds[*i].fd], POLLOUT,
					 pfds[*i].revents, *i);

	/*
	 * if one of our file descriptors is garbage, remove the same
	 * from both pfds + update sizes and index
	 */
	if (pfds[*i].revents & POLLNVAL) {
		memmove(m->handler.pfds + *i, m->handler.pfds + *i + 1,
			(m->handler.pfdcount - *i - 1) * sizeof(struct pollfd));
		m->handler.pfdcount--;
		m->handler.pfds[m->handler.pfdcount].fd = 0;
		m->handler.pfds[m->handler.pfdcount].events = 0;

		memmove(pfds + *i, pfds + *i + 1,
			(m->handler.copycount - *i - 1) * sizeof(struct pollfd));
		m->handler.copycount--;
		m->handler.copy[m->handler.copycount].fd = 0;
		m->handler.copy[m->handler.copycount].events = 0;

		*i = *i - 1;
	}
}

/**
 * Process I/O events.
 *
 * Walks through file descriptor array looking for those pollfds whose .revents
 * field has something interesting. Deletes any invalid file descriptors.
 *
 * Try to impart some impartiality to handling of io. The event
 * system will cycle through the fd's available for io
 * giving each one a chance to go first.
 *
 * @param m the thread master
 * @param num the number of active file descriptors (return value of poll())
 */
static void thread_process_io(struct event_loop *m, unsigned int num)
{
	unsigned int ready = 0;
	struct pollfd *pfds = m->handler.copy;
	nfds_t i, last_read = m->last_read % m->handler.copycount;

	for (i = last_read; i < m->handler.copycount && ready < num; ++i)
		thread_process_io_inner_loop(m, num, pfds, &i, &ready);

	for (i = 0; i < last_read && ready < num; ++i)
		thread_process_io_inner_loop(m, num, pfds, &i, &ready);

	m->last_read++;
}
/* Add all timers that have popped to the ready list. */
static unsigned int thread_process_timers(struct event_loop *m,
					  struct timeval *timenow)
{
	struct timeval prev = *timenow;
	bool displayed = false;
	struct event *thread;
unsigned int ready = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-12-11 17:27:26 +01:00
|
|
|
while ((thread = event_timer_list_first(&m->timer))) {
|
2017-01-17 22:57:57 +01:00
|
|
|
if (timercmp(timenow, &thread->u.sands, <))
|
2021-02-18 15:38:19 +01:00
|
|
|
break;
|
2021-02-03 15:13:59 +01:00
|
|
|
prev = thread->u.sands;
|
|
|
|
prev.tv_sec += 4;
|
|
|
|
/*
|
|
|
|
* If the timer would have popped 4 seconds in the
|
|
|
|
* past then we are in a situation where we are
|
|
|
|
* really getting behind on handling of events.
|
|
|
|
* Let's log it and do the right thing with it.
|
|
|
|
*/
|
2022-02-10 20:10:26 +01:00
|
|
|
if (timercmp(timenow, &prev, >)) {
|
|
|
|
atomic_fetch_add_explicit(
|
|
|
|
&thread->hist->total_starv_warn, 1,
|
|
|
|
memory_order_seq_cst);
|
|
|
|
if (!displayed && !thread->ignore_timer_late) {
|
|
|
|
flog_warn(
|
|
|
|
EC_LIB_STARVE_THREAD,
|
|
|
|
"Thread Starvation: %pTHD was scheduled to pop greater than 4s ago",
|
|
|
|
thread);
|
|
|
|
displayed = true;
|
|
|
|
}
|
2021-02-03 15:13:59 +01:00
|
|
|
}
|
|
|
|
|
2022-12-11 17:27:26 +01:00
|
|
|
event_timer_list_pop(&m->timer);
|
2022-12-10 15:28:31 +01:00
|
|
|
thread->type = EVENT_READY;
|
2022-12-11 17:27:26 +01:00
|
|
|
event_list_add_tail(&m->ready, thread);
|
2005-04-28 03:31:13 +02:00
|
|
|
ready++;
|
|
|
|
}
|
2021-02-18 15:38:19 +01:00
|
|
|
|
2005-04-22 02:43:47 +02:00
|
|
|
return ready;
|
|
|
|
}
|
|
|
|
|
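
The loop above depends on the timer list being kept sorted by deadline: the
first unexpired entry ends the scan, everything at or past the current time
moves to the ready queue, and a single warning fires once a deadline is more
than four seconds stale. A minimal, self-contained sketch of the same pattern,
using toy types and names rather than FRR's:

#include <stdbool.h>
#include <stdio.h>
#include <sys/time.h>

struct toy_timer {
	struct timeval sands;	/* absolute deadline */
	const char *name;
};

/* timers[*head..n-1] must be sorted by deadline, earliest first */
static unsigned int toy_process_timers(struct toy_timer *timers,
				       unsigned int n, unsigned int *head,
				       const struct timeval *now)
{
	unsigned int ready = 0;
	bool displayed = false;

	while (*head < n) {
		struct toy_timer *t = &timers[*head];
		struct timeval late = t->sands;

		/* sorted list: first unexpired timer ends the scan */
		if (timercmp(now, &t->sands, <))
			break;

		/* warn once per pass if we are >4s behind */
		late.tv_sec += 4;
		if (timercmp(now, &late, >) && !displayed) {
			fprintf(stderr, "starvation: %s >4s late\n", t->name);
			displayed = true;
		}

		/* stand-in for event_list_add_tail(&m->ready, ...) */
		printf("ready: %s\n", t->name);
		(*head)++;
		ready++;
	}
	return ready;
}
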
2010-01-11 17:33:07 +01:00
|
|
|
/* process a list en masse, e.g. for event thread lists */
|
2022-12-11 17:27:26 +01:00
|
|
|
static unsigned int thread_process(struct event_list_head *list)
|
2010-01-11 17:33:07 +01:00
|
|
|
{
|
2022-03-01 22:18:12 +01:00
|
|
|
struct event *thread;
|
2010-01-11 17:33:07 +01:00
|
|
|
unsigned int ready = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-12-11 17:27:26 +01:00
|
|
|
while ((thread = event_list_pop(list))) {
|
2022-12-10 15:28:31 +01:00
|
|
|
thread->type = EVENT_READY;
|
2022-12-11 17:27:26 +01:00
|
|
|
event_list_add_tail(&thread->master->ready, thread);
|
2010-01-11 17:33:07 +01:00
|
|
|
ready++;
|
|
|
|
}
|
|
|
|
return ready;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
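
The event_list_pop()/event_list_add_tail() calls here and above are not
hand-written; they are generated by FRR's typesafe-list macros. A hedged
sketch of the shape of those declarations (the member name inside struct
event is assumed for illustration):

#include "typesafe.h"

PREDECL_LIST(event_list);

struct event {
	struct event_list_item item;	/* intrusive linkage (name assumed) */
	/* ... */
};

DECLARE_LIST(event_list, struct event, item);

DECLARE_LIST generates event_list_add_tail(), event_list_pop(),
event_list_first() and event_list_count() with full type checking, which is
why the loops in this file need no casts.
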
2002-12-13 21:15:29 +01:00
|
|
|
/* Fetch next ready thread. */
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event *event_fetch(struct event_loop *m, struct event *fetch)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2022-03-01 22:18:12 +01:00
|
|
|
struct event *thread = NULL;
|
2017-01-23 18:45:43 +01:00
|
|
|
struct timeval now;
|
2017-06-09 05:40:27 +02:00
|
|
|
struct timeval zerotime = {0, 0};
|
2017-06-14 19:06:10 +02:00
|
|
|
struct timeval tv;
|
2017-06-30 16:29:14 +02:00
|
|
|
struct timeval *tw = NULL;
|
2020-09-21 21:57:59 +02:00
|
|
|
bool eintr_p = false;
|
2017-06-07 22:34:09 +02:00
|
|
|
int num = 0;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
do {
|
2017-06-09 05:40:27 +02:00
|
|
|
/* Handle signals if any */
|
2017-06-07 22:34:09 +02:00
|
|
|
if (m->handle_signals)
|
2021-11-11 20:28:54 +01:00
|
|
|
frr_sigevent_process();
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
pthread_mutex_lock(&m->mtx);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
/* Process any pending cancellation requests */
|
2022-12-10 15:08:37 +01:00
|
|
|
do_event_cancel(m);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-29 20:58:43 +02:00
|
|
|
/*
|
|
|
|
* Attempt to flush ready queue before going into poll().
|
|
|
|
* This is performance-critical. Think twice before modifying.
|
|
|
|
*/
|
2022-12-11 17:27:26 +01:00
|
|
|
if ((thread = event_list_pop(&m->ready))) {
|
2017-08-29 20:58:43 +02:00
|
|
|
fetch = thread_run(m, thread, fetch);
|
|
|
|
if (fetch->ref)
|
|
|
|
*fetch->ref = NULL;
|
|
|
|
pthread_mutex_unlock(&m->mtx);
|
2021-02-18 01:58:19 +01:00
|
|
|
if (!m->ready_run_loop)
|
|
|
|
GETRUSAGE(&m->last_getrusage);
|
|
|
|
m->ready_run_loop = true;
|
2017-08-29 20:58:43 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2021-02-18 01:58:19 +01:00
|
|
|
m->ready_run_loop = false;
|
2017-08-29 20:58:43 +02:00
|
|
|
/* otherwise, tick through scheduling sequence */
|
|
|
|
|
2017-08-03 17:19:48 +02:00
|
|
|
/*
|
|
|
|
* Post events to ready queue. This must come before the
|
|
|
|
* following block since events should occur immediately
|
|
|
|
*/
|
2017-06-07 22:34:09 +02:00
|
|
|
thread_process(&m->event);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-03 17:19:48 +02:00
|
|
|
/*
|
|
|
|
* If there are no tasks on the ready queue, we will poll()
|
|
|
|
* until a timer expires or we receive I/O, whichever comes
|
|
|
|
* first. The strategy for doing this is:
|
2017-06-14 19:06:10 +02:00
|
|
|
*
|
|
|
|
* - If there are events pending, set the poll() timeout to zero
|
|
|
|
* - If there are no events pending, but there are timers
|
2020-08-12 15:46:44 +02:00
|
|
|
* pending, set the timeout to the smallest remaining time on
|
|
|
|
* any timer.
|
2017-06-14 19:06:10 +02:00
|
|
|
* - If there are neither timers nor events pending, but there
|
2020-08-12 15:46:44 +02:00
|
|
|
* are file descriptors pending, block indefinitely in poll()
|
2017-06-14 19:06:10 +02:00
|
|
|
* - If nothing is pending, it's time for the application to die
|
|
|
|
*
|
|
|
|
* In every case except the last, we need to hit poll() at least
|
2017-08-03 17:19:48 +02:00
|
|
|
* once per loop to avoid starvation by events
|
|
|
|
*/
|
2022-12-11 17:27:26 +01:00
|
|
|
if (!event_list_count(&m->ready))
|
2019-01-31 02:30:35 +01:00
|
|
|
tw = thread_timer_wait(&m->timer, &tv);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2022-12-11 17:27:26 +01:00
|
|
|
if (event_list_count(&m->ready) ||
|
|
|
|
(tw && !timercmp(tw, &zerotime, >)))
|
2017-06-14 19:06:10 +02:00
|
|
|
tw = &zerotime;
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-14 19:06:10 +02:00
|
|
|
if (!tw && m->handler.pfdcount == 0) { /* die */
|
|
|
|
pthread_mutex_unlock(&m->mtx);
|
|
|
|
fetch = NULL;
|
|
|
|
break;
|
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-03 17:19:48 +02:00
|
|
|
/*
|
|
|
|
* Copy pollfd array + # active pollfds in it. Not necessary to
|
|
|
|
* copy the array size as this is fixed.
|
|
|
|
*/
|
2017-06-14 19:06:10 +02:00
|
|
|
m->handler.copycount = m->handler.pfdcount;
|
|
|
|
memcpy(m->handler.copy, m->handler.pfds,
|
|
|
|
m->handler.copycount * sizeof(struct pollfd));
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-29 20:58:43 +02:00
|
|
|
pthread_mutex_unlock(&m->mtx);
|
|
|
|
{
|
2020-09-21 21:57:59 +02:00
|
|
|
eintr_p = false;
|
|
|
|
num = fd_poll(m, tw, &eintr_p);
|
2017-08-29 20:58:43 +02:00
|
|
|
}
|
|
|
|
pthread_mutex_lock(&m->mtx);
|
2017-08-02 20:15:40 +02:00
|
|
|
|
2017-08-29 20:58:43 +02:00
|
|
|
/* Handle any errors received in poll() */
|
|
|
|
if (num < 0) {
|
2020-09-21 21:57:59 +02:00
|
|
|
if (eintr_p) {
|
2017-06-14 19:06:10 +02:00
|
|
|
pthread_mutex_unlock(&m->mtx);
|
2017-08-29 20:58:43 +02:00
|
|
|
/* loop around to signal handler */
|
|
|
|
continue;
|
2017-06-14 19:06:10 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-08-29 20:58:43 +02:00
|
|
|
/* else die */
|
2018-09-13 21:34:28 +02:00
|
|
|
flog_err(EC_LIB_SYSTEM_CALL, "poll() error: %s",
|
2018-08-20 15:45:06 +02:00
|
|
|
safe_strerror(errno));
|
2017-08-29 20:58:43 +02:00
|
|
|
pthread_mutex_unlock(&m->mtx);
|
|
|
|
fetch = NULL;
|
|
|
|
break;
|
2017-08-03 17:19:48 +02:00
|
|
|
}
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
/* Post timers to ready queue. */
|
|
|
|
monotime(&now);
|
2021-02-18 15:38:19 +01:00
|
|
|
thread_process_timers(m, &now);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
/* Post I/O to ready queue. */
|
|
|
|
if (num > 0)
|
2017-06-09 05:40:27 +02:00
|
|
|
thread_process_io(m, num);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
pthread_mutex_unlock(&m->mtx);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
} while (!thread && m->spin);
|
2017-07-17 14:03:14 +02:00
|
|
|
|
2017-06-07 22:34:09 +02:00
|
|
|
return fetch;
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
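
The timeout selection spelled out in the comment above collapses to a small
decision function. A sketch, with invented names, of the rule event_fetch
applies before calling fd_poll() (poll() takes milliseconds; -1 means block
forever):

#include <stdbool.h>
#include <sys/time.h>

/* 0 = don't sleep, >0 = wait for nearest timer, -1 = block on I/O */
static int toy_poll_timeout(unsigned int nready, const struct timeval *tw,
			    unsigned int pfdcount, bool *die)
{
	*die = false;
	if (nready)
		return 0;	/* ready work exists: poll without sleeping */
	if (tw)
		return (int)(tw->tv_sec * 1000 + tw->tv_usec / 1000);
	if (pfdcount)
		return -1;	/* only fds pending: block indefinitely */
	*die = true;		/* nothing pending at all: time to exit */
	return -1;
}

The canonical driver for this function is the fetch/call pair: a daemon's
main loop repeatedly does

	struct event thread;

	while (event_fetch(master, &thread))
		event_call(&thread);

so event_fetch() returning NULL (the "die" case above) ends the process.
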
2022-12-11 16:51:58 +01:00
|
|
|
unsigned long event_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
|
|
|
|
unsigned long *cputime)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2021-04-13 20:49:26 +02:00
|
|
|
#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
|
2022-03-01 15:02:33 +01:00
|
|
|
|
|
|
|
#ifdef __FreeBSD__
|
|
|
|
/*
|
|
|
|
* FreeBSD appears to have an issue when calling clock_gettime
|
|
|
|
		 * with CLOCK_THREAD_CPUTIME_ID in quick succession:
|
|
|
|
		 * occasionally the now time will be before the start time.
|
|
|
|
		 * This is not good, and FRR ends up reporting CPU HOGs
|
|
|
|
* when the subtraction wraps to very large numbers
|
|
|
|
*
|
|
|
|
		 * What we do here is cheat a little: detect the
|
|
|
|
		 * inversion when it occurs and clamp 'now' so that
|
|
|
|
		 * the subtraction can never go negative
|
|
|
|
*/
|
|
|
|
if (start->cpu.tv_sec == now->cpu.tv_sec &&
|
|
|
|
start->cpu.tv_nsec > now->cpu.tv_nsec)
|
|
|
|
now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
|
|
|
|
else if (start->cpu.tv_sec > now->cpu.tv_sec) {
|
|
|
|
now->cpu.tv_sec = start->cpu.tv_sec;
|
|
|
|
now->cpu.tv_nsec = start->cpu.tv_nsec + 1;
|
|
|
|
}
|
|
|
|
#endif
|
2021-04-13 20:49:26 +02:00
|
|
|
*cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
|
|
|
|
+ (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
|
|
|
|
#else
|
2002-12-13 21:15:29 +01:00
|
|
|
/* This is 'user + sys' time. */
|
2005-04-28 03:31:13 +02:00
|
|
|
*cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime)
|
|
|
|
+ timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime);
|
2021-04-13 20:49:26 +02:00
|
|
|
#endif
|
2005-04-28 03:31:13 +02:00
|
|
|
return timeval_elapsed(now->real, start->real);
|
|
|
|
}
|
|
|
|
|
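
Usage follows the snapshot pattern visible in event_call() below: take an
RUSAGE_T before and after the work, then ask for the difference. Both results
are in microseconds. A short sketch (do_work() is a placeholder):

	RUSAGE_T before, after;
	unsigned long walltime, cputime;

	GETRUSAGE(&before);
	do_work();
	GETRUSAGE(&after);

	walltime = event_consumed_time(&after, &before, &cputime);
	/* walltime: elapsed wall-clock usecs; cputime: thread CPU usecs */
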
2023-03-23 12:37:28 +01:00
|
|
|
/*
|
|
|
|
 * We should aim to yield after 'yield' milliseconds, which defaults
|
|
|
|
 * to EVENT_YIELD_TIME_SLOT.
|
|
|
|
* Note: we are using real (wall clock) time for this calculation.
|
|
|
|
* It could be argued that CPU time may make more sense in certain
|
|
|
|
* contexts. The things to consider are whether the thread may have
|
|
|
|
* blocked (in which case wall time increases, but CPU time does not),
|
|
|
|
* or whether the system is heavily loaded with other processes competing
|
|
|
|
* for CPU time. On balance, wall clock time seems to make sense.
|
|
|
|
* Plus it has the added benefit that gettimeofday should be faster
|
|
|
|
* than calling getrusage.
|
|
|
|
*/
|
2022-12-11 16:39:12 +01:00
|
|
|
int event_should_yield(struct event *thread)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2017-03-03 20:01:49 +01:00
|
|
|
int result;
|
2023-03-23 12:37:28 +01:00
|
|
|
|
2022-07-20 21:47:42 +02:00
|
|
|
frr_with_mutex (&thread->mtx) {
|
2017-03-03 20:01:49 +01:00
|
|
|
result = monotime_since(&thread->real, NULL)
|
|
|
|
> (int64_t)thread->yield;
|
|
|
|
}
|
|
|
|
return result;
|
2015-05-20 02:58:10 +02:00
|
|
|
}
|
|
|
|
|
2022-12-11 16:39:12 +01:00
|
|
|
void event_set_yield_time(struct event *thread, unsigned long yield_time)
|
2015-05-20 02:58:10 +02:00
|
|
|
{
|
2022-07-20 21:47:42 +02:00
|
|
|
frr_with_mutex (&thread->mtx) {
|
2017-03-03 20:01:49 +01:00
|
|
|
thread->yield = yield_time;
|
|
|
|
}
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
|
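
Together these two functions support cooperative batching: a long-running
task processes work until its wall-clock slot expires, then reschedules
itself rather than starving the rest of the loop. A hedged sketch of that
pattern; the ctx type and its helpers are invented, while EVENT_ARG,
event_should_yield and event_add_event are the real API:

#include <stdbool.h>
#include "frrevent.h"

struct ctx {
	bool (*more_work)(struct ctx *c);
	void (*do_one)(struct ctx *c);
};

static void work_fn(struct event *thread)
{
	struct ctx *c = EVENT_ARG(thread);

	while (c->more_work(c)) {
		c->do_one(c);
		if (event_should_yield(thread)) {
			/* time slot used up: queue a continuation and
			 * give other tasks a chance to run */
			event_add_event(thread->master, work_fn, c, 0, NULL);
			return;
		}
	}
}
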
2022-12-11 16:51:58 +01:00
|
|
|
void event_getrusage(RUSAGE_T *r)
|
[lib] Bug #134: threads should be more robust against backward time jumps
2006-08-25 Paul Jakma <paul.jakma@sun.com>
* thread.c: (general) Add support for monotonic clock, it may still
jump forward by huge amounts, but should be immune to going
backwards. Fixes bug #134.
(quagga_gettimeofday_relative_adjust) helper, does what name
says - adjusts gettimeofday based relative timer.
(quagga_gettimeofday) helper to keep recent_time up to date.
(quagga_get_relative) helper, update and fetch the relative
timer using gettimeofday(). POSIX CLOCK_MONOTONIC is also
supported, but the code is not enabled yet nor tested.
(quagga_real_stabilised) helper, retrieve absolute time but
stabilised so as to never decrease.
(quagga_gettime) Exported interface, analogous to POSIX
clock_gettime() in interface, supporting several clocks.
(quagga_time) Exported interface, analogous to traditional
time(), will never decrease.
(recent_relative_time) Convenience function to retrieve
relative_time timeval, similar to existing recent_time absolute
timeval, for when an approximately recent value will do.
(remainder) Update to use above helpers.
(thread_getrusage) Previously was a macro, but needs to be
a function to twiddle with thread.c private stuff.
* thread.h: Point the GETRUSAGE macro at previous function.
Export quagga_gettime, quagga_time and recent_relative_time for
general use.
2006-08-27 08:44:02 +02:00
|
|
|
{
|
2021-04-13 20:49:26 +02:00
|
|
|
monotime(&r->real);
|
|
|
|
if (!cputime_enabled) {
|
|
|
|
memset(&r->cpu, 0, sizeof(r->cpu));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
|
|
|
|
/* not currently implemented in Linux's vDSO, but maybe at some point
|
|
|
|
* in the future?
|
|
|
|
*/
|
|
|
|
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &r->cpu);
|
|
|
|
#else /* !HAVE_CLOCK_THREAD_CPUTIME_ID */
|
2019-01-09 18:18:21 +01:00
|
|
|
#if defined RUSAGE_THREAD
|
|
|
|
#define FRR_RUSAGE RUSAGE_THREAD
|
|
|
|
#else
|
|
|
|
#define FRR_RUSAGE RUSAGE_SELF
|
|
|
|
#endif
|
2021-04-13 20:49:26 +02:00
|
|
|
getrusage(FRR_RUSAGE, &(r->cpu));
|
|
|
|
#endif
|
2006-08-27 08:44:02 +02:00
|
|
|
}
|
|
|
|
|
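
The fallback chain above (CLOCK_THREAD_CPUTIME_ID, then RUSAGE_THREAD, then
RUSAGE_SELF) is the portable way to read per-thread CPU time. A standalone
sketch of the same chain, normalizing the getrusage() result into a timespec:

#include <time.h>
#include <sys/resource.h>

static void toy_thread_cputime(struct timespec *ts)
{
#ifdef CLOCK_THREAD_CPUTIME_ID
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, ts);
#else
	struct rusage ru;

#ifdef RUSAGE_THREAD
	getrusage(RUSAGE_THREAD, &ru);	/* Linux: this thread only */
#else
	getrusage(RUSAGE_SELF, &ru);	/* fallback: whole process */
#endif
	/* user + system time, normalized to a valid timespec */
	ts->tv_sec = ru.ru_utime.tv_sec + ru.ru_stime.tv_sec;
	ts->tv_nsec = (ru.ru_utime.tv_usec + ru.ru_stime.tv_usec) * 1000L;
	if (ts->tv_nsec >= 1000000000L) {
		ts->tv_sec += ts->tv_nsec / 1000000000L;
		ts->tv_nsec %= 1000000000L;
	}
#endif
}
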
2018-04-20 23:27:16 +02:00
|
|
|
/*
|
|
|
|
* Call a thread.
|
|
|
|
*
|
|
|
|
* This function will atomically update the thread's usage history. At present
|
|
|
|
* this is the only spot where usage history is written. Nevertheless the code
|
|
|
|
* has been written such that the introduction of writers in the future should
|
|
|
|
* not need to update it provided the writers atomically perform only the
|
|
|
|
* operations done here, i.e. updating the total and maximum times. In
|
|
|
|
* particular, the maximum real and cpu times must be monotonically increasing
|
|
|
|
* or this code is not correct.
|
|
|
|
*/
|
2022-12-11 13:51:16 +01:00
|
|
|
void event_call(struct event *thread)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2012-05-07 18:53:12 +02:00
|
|
|
RUSAGE_T before, after;
|
2023-10-09 11:03:50 +02:00
|
|
|
bool suppress_warnings = EVENT_ARG(thread);
|
2006-07-25 22:40:40 +02:00
|
|
|
|
2021-04-13 20:38:09 +02:00
|
|
|
/* if the thread being called is the CLI, it may change cputime_enabled
|
|
|
|
* ("service cputime-stats" command), which can result in nonsensical
|
|
|
|
* and very confusing warnings
|
|
|
|
*/
|
|
|
|
bool cputime_enabled_here = cputime_enabled;
|
|
|
|
|
2021-02-18 01:58:19 +01:00
|
|
|
if (thread->master->ready_run_loop)
|
|
|
|
before = thread->master->last_getrusage;
|
|
|
|
else
|
|
|
|
GETRUSAGE(&before);
|
|
|
|
|
2012-05-07 18:53:12 +02:00
|
|
|
thread->real = before.real;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2022-12-11 13:51:16 +01:00
|
|
|
frrtrace(9, frr_libfrr, event_call, thread->master,
|
2021-02-02 21:05:50 +01:00
|
|
|
thread->xref->funcname, thread->xref->xref.file,
|
2022-12-11 13:51:16 +01:00
|
|
|
thread->xref->xref.line, NULL, thread->u.fd, thread->u.val,
|
|
|
|
thread->arg, thread->u.sands.tv_sec);
|
2020-09-15 00:04:33 +02:00
|
|
|
|
2017-06-15 18:05:19 +02:00
|
|
|
pthread_setspecific(thread_current, thread);
|
2002-12-13 21:15:29 +01:00
|
|
|
(*thread->func)(thread);
|
2017-06-15 18:05:19 +02:00
|
|
|
pthread_setspecific(thread_current, NULL);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2012-05-07 18:53:12 +02:00
|
|
|
GETRUSAGE(&after);
|
2021-02-18 01:58:19 +01:00
|
|
|
thread->master->last_getrusage = after;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2021-04-13 20:38:09 +02:00
|
|
|
unsigned long walltime, cputime;
|
|
|
|
unsigned long exp;
|
2018-04-20 23:27:16 +02:00
|
|
|
|
2022-12-11 16:51:58 +01:00
|
|
|
walltime = event_consumed_time(&after, &before, &cputime);
|
2021-04-13 20:38:09 +02:00
|
|
|
|
|
|
|
/* update walltime */
|
|
|
|
atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
|
2018-04-20 23:27:16 +02:00
|
|
|
memory_order_seq_cst);
|
|
|
|
exp = atomic_load_explicit(&thread->hist->real.max,
|
|
|
|
memory_order_seq_cst);
|
2021-04-13 20:38:09 +02:00
|
|
|
while (exp < walltime
|
2018-04-20 23:27:16 +02:00
|
|
|
&& !atomic_compare_exchange_weak_explicit(
|
2021-04-13 20:38:09 +02:00
|
|
|
&thread->hist->real.max, &exp, walltime,
|
|
|
|
memory_order_seq_cst, memory_order_seq_cst))
|
2018-04-20 23:27:16 +02:00
|
|
|
;
|
|
|
|
|
2021-04-13 20:38:09 +02:00
|
|
|
if (cputime_enabled_here && cputime_enabled) {
|
|
|
|
/* update cputime */
|
|
|
|
atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
|
|
|
|
memory_order_seq_cst);
|
|
|
|
exp = atomic_load_explicit(&thread->hist->cpu.max,
|
|
|
|
memory_order_seq_cst);
|
|
|
|
while (exp < cputime
|
|
|
|
&& !atomic_compare_exchange_weak_explicit(
|
|
|
|
&thread->hist->cpu.max, &exp, cputime,
|
|
|
|
memory_order_seq_cst, memory_order_seq_cst))
|
|
|
|
;
|
|
|
|
}
|
2018-04-20 23:27:16 +02:00
|
|
|
|
|
|
|
atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
|
|
|
|
memory_order_seq_cst);
|
|
|
|
atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
|
|
|
|
memory_order_seq_cst);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2023-10-09 11:03:50 +02:00
|
|
|
if (suppress_warnings)
|
|
|
|
return;
|
|
|
|
|
2021-04-13 20:38:09 +02:00
|
|
|
if (cputime_enabled_here && cputime_enabled && cputime_threshold
|
|
|
|
&& cputime > cputime_threshold) {
|
2002-12-13 21:15:29 +01:00
|
|
|
/*
|
2021-04-13 20:38:09 +02:00
|
|
|
* We have a CPU Hog on our hands. The time FRR has spent
|
|
|
|
* doing actual work (not sleeping) is greater than 5 seconds.
|
2002-12-13 21:15:29 +01:00
|
|
|
* Whinge about it now, so we're aware this is yet another task
|
|
|
|
* to fix.
|
|
|
|
*/
|
2021-02-02 19:24:31 +01:00
|
|
|
atomic_fetch_add_explicit(&thread->hist->total_cpu_warn,
|
|
|
|
1, memory_order_seq_cst);
|
2018-08-20 15:45:06 +02:00
|
|
|
flog_warn(
|
2021-02-02 18:56:06 +01:00
|
|
|
EC_LIB_SLOW_THREAD_CPU,
|
|
|
|
"CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)",
|
|
|
|
thread->xref->funcname, (unsigned long)thread->func,
|
2021-04-13 20:38:09 +02:00
|
|
|
walltime / 1000, cputime / 1000);
|
|
|
|
|
|
|
|
} else if (walltime_threshold && walltime > walltime_threshold) {
|
2021-02-02 18:56:06 +01:00
|
|
|
/*
|
2021-04-13 20:38:09 +02:00
|
|
|
* The runtime for a task is greater than 5 seconds, but the
|
|
|
|
* cpu time is under 5 seconds. Let's whine about this because
|
|
|
|
* this could imply some sort of scheduling issue.
|
2021-02-02 18:56:06 +01:00
|
|
|
*/
|
2021-02-02 19:24:31 +01:00
|
|
|
atomic_fetch_add_explicit(&thread->hist->total_wall_warn,
|
|
|
|
1, memory_order_seq_cst);
|
2021-02-02 18:56:06 +01:00
|
|
|
flog_warn(
|
|
|
|
EC_LIB_SLOW_THREAD_WALL,
|
|
|
|
"STARVATION: task %s (%lx) ran for %lums (cpu time %lums)",
|
2020-04-28 09:30:50 +02:00
|
|
|
thread->xref->funcname, (unsigned long)thread->func,
|
2021-04-13 20:38:09 +02:00
|
|
|
walltime / 1000, cputime / 1000);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
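
The history updates in event_call() use a lock-free "monotonic max": load the
current maximum, then retry a weak compare-exchange until either this sample
is published or another writer has published a larger one. Isolated as a
helper, the pattern looks like this:

#include <stdatomic.h>

static void atomic_max(_Atomic unsigned long *max, unsigned long val)
{
	unsigned long exp = atomic_load_explicit(max, memory_order_seq_cst);

	/* a failed CAS reloads exp, so each iteration re-tests against
	 * the newest published maximum */
	while (exp < val &&
	       !atomic_compare_exchange_weak_explicit(max, &exp, val,
						      memory_order_seq_cst,
						      memory_order_seq_cst))
		;
}
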
|
|
|
/* Execute thread */
|
2023-03-07 20:14:41 +01:00
|
|
|
void _event_execute(const struct xref_eventsched *xref, struct event_loop *m,
|
2023-07-11 22:03:38 +02:00
|
|
|
void (*func)(struct event *), void *arg, int val,
|
|
|
|
struct event **eref)
|
2002-12-13 21:15:29 +01:00
|
|
|
{
|
2022-03-01 22:18:12 +01:00
|
|
|
struct event *thread;
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2023-07-11 22:03:38 +02:00
|
|
|
/* Cancel existing scheduled task TODO -- nice to do in 1 lock cycle */
|
|
|
|
if (eref)
|
|
|
|
event_cancel(eref);
|
|
|
|
|
2018-10-01 18:38:34 +02:00
|
|
|
/* Get or allocate new thread to execute. */
|
2022-07-20 21:47:42 +02:00
|
|
|
frr_with_mutex (&m->mtx) {
|
2022-12-10 15:28:31 +01:00
|
|
|
thread = thread_get(m, EVENT_EVENT, func, arg, xref);
|
2013-11-18 23:04:27 +01:00
|
|
|
|
2018-10-01 18:38:34 +02:00
|
|
|
/* Set its event value. */
|
2022-07-20 21:47:42 +02:00
|
|
|
frr_with_mutex (&thread->mtx) {
|
2022-12-10 15:28:31 +01:00
|
|
|
thread->add_type = EVENT_EXECUTE;
|
2018-10-01 18:38:34 +02:00
|
|
|
thread->u.val = val;
|
|
|
|
thread->ref = &thread;
|
|
|
|
}
|
|
|
|
}
|
2016-11-10 20:25:39 +01:00
|
|
|
|
2018-10-01 18:38:34 +02:00
|
|
|
/* Execute thread doing all accounting. */
|
2022-12-11 13:51:16 +01:00
|
|
|
event_call(thread);
|
2013-11-18 23:04:27 +01:00
|
|
|
|
2018-10-01 18:38:34 +02:00
|
|
|
/* Give back or free thread. */
|
|
|
|
thread_add_unuse(m, thread);
|
2002-12-13 21:15:29 +01:00
|
|
|
}
|
2020-09-21 22:02:06 +02:00
|
|
|
|
|
|
|
/* Debug signal mask - if 'sigs' is NULL, use current effective mask. */
|
|
|
|
void debug_signals(const sigset_t *sigs)
|
|
|
|
{
|
|
|
|
int i, found;
|
|
|
|
sigset_t tmpsigs;
|
|
|
|
char buf[300];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We're only looking at the non-realtime signals here, so we need
|
|
|
|
* some limit value. Platform differences mean at some point we just
|
|
|
|
* need to pick a reasonable value.
|
|
|
|
*/
|
|
|
|
#if defined SIGRTMIN
|
|
|
|
# define LAST_SIGNAL SIGRTMIN
|
|
|
|
#else
|
|
|
|
# define LAST_SIGNAL 32
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
if (sigs == NULL) {
|
|
|
|
sigemptyset(&tmpsigs);
|
|
|
|
pthread_sigmask(SIG_BLOCK, NULL, &tmpsigs);
|
|
|
|
sigs = &tmpsigs;
|
|
|
|
}
|
|
|
|
|
|
|
|
found = 0;
|
|
|
|
buf[0] = '\0';
|
|
|
|
|
|
|
|
for (i = 0; i < LAST_SIGNAL; i++) {
|
|
|
|
char tmp[20];
|
|
|
|
|
|
|
|
if (sigismember(sigs, i) > 0) {
|
|
|
|
if (found > 0)
|
|
|
|
strlcat(buf, ",", sizeof(buf));
|
|
|
|
snprintf(tmp, sizeof(tmp), "%d", i);
|
|
|
|
strlcat(buf, tmp, sizeof(buf));
|
|
|
|
found++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (found == 0)
|
|
|
|
snprintf(buf, sizeof(buf), "<none>");
|
|
|
|
|
|
|
|
zlog_debug("%s: %s", __func__, buf);
|
|
|
|
}
|
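
A short usage note: pass NULL to log the calling thread's currently blocked
signals, or hand in an explicit set to inspect:

	sigset_t s;

	debug_signals(NULL);	/* dump the current effective mask */

	sigemptyset(&s);
	sigaddset(&s, SIGTERM);
	debug_signals(&s);	/* dump an arbitrary set */
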
2021-10-12 19:22:54 +02:00
|
|
|
|
2022-01-12 02:07:41 +01:00
|
|
|
static ssize_t printfrr_thread_dbg(struct fbuf *buf, struct printfrr_eargs *ea,
|
2022-03-01 22:18:12 +01:00
|
|
|
const struct event *thread)
|
2022-01-12 02:07:41 +01:00
|
|
|
{
|
2022-12-10 15:28:31 +01:00
|
|
|
static const char *const types[] = {
|
|
|
|
[EVENT_READ] = "read", [EVENT_WRITE] = "write",
|
|
|
|
[EVENT_TIMER] = "timer", [EVENT_EVENT] = "event",
|
|
|
|
[EVENT_READY] = "ready", [EVENT_UNUSED] = "unused",
|
|
|
|
[EVENT_EXECUTE] = "exec",
|
2022-01-12 02:07:41 +01:00
|
|
|
};
|
|
|
|
ssize_t rv = 0;
|
|
|
|
char info[16] = "";
|
|
|
|
|
|
|
|
if (!thread)
|
|
|
|
return bputs(buf, "{(thread *)NULL}");
|
|
|
|
|
|
|
|
rv += bprintfrr(buf, "{(thread *)%p arg=%p", thread, thread->arg);
|
|
|
|
|
|
|
|
if (thread->type < array_size(types) && types[thread->type])
|
|
|
|
rv += bprintfrr(buf, " %-6s", types[thread->type]);
|
|
|
|
else
|
|
|
|
rv += bprintfrr(buf, " INVALID(%u)", thread->type);
|
|
|
|
|
|
|
|
switch (thread->type) {
|
2022-12-10 15:28:31 +01:00
|
|
|
case EVENT_READ:
|
|
|
|
case EVENT_WRITE:
|
2022-01-12 02:07:41 +01:00
|
|
|
snprintfrr(info, sizeof(info), "fd=%d", thread->u.fd);
|
|
|
|
break;
|
|
|
|
|
2022-12-10 15:28:31 +01:00
|
|
|
case EVENT_TIMER:
|
2022-01-12 02:07:41 +01:00
|
|
|
snprintfrr(info, sizeof(info), "r=%pTVMud", &thread->u.sands);
|
|
|
|
break;
|
2022-12-10 15:28:31 +01:00
|
|
|
case EVENT_READY:
|
|
|
|
case EVENT_EVENT:
|
|
|
|
case EVENT_UNUSED:
|
|
|
|
case EVENT_EXECUTE:
|
|
|
|
break;
|
2022-01-12 02:07:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
rv += bprintfrr(buf, " %-12s %s() %s from %s:%d}", info,
|
|
|
|
thread->xref->funcname, thread->xref->dest,
|
|
|
|
thread->xref->xref.file, thread->xref->xref.line);
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2022-01-14 11:56:25 +01:00
|
|
|
printfrr_ext_autoreg_p("TH", printfrr_thread);
|
2022-01-12 02:07:41 +01:00
|
|
|
static ssize_t printfrr_thread(struct fbuf *buf, struct printfrr_eargs *ea,
|
|
|
|
const void *ptr)
|
|
|
|
{
|
2022-03-01 22:18:12 +01:00
|
|
|
const struct event *thread = ptr;
|
2022-01-12 02:07:41 +01:00
|
|
|
struct timespec remain = {};
|
|
|
|
|
|
|
|
if (ea->fmt[0] == 'D') {
|
|
|
|
ea->fmt++;
|
|
|
|
return printfrr_thread_dbg(buf, ea, thread);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!thread) {
|
|
|
|
/* need to jump over time formatting flag characters in the
|
|
|
|
* input format string, i.e. adjust ea->fmt!
|
|
|
|
*/
|
|
|
|
printfrr_time(buf, ea, &remain,
|
|
|
|
TIMEFMT_TIMER_DEADLINE | TIMEFMT_SKIP);
|
|
|
|
return bputch(buf, '-');
|
|
|
|
}
|
|
|
|
|
|
|
|
TIMEVAL_TO_TIMESPEC(&thread->u.sands, &remain);
|
|
|
|
return printfrr_time(buf, ea, &remain, TIMEFMT_TIMER_DEADLINE);
|
|
|
|
}
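
With the extension registered, any printfrr-based output (zlog, vty_out,
snprintfrr) understands the new specifier: plain %pTH renders the time
remaining until the event's deadline, while %pTHD produces the verbose debug
dump built by printfrr_thread_dbg() above, as already used by the starvation
warning in thread_process_timers(). For example:

	zlog_debug("timer fires in %pTH", thread);	/* deadline countdown */
	zlog_debug("event detail: %pTHD", thread);	/* full debug dump */
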
|