2023-02-08 13:17:09 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2022-12-11 17:33:30 +01:00
|
|
|
/* Event management routine header.
|
2002-12-13 21:15:29 +01:00
|
|
|
* Copyright (C) 1998 Kunihiro Ishiguro
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _ZEBRA_THREAD_H
|
|
|
|
#define _ZEBRA_THREAD_H
|
|
|
|
|
2024-02-29 13:17:20 +01:00
|
|
|
#include <signal.h>
|
2011-12-25 17:52:09 +01:00
|
|
|
#include <zebra.h>
|
2017-03-03 20:01:49 +01:00
|
|
|
#include <pthread.h>
|
2017-05-10 20:09:49 +02:00
|
|
|
#include <poll.h>
|
|
|
|
#include "monotime.h"
|
2018-04-20 23:27:16 +02:00
|
|
|
#include "frratomic.h"
|
2019-01-31 02:12:38 +01:00
|
|
|
#include "typesafe.h"
|
2020-04-28 09:30:50 +02:00
|
|
|
#include "xref.h"
|
2011-12-25 17:52:09 +01:00
|
|
|
|
2019-02-07 23:10:31 +01:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2023-09-24 19:41:24 +02:00
|
|
|
/* threshold for the event loop's "task ran too long" warning; units are
 * presumably microseconds (5s) -- confirm against the GETRUSAGE users in
 * event.c
 */
#define CONSUMED_TIME_CHECK 5000000

/* runtime toggle for CPU-time measurement, and the warning threshold
 * applied when it is enabled
 */
extern bool cputime_enabled;
extern unsigned long cputime_threshold;
/* capturing wallclock time is always enabled since it is fast (reading
 * hardware TSC w/o syscalls)
 */
extern unsigned long walltime_threshold;
|
|
|
|
|
2005-04-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
Add wall-clock timing statistics to 'show thread cpu' output.
* thread.h: Define struct rusage_t to contain wall-clock time
and cpu time. Change GETRUSAGE macro to collect both pieces
of data. Make appropriate changes to struct cpu_thread_history
to track CPU time and real time. Change proto for
thread_consumed_time to return real and cpu time elapsed.
And declare a new global variable 'struct timeval recent_time'.
* thread.c (struct timeval recent_time): New global timestamp variable.
(timeval_adjust): If timeout is negative, set to 0 (not 10
microseconds). And remove upper bound of 1,000,000 seconds, since
this does not seem to make any sense (and it breaks
funcname_thread_add_timer_timeval).
(timeval_cmp): Should return long, not int.
(vty_out_cpu_thread_history): Show CPU time and real time.
(cpu_record_hash_print): Calculate totals for CPU and real time.
(cpu_record_print): Change 'show thread cpu' title to show CPU and
real time.
(thread_timer_remain_second): Put current time in global recent_time.
(funcname_thread_add_timer_timeval): Fix assert. Replace 2-case
switch assignment with a ternary expression. Use global recent_time
variable. Fix use of timeval_adjust (previously, the value was not
actually being adjusted).
(thread_cancel): Add missing "break" statement in case
THREAD_BACKGROUND.
(thread_timer_wait): Use global recent_time value instead of calling
gettimeofday. And there's no need to check for negative timeouts,
since timeval_subtract already sets these to zero.
(thread_timer_process): Timers are sorted, so bail out once we
encounter a timer that has not yet popped. And remove some
extraneous asserts.
(thread_fetch): Do not process foreground timers before calling
select. Instead, add them to the ready list just after the select.
Also, no need to maintain a count of the number of ready threads,
since we don't care how many there are, just whether there's
one at the head of the ready list (which is easily checked).
Stick current time in global variable recent_time to reduce
the number of calls to gettimeofday. Tighten logic for
calculating the select timeout.
(thread_consumed_time): Now returns real time and puts the elapsed
cpu time in an additional argument.
(thread_should_yield): Use real (wall-clock) time to decide whether
to yield.
(thread_call): Maintain CPU and real time statistics.
* vty.c (vty_command): For slow commands, show real and cpu time.
2005-04-28 03:31:13 +02:00
|
|
|
/* snapshot of resource usage: CPU time plus wall-clock time */
struct rusage_t {
#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
	/* per-thread CPU time, presumably via
	 * clock_gettime(CLOCK_THREAD_CPUTIME_ID) -- confirm in event.c
	 */
	struct timespec cpu;
#else
	/* fallback: getrusage()-style CPU accounting */
	struct rusage cpu;
#endif
	/* wall-clock timestamp */
	struct timeval real;
};
|
2023-03-07 20:22:48 +01:00
|
|
|
/* type captured by the GETRUSAGE macro below */
#define RUSAGE_T struct rusage_t

/* fill X (a RUSAGE_T *) with the current usage snapshot */
#define GETRUSAGE(X) event_getrusage(X)
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2022-12-11 17:27:26 +01:00
|
|
|
/* typesafe container pre-declarations (see typesafe.h): a list for
 * event queues and a heap for pending timers
 */
PREDECL_LIST(event_list);
PREDECL_HEAP(event_timer_list);
|
2013-11-19 15:11:42 +01:00
|
|
|
|
2016-03-04 07:28:29 +01:00
|
|
|
/* poll(2) bookkeeping for one event loop */
struct fd_handler {
	/* number of pfd that fit in the allocated space of pfds. This is a
	 * constant and is the same for both pfds and copy.
	 */
	nfds_t pfdsize;

	/* file descriptors to monitor for i/o */
	struct pollfd *pfds;
	/* number of pollfds stored in pfds */
	nfds_t pfdcount;

	/* chunk used for temp copy of pollfds */
	struct pollfd *copy;
	/* number of pollfds stored in copy */
	nfds_t copycount;
};
|
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
/* cross-reference record emitted for every event scheduling call site;
 * carries file/line/function info for debugging/tracing (see xref.h)
 */
struct xref_eventsched {
	struct xref xref;

	/* stringified callback function name (#f in the macros below) */
	const char *funcname;
	/* stringified destination argument (#t in the macros below) */
	const char *dest;
	/* EVENT_* value being scheduled at this site */
	uint32_t event_type;
};
|
|
|
|
|
2023-09-07 11:48:22 +02:00
|
|
|
/* hash of per-callback cpu_event_history records (one per event loop) */
PREDECL_HASH(cpu_records);
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Master of the threads: one event loop instance. */
struct event_loop {
	char *name; /* display name, for debug output */

	/* scheduled read/write events; presumably indexed by fd (sized by
	 * fd_limit below) -- confirm in event.c
	 */
	struct event **read;
	struct event **write;
	/* pending timers, ordered as a heap */
	struct event_timer_list_head timer;
	/* scheduling queues: plain events, ready-to-run, and free structs */
	struct event_list_head event, ready, unuse;
	struct list *cancel_req;    /* outstanding cancellation requests */
	bool canceled;		    /* cancellation state flag */
	pthread_cond_t cancel_cond; /* signaled when cancellation completes */
	/* per-callback runtime statistics */
	struct cpu_records_head cpu_records[1];
	/* NOTE(review): io_pipe looks like a self-pipe used to interrupt
	 * poll() -- confirm in event.c
	 */
	int io_pipe[2];
	int fd_limit;		   /* size limit for fd-based scheduling */
	struct fd_handler handler; /* poll() state, see above */
	long selectpoll_timeout;
	bool spin;
	bool handle_signals;
	pthread_mutex_t mtx; /* protects this structure */
	pthread_t owner;     /* pthread running this loop */

	nfds_t last_read;

	bool ready_run_loop;
	RUSAGE_T last_getrusage; /* most recent usage snapshot */
};
|
|
|
|
|
2022-12-10 15:28:31 +01:00
|
|
|
/* Event types. */
enum event_types {
	EVENT_READ,    /* fd read event */
	EVENT_WRITE,   /* fd write event */
	EVENT_TIMER,   /* timer expiry */
	EVENT_EVENT,   /* plain queued event */
	EVENT_READY,   /* on the ready queue */
	EVENT_UNUSED,  /* on the unused/free list */
	EVENT_EXECUTE, /* executed directly (see event_execute) */
};
|
|
|
|
|
2022-12-11 17:36:01 +01:00
|
|
|
/* Event itself: one scheduled task and its callback. */
struct event {
	enum event_types type;	   /* event type */
	enum event_types add_type; /* event type */
	/* membership in the loop's list / timer-heap containers */
	struct event_list_item eventitem;
	struct event_timer_list_item timeritem;
	struct event **ref;	   /* external reference (if given) */
	struct event_loop *master; /* pointer to the struct event_loop */
	void (*func)(struct event *e); /* event function */
	void *arg;		       /* event argument */
	union {
		int val;	      /* second argument of the event. */
		int fd;		      /* file descriptor in case of r/w */
		struct timeval sands; /* rest of time sands value. */
	} u;
	struct timeval real;
	struct cpu_event_history *hist; /* cache pointer to cpu_history */
	unsigned long yield;		/* yield time in microseconds */
	const struct xref_eventsched *xref; /* origin location */
	pthread_mutex_t mtx; /* mutex for thread.c functions */
	bool ignore_timer_late;
};
|
|
|
|
|
2022-01-12 02:07:41 +01:00
|
|
|
#ifdef _FRR_ATTRIBUTE_PRINTFRR
|
2022-03-01 22:18:12 +01:00
|
|
|
#pragma FRR printfrr_ext "%pTH"(struct event *)
|
2022-01-12 02:07:41 +01:00
|
|
|
#endif
|
|
|
|
|
2022-12-11 17:36:01 +01:00
|
|
|
/* accumulated runtime statistics for one event callback function */
struct cpu_event_history {
	struct cpu_records_item item;

	/* the callback these statistics belong to */
	void (*func)(struct event *e);

	/* fields between the pair of these two are nulled on "clear event cpu" */
	char _clear_begin[0];

	atomic_size_t total_cpu_warn;	/* CPU-time threshold warnings */
	atomic_size_t total_wall_warn;	/* wall-clock threshold warnings */
	atomic_size_t total_starv_warn; /* starvation warnings */
	atomic_size_t total_calls;	/* number of invocations */
	atomic_size_t total_active;	/* currently running instances */

	/* accumulated and maximum runtime; presumably microseconds --
	 * confirm against event.c
	 */
	struct time_stats {
		atomic_size_t total, max;
	} real;			/* wall-clock time */
	struct time_stats cpu;	/* CPU time */
	/* NOTE(review): looks like a bitmask of event types observed for
	 * this callback -- confirm in event.c
	 */
	atomic_uint_fast32_t types;

	/* end of cleared region */
	char _clear_end[0];

	/* printable name of func */
	const char *funcname;
};
|
|
|
|
|
2015-05-20 03:03:40 +02:00
|
|
|
/* Struct timeval's tv_usec one second value. */
#define TIMER_SECOND_MICRO 1000000L

/* Difference between two timevals (a - b), expressed in microseconds.
 * Relies on unsigned wraparound when b > a; callers are expected to pass
 * a >= b.
 */
static inline unsigned long timeval_elapsed(struct timeval a, struct timeval b)
{
	unsigned long elapsed;

	elapsed = (a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO;
	elapsed += a.tv_usec - b.tv_usec;
	return elapsed;
}
|
|
|
|
|
2022-12-11 17:39:09 +01:00
|
|
|
/* Event yield time. */
#define EVENT_YIELD_TIME_SLOT 10 * 1000L /* 10ms */

/* buffer size for formatted timer strings; presumably "HH:MM:SS" plus
 * terminator -- confirm against the formatting code in event.c
 */
#define EVENT_TIMER_STRLEN 12
|
2020-03-27 15:30:20 +01:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Macros: accessors for common struct event fields. */
#define EVENT_ARG(X) ((X)->arg)
#define EVENT_FD(X) ((X)->u.fd)
#define EVENT_VAL(X) ((X)->u.val)
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-07-17 23:09:51 +02:00
|
|
|
/*
 * Please consider this macro deprecated, and do not use it in new code.
 */
/* cancel the event referenced by the given pointer, if one is scheduled;
 * event_cancel() presumably nulls the pointer through the reference --
 * confirm in event.c
 */
#define EVENT_OFF(thread)                                                      \
	do {                                                                   \
		if ((thread))                                                  \
			event_cancel(&(thread));                               \
	} while (0)
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2020-04-28 09:30:50 +02:00
|
|
|
/*
 * Macro wrappers to generate xrefs for all thread add calls. Includes
 * file/line/function info for debugging/tracing.
 */
#include "lib/xref.h"

/* common body for the event_add_* wrappers below: materializes a static
 * xref_eventsched describing this call site, links it into the xref
 * table, then forwards to the matching _event_add_##addfn function
 */
#define _xref_t_a(addfn, type, m, f, a, v, t)                                  \
	({                                                                     \
		static const struct xref_eventsched _xref __attribute__(       \
			(used)) = {                                            \
			.xref = XREF_INIT(XREFT_EVENTSCHED, NULL, __func__),   \
			.funcname = #f,                                        \
			.dest = #t,                                            \
			.event_type = EVENT_##type,                            \
		};                                                             \
		XREF_LINK(_xref.xref);                                         \
		_event_add_##addfn(&_xref, m, f, a, v, t);                     \
	}) /* end */
|
2020-04-28 09:30:50 +02:00
|
|
|
|
2022-05-20 20:19:08 +02:00
|
|
|
/* event scheduling entry points; all expand through _xref_t_a above.
 * m = event loop, f = callback, a = callback arg, v = fd / value /
 * timeout (per variant), t = struct event ** to receive the reference
 */
#define event_add_read(m, f, a, v, t) _xref_t_a(read_write, READ, m, f, a, v, t)
#define event_add_write(m, f, a, v, t)                                         \
	_xref_t_a(read_write, WRITE, m, f, a, v, t)
#define event_add_timer(m, f, a, v, t) _xref_t_a(timer, TIMER, m, f, a, v, t)
#define event_add_timer_msec(m, f, a, v, t)                                    \
	_xref_t_a(timer_msec, TIMER, m, f, a, v, t)
#define event_add_timer_tv(m, f, a, v, t)                                      \
	_xref_t_a(timer_tv, TIMER, m, f, a, v, t)
#define event_add_event(m, f, a, v, t) _xref_t_a(event, EVENT, m, f, a, v, t)
2020-04-28 09:30:50 +02:00
|
|
|
|
2023-07-11 22:03:38 +02:00
|
|
|
/* Immediately run callback f through the event subsystem (EVENT_EXECUTE),
 * recording an xref like the event_add_* wrappers.  Unlike those, there is
 * no destination holder, so .dest is NULL; dispatches to _event_execute().
 */
#define event_execute(m, f, a, v, p)                                           \
	({                                                                     \
		static const struct xref_eventsched _xref __attribute__(       \
			(used)) = {                                            \
			.xref = XREF_INIT(XREFT_EVENTSCHED, NULL, __func__),   \
			.funcname = #f,                                        \
			.dest = NULL,                                          \
			.event_type = EVENT_EXECUTE,                           \
		};                                                             \
		XREF_LINK(_xref.xref);                                         \
		_event_execute(&_xref, m, f, a, v, p);                         \
	}) /* end */
|
2005-04-22 16:23:34 +02:00
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
/* Prototypes. */

/* Allocate and initialize a new event loop; @name is used for display. */
extern struct event_loop *event_master_create(const char *name);
/* Rename an existing event loop.
 * (Added `extern` for consistency with every other declaration here;
 * functionally identical, functions have external linkage by default.)
 */
extern void event_master_set_name(struct event_loop *master, const char *name);
/* Tear down and free an event loop created by event_master_create(). */
extern void event_master_free(struct event_loop *m);
|
2005-05-06 Paul Jakma <paul@dishone.st>
* (general) extern and static'ification of functions in code and
header.
Cleanup any definitions with unspecified arguments.
Add casts for callback assignments where the callback is defined,
typically, as passing void *, but the function being assigned has
some other pointer type defined as its argument, as gcc complains
about casts from void * to X* via function arguments.
Fix some old K&R style function argument definitions.
Add noreturn gcc attribute to some functions, as appropriate.
Add unused gcc attribute to some functions (eg ones meant to help
while debugging)
Add guard defines to headers which were missing them.
* command.c: (install_node) add const qualifier, still doesn't shut
up the warning though, because of the double pointer.
(cmp_node) ditto
* keychain.c: (key_str2time) Add GET_LONG_RANGE() macro, derived
fromn vty.h ones to fix some of the (long) < 0 warnings.
* thread.c: (various) use thread_empty
(cpu_record_hash_key) should cast to uintptr_t, a stdint.h type
* vty.h: Add VTY_GET_IPV4_ADDRESS and VTY_GET_IPV4_PREFIX so they
removed from ospfd/ospf_vty.h
* zebra.h: Move definition of ZEBRA_PORT to here, to remove
dependence of lib on zebra/zserv.h
2005-05-06 23:25:49 +02:00
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
extern void _event_add_read_write(const struct xref_eventsched *xref,
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event_loop *master,
|
2022-05-20 20:19:08 +02:00
|
|
|
void (*fn)(struct event *), void *arg, int fd,
|
|
|
|
struct event **tref);
|
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
extern void _event_add_timer(const struct xref_eventsched *xref,
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event_loop *master,
|
2022-05-20 20:19:08 +02:00
|
|
|
void (*fn)(struct event *), void *arg, long t,
|
|
|
|
struct event **tref);
|
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
extern void _event_add_timer_msec(const struct xref_eventsched *xref,
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event_loop *master,
|
2022-05-20 20:19:08 +02:00
|
|
|
void (*fn)(struct event *), void *arg, long t,
|
|
|
|
struct event **tref);
|
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
extern void _event_add_timer_tv(const struct xref_eventsched *xref,
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event_loop *master,
|
2022-05-20 20:19:08 +02:00
|
|
|
void (*fn)(struct event *), void *arg,
|
|
|
|
struct timeval *tv, struct event **tref);
|
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
extern void _event_add_event(const struct xref_eventsched *xref,
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event_loop *master,
|
2022-05-20 20:19:08 +02:00
|
|
|
void (*fn)(struct event *), void *arg, int val,
|
|
|
|
struct event **tref);
|
2020-04-28 09:30:50 +02:00
|
|
|
|
2022-12-11 17:33:30 +01:00
|
|
|
extern void _event_execute(const struct xref_eventsched *xref,
|
2023-03-07 20:14:41 +01:00
|
|
|
struct event_loop *master,
|
2023-07-11 22:03:38 +02:00
|
|
|
void (*fn)(struct event *), void *arg, int val,
|
|
|
|
struct event **eref);
|
2013-11-18 23:04:27 +01:00
|
|
|
|
2022-12-10 15:08:37 +01:00
|
|
|
extern void event_cancel(struct event **event);
|
2023-03-07 20:14:41 +01:00
|
|
|
extern void event_cancel_async(struct event_loop *m, struct event **eptr,
|
2022-12-11 17:20:40 +01:00
|
|
|
void *data);
|
2021-01-27 20:32:22 +01:00
|
|
|
/* Cancel ready tasks with an arg matching 'arg' */
|
2023-03-07 20:14:41 +01:00
|
|
|
extern void event_cancel_event_ready(struct event_loop *m, void *arg);
|
2021-01-27 20:32:22 +01:00
|
|
|
/* Cancel all tasks with an arg matching 'arg', including timers and io */
|
2023-03-07 20:14:41 +01:00
|
|
|
extern void event_cancel_event(struct event_loop *m, void *arg);
|
|
|
|
extern struct event *event_fetch(struct event_loop *m, struct event *event);
|
2022-12-11 13:51:16 +01:00
|
|
|
extern void event_call(struct event *event);
|
2022-12-11 14:19:00 +01:00
|
|
|
extern unsigned long event_timer_remain_second(struct event *event);
|
|
|
|
extern struct timeval event_timer_remain(struct event *event);
|
|
|
|
extern unsigned long event_timer_remain_msec(struct event *event);
|
2022-12-11 16:39:12 +01:00
|
|
|
extern int event_should_yield(struct event *event);
|
2015-05-20 02:58:10 +02:00
|
|
|
/* set yield time for thread */
|
2022-12-11 17:20:40 +01:00
|
|
|
extern void event_set_yield_time(struct event *event, unsigned long ytime);
|
2002-12-13 21:15:29 +01:00
|
|
|
|
2017-02-03 13:59:04 +01:00
|
|
|
/* Internal libfrr exports */
|
2022-12-11 17:20:40 +01:00
|
|
|
extern void event_getrusage(RUSAGE_T *r);
|
2022-12-11 16:51:58 +01:00
|
|
|
extern void event_cmd_init(void);
|
From havanna_moon@gmx.net Fri Jan 17 23:37:49 2003
Date: Sat, 11 Jan 2003 23:26:28 +0100 (CET)
From: Yon Uriarte <havanna_moon@gmx.net>
To: "the list(tm) Zebra" <zebra@zebra.org>
Subject: [zebra 17217] [PATCH] show thread CPU
Hi,
a little patch from the 'stupid preprocessor tricks' collection to record
thread statistics.
Usage: "show thread cpu [r][w][t][e][x]"
Output Fields: self explaining I hope. Type is one of RWTEX for:
Read, Write (fd threads), Timer, Event, Execute.
Overhead vs. vanilla zebra: almost nothing. Vanilla CVS zebra already
collects thread run times.
Caveats: Under Linux getrusage has a granularity of 10ms, which is almost
useless in this case. Run ./configure, edit config.h and comment out
"#define HAVE_RUSAGE", this way it will use gettimeofday which has a much
better granularity. IMHO this is better, as cooperative threads are
effectively running during all that wall time (don't care if CPU
utilization was 3% or 99% during the time the thread was running (an
effective rusage combined with gettimeofday could give that info)).
Maybe someone can give tips for other platforms on API granularity.
TODO: change some of the calls to thread_add_$KIND to
funcname_thread_add_$KIND with a meaningful funcname, so users will get a
better idea of what's going on.
F.ex. (AFAIK):
ospf_spf_calculate_timer -> "Routes Step 1, areas SPF"
ospf_ase_calculate_timer -> "Routes Step 2, externals"
Could this be added to the unofficial patch collection?
Could someone with BGP keepalive problems run their bgpd with this patch
and post the results?
TIA, HTH, HAND, regards
yon
Example output:
--------------------------------
ospfd# show thread cpu
Runtime(ms) Invoked Avg uSecs Max uSecs Type Thread
14.829 31 478 585 T ospf_ase_calculate_timer
82.132 9838 8 291 EX ospf_nsm_event
0.029 1 29 29 E ospf_default_originate_timer
0.254 9 28 34 T ospf_db_desc_timer
0.026 7 3 11 T ospf_wait_timer
669.015 523 1279 490696 R vty_read
4.415 45 98 173 TE ospf_network_lsa_refresh_timer
15.026 31 484 588 T ospf_spf_calculate_timer
29.478 1593 18 122 E ospf_ls_upd_send_queue_event
0.173 1 173 173 T vty_timeout
4.173 242 17 58 E ospf_ls_ack_send_event
637.767 121223 5 55 T ospf_ls_ack_timer
39.373 244 161 2691 R zclient_read
12.169 98 124 726 EX ospf_ism_event
0.226 2 113 125 R vty_accept
537.776 14256 37 3813 W ospf_write
4.967 41 121 250 T ospf_router_lsa_timer
0.672 1 672 672 E zclient_connect
7.901 1658 4 26 T ospf_ls_req_timer
0.459 2 229 266 E ospf_external_lsa_originate_timer
3.203 60 53 305 T ospf_maxage_lsa_remover
108.341 9772 11 65 T ospf_ls_upd_timer
33.302 525 63 8628 W vty_flush
0.101 1 101 101 T ospf_router_lsa_update_timer
0.016 1 16 16 T ospf_router_id_update_timer
26.970 407 66 176 T ospf_lsa_maxage_walker
381.949 12244 31 69 T ospf_hello_timer
0.114 22 5 14 T ospf_inactivity_timer
34.290 1223 28 310 T ospf_lsa_refresh_walker
470.645 6592 71 665 R ospf_read
3119.791 180693 17 490696 RWTEX TOTAL
ospfd#
bgpd# sh t c TeX
Runtime(ms) Invoked Avg uSecs Max uSecs Type Thread
21.504 476 45 71 T bgp_keepalive_timer
17.784 1157 15 131 T bgp_reuse_timer
29.080 193 150 249 T bgp_scan
23.606 995 23 420 E bgp_event
317.734 28572 11 69 T bgp_routeadv_timer
0.084 1 84 84 E zlookup_connect
0.526 1 526 526 E zclient_connect
1.348 13 103 147 T bgp_start_timer
19.443 142 136 420 T bgp_connect_timer
16.032 772 20 27 T bgp_import
447.141 32322 13 526 TEX TOTAL
bgpd#
bgpd# show thread cpu rw
Runtime(ms) Invoked Avg uSecs Max uSecs Type Thread
155.043 7 22149 150659 R bgp_accept
129.638 180 720 53844 R vty_read
1.734 56 30 129 R zclient_read
0.255 2 127 148 R vty_accept
58.483 983 59 340 R bgp_read
171.495 29190 5 245 W bgp_write
13.884 181 76 2542 W vty_flush
530.532 30599 17 150659 RW TOTAL
bgpd#
--------------------------------
2003-01-18 00:47:00 +01:00
|
|
|
|
2005-04-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
Add wall-clock timing statistics to 'show thread cpu' output.
* thread.h: Define struct rusage_t to contain wall-clock time
and cpu time. Change GETRUSAGE macro to collect both pieces
of data. Make appropriate changes to struct cpu_thread_history
to track CPU time and real time. Change proto for
thread_consumed_time to return real and cpu time elapsed.
And declare a new global variable 'struct timeval recent_time'.
* thread.c (struct timeval recent_time): New global timestamp variable.
(timeval_adjust): If timeout is negative, set to 0 (not 10
microseconds). And remove upper bound of 1,000,000 seconds, since
this does not seem to make any sense (and it breaks
funcname_thread_add_timer_timeval).
(timeval_cmp): Should return long, not int.
(vty_out_cpu_thread_history): Show CPU time and real time.
(cpu_record_hash_print): Calculate totals for CPU and real time.
(cpu_record_print): Change 'show thread cpu' title to show CPU and
real time.
(thread_timer_remain_second): Put current time in global recent_time.
(funcname_thread_add_timer_timeval): Fix assert. Replace 2-case
switch assignment with a ternary expression. Use global recent_time
variable. Fix use of timeval_adjust (previously, the value was not
actually being adjusted).
(thread_cancel): Add missing "break" statement in case
THREAD_BACKGROUND.
(thread_timer_wait): Use global recent_time value instead of calling
gettimeofday. And there's no need to check for negative timeouts,
since timeval_subtract already sets these to zero.
(thread_timer_process): Timers are sorted, so bail out once we
encounter a timer that has not yet popped. And remove some
extraneous asserts.
(thread_fetch): Do not process foreground timers before calling
select. Instead, add them to the ready list just after the select.
Also, no need to maintain a count of the number of ready threads,
since we don't care how many there are, just whether there's
one at the head of the ready list (which is easily checked).
Stick current time in global variable recent_time to reduce
the number of calls to gettimeofday. Tighten logic for
calculating the select timeout.
(thread_consumed_time): Now returns real time and puts the elapsed
cpu time in an additional argument.
(thread_should_yield): Use real (wall-clock) time to decide whether
to yield.
(thread_call): Maintain CPU and real time statistics.
* vty.c (vty_command): For slow commands, show real and cpu time.
2005-04-28 03:31:13 +02:00
|
|
|
/* Returns elapsed real (wall clock) time. */
/* Difference between two RUSAGE_T snapshots; wall-clock delta is the return
 * value, CPU-time delta is stored through cpu_time_elapsed.
 */
extern unsigned long event_consumed_time(RUSAGE_T *after, RUSAGE_T *before,
					 unsigned long *cpu_time_elapsed);
|
2005-04-27 Andrew J. Schorr <ajschorr@alumni.princeton.edu>
Add wall-clock timing statistics to 'show thread cpu' output.
* thread.h: Define struct rusage_t to contain wall-clock time
and cpu time. Change GETRUSAGE macro to collect both pieces
of data. Make appropriate changes to struct cpu_thread_history
to track CPU time and real time. Change proto for
thread_consumed_time to return real and cpu time elapsed.
And declare a new global variable 'struct timeval recent_time'.
* thread.c (struct timeval recent_time): New global timestamp variable.
(timeval_adjust): If timeout is negative, set to 0 (not 10
microseconds). And remove upper bound of 1,000,000 seconds, since
this does not seem to make any sense (and it breaks
funcname_thread_add_timer_timeval).
(timeval_cmp): Should return long, not int.
(vty_out_cpu_thread_history): Show CPU time and real time.
(cpu_record_hash_print): Calculate totals for CPU and real time.
(cpu_record_print): Change 'show thread cpu' title to show CPU and
real time.
(thread_timer_remain_second): Put current time in global recent_time.
(funcname_thread_add_timer_timeval): Fix assert. Replace 2-case
switch assignment with a ternary expression. Use global recent_time
variable. Fix use of timeval_adjust (previously, the value was not
actually being adjusted).
(thread_cancel): Add missing "break" statement in case
THREAD_BACKGROUND.
(thread_timer_wait): Use global recent_time value instead of calling
gettimeofday. And there's no need to check for negative timeouts,
since timeval_subtract already sets these to zero.
(thread_timer_process): Timers are sorted, so bail out once we
encounter a timer that has not yet popped. And remove some
extraneous asserts.
(thread_fetch): Do not process foreground timers before calling
select. Instead, add them to the ready list just after the select.
Also, no need to maintain a count of the number of ready threads,
since we don't care how many there are, just whether there's
one at the head of the ready list (which is easily checked).
Stick current time in global variable recent_time to reduce
the number of calls to gettimeofday. Tighten logic for
calculating the select timeout.
(thread_consumed_time): Now returns real time and puts the elapsed
cpu time in an additional argument.
(thread_should_yield): Use real (wall-clock) time to decide whether
to yield.
(thread_call): Maintain CPU and real time statistics.
* vty.c (vty_command): For slow commands, show real and cpu time.
2005-04-28 03:31:13 +02:00
|
|
|
|
2013-11-18 23:52:02 +01:00
|
|
|
/* only for use in logging functions! */
|
2017-06-15 18:05:19 +02:00
|
|
|
extern pthread_key_t thread_current;
|
2022-12-11 16:51:58 +01:00
|
|
|
extern char *event_timer_to_hhmmss(char *buf, int buf_size,
|
|
|
|
struct event *t_timer);
|
2013-11-18 23:52:02 +01:00
|
|
|
|
2022-12-11 16:51:58 +01:00
|
|
|
/* True when the holder pointer refers to a scheduled task, i.e. non-NULL. */
static inline bool event_is_scheduled(struct event *thread)
{
	return thread != NULL;
}
|
|
|
|
|
2020-09-21 22:02:06 +02:00
|
|
|
/* Debug signal mask */
|
|
|
|
void debug_signals(const sigset_t *sigs);
|
|
|
|
|
2022-12-11 16:51:58 +01:00
|
|
|
/* Mark a timer event so the event loop does not complain/react when the
 * timer fires later than requested.  NOTE(review): the flag's consumer is
 * in the event loop implementation (event.c), not visible in this header.
 */
static inline void event_ignore_late_timer(struct event *event)
{
	event->ignore_timer_late = true;
}
|
|
|
|
|
2019-02-07 23:10:31 +01:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2002-12-13 21:15:29 +01:00
|
|
|
#endif /* _ZEBRA_THREAD_H */
|