lib/seqlock: add a few more comments

Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
Author: David Lamparter <equinox@opensourcerouting.org>
Date:   2019-06-25 20:33:02 +02:00
Parent: 2a5e62359f
Commit: 30ef834ab3

2 changed files with 51 additions and 7 deletions

lib/seqlock.c

@@ -36,8 +36,14 @@
#include "seqlock.h"
/****************************************
* OS specific synchronization wrappers *
****************************************/
/*
* Linux: sys_futex()
*/
#ifdef HAVE_SYNC_LINUX_FUTEX
/* Linux-specific - sys_futex() */
#include <sys/syscall.h>
#include <linux/futex.h>
@@ -55,8 +61,10 @@ static long sys_futex(void *addr1, int op, int val1,
#define wait_poke(sqlo) \
sys_futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)
/*
* OpenBSD: sys_futex(), almost the same as on Linux
*/
#elif defined(HAVE_SYNC_OPENBSD_FUTEX)
/* OpenBSD variant of the above. */
#include <sys/syscall.h>
#include <sys/futex.h>
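Both futex-based branches follow the same pattern: a wait primitive parks the thread while the position word still holds a given value, and wait_poke() (shown in the hunk above) wakes every parked thread. A minimal standalone sketch of that pattern on Linux, assuming the usual syscall(2) plumbing since glibc exposes no futex wrapper; the demo_ names are illustrative and not part of lib/seqlock.c:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>
#include <stdint.h>
#include <time.h>

static long demo_sys_futex(void *addr, int op, int val,
			   const struct timespec *timeout)
{
	return syscall(SYS_futex, addr, op, val, timeout, NULL, 0);
}

/* sleep while *pos still reads as "cur"; the kernel re-checks the value
 * atomically, so a wake between our load and the syscall is not lost */
static void demo_wait_once(_Atomic uint32_t *pos, uint32_t cur)
{
	demo_sys_futex((int *)pos, FUTEX_WAIT, (int)cur, NULL);
}

/* wake every thread currently parked on *pos (cf. wait_poke above) */
static void demo_wait_poke(_Atomic uint32_t *pos)
{
	demo_sys_futex((int *)pos, FUTEX_WAKE, INT_MAX, NULL);
}

The OpenBSD branch is nearly identical except that futex(2) is a proper libc function there; the FreeBSD branch routes the same idea through _umtx_op(), whose wake side is visible in the next hunk.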
@@ -69,8 +77,10 @@ static long sys_futex(void *addr1, int op, int val1,
#define wait_poke(sqlo) \
futex((int *)&sqlo->pos, FUTEX_WAKE, INT_MAX, NULL, NULL, 0)
/*
* FreeBSD: _umtx_op()
*/
#elif defined(HAVE_SYNC_UMTX_OP)
/* FreeBSD-specific: umtx_op() */
#include <sys/umtx.h>
#define wait_once(sqlo, val) \
@@ -89,9 +99,10 @@ static int wait_time(struct seqlock *sqlo, uint32_t val,
#define wait_poke(sqlo) \
_umtx_op((void *)&sqlo->pos, UMTX_OP_WAKE, INT_MAX, NULL, NULL)
#else
/* generic version. used on *BSD, Solaris and OSX.
/*
* generic version. used on NetBSD, Solaris and OSX. really shitty.
*/
#else
#define TIME_ABS_REALTIME 1
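The generic branch has no futex-style primitive to lean on, which is also why it is the one defining TIME_ABS_REALTIME: the portable way to get a timed sleep is pthread_cond_timedwait(), and that takes an absolute CLOCK_REALTIME deadline. A hedged sketch of such a mutex-plus-condvar fallback; this is an assumption about the technique, not a copy of FRR's generic path, and the demo_ struct and names are invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* hypothetical stand-in for the no-futex wait path */
struct demo_waiter {
	pthread_mutex_t  lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t   wake;	/* init with PTHREAD_COND_INITIALIZER */
	_Atomic uint32_t pos;
};

/* sleep until poked or until the absolute CLOCK_REALTIME deadline passes;
 * the caller re-checks pos afterwards, just like the futex variants */
static bool demo_wait_once(struct demo_waiter *w, uint32_t cur,
			   const struct timespec *abs_realtime)
{
	bool woken = true;

	pthread_mutex_lock(&w->lock);
	if (atomic_load(&w->pos) == cur)
		woken = pthread_cond_timedwait(&w->wake, &w->lock,
					       abs_realtime) == 0;
	pthread_mutex_unlock(&w->lock);
	return woken;
}

/* wake everybody; called after the position has been advanced */
static void demo_wait_poke(struct demo_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	pthread_cond_broadcast(&w->wake);
	pthread_mutex_unlock(&w->lock);
}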
@@ -151,6 +162,9 @@ void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val)
bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
const struct timespec *abs_monotime_limit)
{
/*
* ABS_REALTIME - used on NetBSD, Solaris and OSX
*/
#if TIME_ABS_REALTIME
#define time_arg1 &abs_rt
#define time_arg2 NULL
@@ -170,6 +184,9 @@ bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
}
abs_rt.tv_sec += abs_monotime_limit->tv_sec - curmono.tv_sec;
/*
* RELATIVE - used on OpenBSD (might get a patch to get absolute monotime)
*/
#elif TIME_RELATIVE
struct timespec reltime;
@@ -183,6 +200,9 @@ bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
reltime.tv_sec--; \
reltime.tv_nsec += 1000000000; \
}
/*
* FreeBSD & Linux: absolute time re. CLOCK_MONOTONIC
*/
#else
#define time_arg1 abs_monotime_limit
#define time_arg2 NULL
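Across the three timekeeping branches of seqlock_timedwait(), the caller-facing contract is the same: the limit is an absolute CLOCK_MONOTONIC timespec. The ABS_REALTIME branch has to translate that onto CLOCK_REALTIME (the tv_sec arithmetic in the hunk above), and the RELATIVE branch turns it into a remaining-time delta. A sketch of the realtime conversion with the nanosecond carry written out; the helper name is illustrative, not from the FRR sources:

#include <time.h>

static void demo_monotime_to_realtime(const struct timespec *abs_mono,
				      struct timespec *abs_rt)
{
	struct timespec curmono, currt;

	clock_gettime(CLOCK_MONOTONIC, &curmono);
	clock_gettime(CLOCK_REALTIME, &currt);

	/* abs_rt = now_realtime + (abs_mono - now_monotonic) */
	abs_rt->tv_sec  = currt.tv_sec  + (abs_mono->tv_sec  - curmono.tv_sec);
	abs_rt->tv_nsec = currt.tv_nsec + (abs_mono->tv_nsec - curmono.tv_nsec);

	/* normalize tv_nsec into [0, 1e9) */
	if (abs_rt->tv_nsec >= 1000000000L) {
		abs_rt->tv_sec++;
		abs_rt->tv_nsec -= 1000000000L;
	} else if (abs_rt->tv_nsec < 0) {
		abs_rt->tv_sec--;
		abs_rt->tv_nsec += 1000000000L;
	}
}

The usual caveat applies: if the wall clock is stepped while a thread is blocked, a deadline re-expressed on CLOCK_REALTIME drifts away from the intended monotonic limit.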

lib/seqlock.h

@@ -61,12 +61,22 @@ typedef _Atomic uint32_t seqlock_ctr_t;
typedef uint32_t seqlock_val_t;
#define seqlock_assert_valid(val) assert((val) & SEQLOCK_HELD)
/* NB: SEQLOCK_WAITERS is only allowed if SEQLOCK_HELD is also set; can't
* have waiters on an unheld seqlock
*/
#define SEQLOCK_HELD (1U << 0)
#define SEQLOCK_WAITERS (1U << 1)
#define SEQLOCK_VAL(n) ((n) & ~SEQLOCK_WAITERS)
#define SEQLOCK_STARTVAL 1U
#define SEQLOCK_INCR 4U
/* TODO: originally, this was using "atomic_fetch_add", which is the reason
* bit 0 is used to indicate held state. With SEQLOCK_WAITERS added, there's
* no fetch_add anymore (cmpxchg loop instead), so we don't need to use bit 0
* for this anymore & can just special-case the value 0 for it and skip it in
* counting.
*/
struct seqlock {
/* always used */
seqlock_ctr_t pos;
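To make the flag layout above concrete: bit 0 marks the seqlock as held, bit 1 flags waiters, and positions advance in steps of SEQLOCK_INCR (4), so the low two bits stay available for the flags. A small self-contained check, using DEMO_ copies of the macros so it compiles on its own:

#include <assert.h>
#include <stdint.h>

/* same encoding as in seqlock.h, repeated for a standalone illustration */
#define DEMO_HELD     (1U << 0)
#define DEMO_WAITERS  (1U << 1)
#define DEMO_VAL(n)   ((n) & ~DEMO_WAITERS)
#define DEMO_STARTVAL 1U
#define DEMO_INCR     4U

int main(void)
{
	/* held positions run 1, 5, 9, ... - bit 0 stays set, bit 1 is free
	 * for the WAITERS flag, and DEMO_VAL() strips WAITERS so two
	 * readings of the same position always compare equal */
	uint32_t pos = DEMO_STARTVAL;

	assert(pos & DEMO_HELD);
	assert(DEMO_VAL(pos | DEMO_WAITERS) == pos);

	pos += DEMO_INCR;		/* one "bump": 1 -> 5 */
	assert(DEMO_VAL(pos) == 5U);
	assert(pos & DEMO_HELD);	/* still encodes "held" */

	return 0;
}

The TODO in the hunk points out that with a cmpxchg-based bump the held marker no longer has to live in bit 0; reserving the value 0 for "unheld" and skipping it when counting would work just as well.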
@@ -80,10 +90,16 @@ struct seqlock {
extern void seqlock_init(struct seqlock *sqlo);
/* while (sqlo <= val) - wait until seqlock->pos > val, or seqlock unheld */
/* basically: "while (sqlo <= val) wait();"
* returns when sqlo > val || !seqlock_held(sqlo)
*/
extern void seqlock_wait(struct seqlock *sqlo, seqlock_val_t val);
/* same, but time-limited (limit is an absolute CLOCK_MONOTONIC value) */
extern bool seqlock_timedwait(struct seqlock *sqlo, seqlock_val_t val,
const struct timespec *abs_monotime_limit);
/* one-shot test, returns true if seqlock_wait would return immediately */
extern bool seqlock_check(struct seqlock *sqlo, seqlock_val_t val);
static inline bool seqlock_held(struct seqlock *sqlo)
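The three declarations above form the reader-side API: seqlock_wait() blocks until the tracked position passes a value (or the lock is released), seqlock_timedwait() adds an absolute CLOCK_MONOTONIC limit, and seqlock_check() is the non-blocking probe. A short usage sketch under those semantics; the demo_ wrappers and the producer/consumer framing are invented for illustration:

#include <stdbool.h>
#include <time.h>
#include "seqlock.h"

/* hypothetical reader: block until the producer's position has moved past
 * the value we last saw, or the producer dropped its seqlock entirely */
static void demo_catch_up(struct seqlock *producer, seqlock_val_t seen)
{
	if (seqlock_check(producer, seen))
		return;		/* seqlock_wait() would not block anyway */

	seqlock_wait(producer, seen);
}

/* same, but give up at an absolute CLOCK_MONOTONIC deadline; the bool
 * result is assumed to report whether the wait finished before the limit */
static bool demo_catch_up_until(struct seqlock *producer, seqlock_val_t seen,
				const struct timespec *deadline)
{
	return seqlock_timedwait(producer, seen, deadline);
}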
@@ -93,12 +109,20 @@ static inline bool seqlock_held(struct seqlock *sqlo)
/* sqlo - get seqlock position -- for the "counter" seqlock */
extern seqlock_val_t seqlock_cur(struct seqlock *sqlo);
/* sqlo++ - note: like x++, returns previous value, before bumping */
/* ++sqlo (but atomic & wakes waiters) - returns value that we bumped to.
*
* guarantees:
* - each seqlock_bump call bumps the position by exactly one SEQLOCK_INCR.
* There are no skipped/missed or multiple increments.
* - each return value is only returned from one seqlock_bump() call
*/
extern seqlock_val_t seqlock_bump(struct seqlock *sqlo);
/* sqlo = val - can be used on held seqlock. */
extern void seqlock_acquire_val(struct seqlock *sqlo, seqlock_val_t val);
/* sqlo = ref - standard pattern: acquire relative to other seqlock */
static inline void seqlock_acquire(struct seqlock *sqlo, struct seqlock *ref)
{
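Taken together, the TODO earlier in seqlock.h and the guarantees listed for seqlock_bump() describe a compare-and-swap loop rather than a plain atomic_fetch_add: each call advances the position by exactly one SEQLOCK_INCR, and no two calls can return the same value. A sketch of that technique over a bare counter; it reuses DEMO_ copies of the flag macros so it stands alone, is not the code in lib/seqlock.c, and leaves out waking the waiters:

#include <stdatomic.h>
#include <stdint.h>

#define DEMO_WAITERS (1U << 1)	/* repeated from the earlier sketch */
#define DEMO_INCR    4U

static uint32_t demo_bump(_Atomic uint32_t *pos)
{
	uint32_t oldpos, newpos;

	oldpos = atomic_load_explicit(pos, memory_order_relaxed);
	do {
		/* strip WAITERS before adding so the flag never leaks into
		 * the arithmetic; if another thread raced us the CAS fails,
		 * oldpos is refreshed, and we retry - no increment is lost
		 * or applied twice */
		newpos = (oldpos & ~DEMO_WAITERS) + DEMO_INCR;
	} while (!atomic_compare_exchange_weak_explicit(
			pos, &oldpos, newpos,
			memory_order_release, memory_order_relaxed));

	/* if (oldpos & DEMO_WAITERS), a real implementation would poke the
	 * futex / condvar here */
	return newpos;
}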