Index: include/thread_private.h
===================================================================
RCS file: /cvs/src/lib/libc/include/thread_private.h,v
diff -u -p -r1.36 thread_private.h
--- include/thread_private.h	6 Jan 2021 19:54:17 -0000	1.36
+++ include/thread_private.h	3 Jul 2024 10:58:23 -0000
@@ -287,6 +287,12 @@ struct __sem {
 	int shared;
 };
 
+struct __cmtx {
+	_atomic_lock_t	lock;
+	uint32_t	ticket;
+	uint32_t	next;
+};
+
 TAILQ_HEAD(pthread_queue, pthread);
 
 #ifdef FUTEX
@@ -402,6 +408,13 @@ struct pthread {
 	(self)->delayed_cancel = 0;				\
 	ENTER_CANCEL_POINT_INNER(tib, 1, 1)
 
+#define SPIN_COUNT 128
+#if defined(__i386__) || defined(__amd64__)
+#define SPIN_WAIT() asm volatile("pause": : : "memory")
+#else
+#define SPIN_WAIT() do { } while (0)
+#endif
+
 /*
  * Internal functions exported from libc's thread bits for use by libpthread
  */
@@ -413,6 +426,15 @@ void	_rthread_debug(int, const char *, .
 			__attribute__((__format__ (printf, 2, 3)));
 pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
 void	_thread_finalize(void);
+
+/*
+ * simple mutex for libc to use internally
+ */
+void	__cmtx_init(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+#define __CMTX_INITIALIZER() { .lock = _SPINLOCK_UNLOCKED }
 
 /*
  * Threading syscalls not declared in system headers
Index: thread/rthread.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread.c,v
diff -u -p -r1.9 rthread.c
--- thread/rthread.c	12 Oct 2020 22:06:51 -0000	1.9
+++ thread/rthread.c	3 Jul 2024 10:58:24 -0000
@@ -21,6 +21,8 @@
 #include <sys/types.h>
 #include <sys/atomic.h>
+#include <sys/futex.h>
+#include <stdint.h>
 #include <pthread.h>
 #include <tib.h>
@@ -46,8 +48,11 @@ struct pthread _initial_thread = {
 void
 _spinlock(volatile _atomic_lock_t *lock)
 {
-	while (_atomic_lock(lock))
-		sched_yield();
+	while (_atomic_lock(lock)) {
+		do {
+			SPIN_WAIT();
+		} while (*lock != _ATOMIC_LOCK_UNLOCKED);
+	}
 	membar_enter_after_atomic();
 }
 DEF_STRONG(_spinlock);
@@ -69,6 +74,51 @@ _spinunlock(volatile _atomic_lock_t *loc
_spinunlock(volatile _atomic_lock_t *loc *lock = _ATOMIC_LOCK_UNLOCKED; } DEF_STRONG(_spinunlock); + +void +__cmtx_init(struct __cmtx *cm) +{ + cm->lock = _SPINLOCK_UNLOCKED; + cm->ticket = cm->next = 0; +} + +void +__cmtx_enter(struct __cmtx *cm) +{ + uint32_t t, n; + unsigned int spin; + + _spinlock(&cm->lock); + n = cm->next++; + t = cm->ticket; + _spinunlock(&cm->lock); + + for (spin = 0; spin < SPIN_COUNT; spin++) { + if (t == n) + return; + SPIN_WAIT(); + t = cm->ticket; + } + + while (t != n) { + futex(&cm->ticket, FUTEX_WAIT, t, NULL, NULL); + t = cm->ticket; + } +} + +void +__cmtx_leave(struct __cmtx *cm) +{ + uint32_t t, n; + + _spinlock(&cm->lock); + t = cm->ticket++; + n = cm->next; + _spinunlock(&cm->lock); + + if (t != n) + futex(&cm->ticket, FUTEX_WAKE, 1, NULL, NULL); +} static void _rthread_init(void) Index: thread/rthread_libc.c =================================================================== RCS file: /cvs/src/lib/libc/thread/rthread_libc.c,v diff -u -p -r1.4 rthread_libc.c --- thread/rthread_libc.c 6 Jan 2021 19:54:17 -0000 1.4 +++ thread/rthread_libc.c 3 Jul 2024 10:58:24 -0000 @@ -303,18 +303,18 @@ _thread_atfork_unlock(void) /* * arc4random lock */ -static _atomic_lock_t arc4_lock = _SPINLOCK_UNLOCKED; +static struct __cmtx arc4_lock = __CMTX_INITIALIZER(); void _thread_arc4_lock(void) { - _spinlock(&arc4_lock); + __cmtx_enter(&arc4_lock); } void _thread_arc4_unlock(void) { - _spinunlock(&arc4_lock); + __cmtx_leave(&arc4_lock); } pid_t Index: thread/rthread_mutex.c =================================================================== RCS file: /cvs/src/lib/libc/thread/rthread_mutex.c,v diff -u -p -r1.5 rthread_mutex.c --- thread/rthread_mutex.c 13 Feb 2019 13:09:32 -0000 1.5 +++ thread/rthread_mutex.c 3 Jul 2024 10:58:24 -0000 @@ -36,14 +36,7 @@ enum { CONTENDED = 2, /* threads waiting for this mutex */ }; -#define SPIN_COUNT 128 -#if defined(__i386__) || defined(__amd64__) -#define SPIN_WAIT() asm volatile("pause": : : "memory") -#else 
-#define SPIN_WAIT() do { } while (0)
-#endif
-
-static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
+static struct __cmtx static_init_lock = __CMTX_INITIALIZER();
 
 int
 pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
@@ -151,10 +144,10 @@ _rthread_mutex_timedlock(pthread_mutex_t
 	 * is NULL.
 	 */
 	if (*mutexp == NULL) {
-		_spinlock(&static_init_lock);
+		__cmtx_enter(&static_init_lock);
 		if (*mutexp == NULL)
 			error = pthread_mutex_init(mutexp, NULL);
-		_spinunlock(&static_init_lock);
+		__cmtx_leave(&static_init_lock);
 		if (error != 0)
 			return (EINVAL);
 	}
Index: thread/rthread_sync.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread_sync.c,v
diff -u -p -r1.6 rthread_sync.c
--- thread/rthread_sync.c	10 Jan 2024 04:28:43 -0000	1.6
+++ thread/rthread_sync.c	3 Jul 2024 10:58:24 -0000
@@ -30,7 +30,7 @@
 #include "rthread.h"
 #include "cancel.h"		/* in libc/include */
 
-static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
+static struct __cmtx static_init_lock = __CMTX_INITIALIZER();
 
 /*
  * mutexen
@@ -96,10 +96,10 @@ _rthread_mutex_lock(pthread_mutex_t *mut
 	 * is NULL.
 	 */
 	if (*mutexp == NULL) {
-		_spinlock(&static_init_lock);
+		__cmtx_enter(&static_init_lock);
 		if (*mutexp == NULL)
 			ret = pthread_mutex_init(mutexp, NULL);
-		_spinunlock(&static_init_lock);
+		__cmtx_leave(&static_init_lock);
 		if (ret != 0)
 			return (EINVAL);
 	}