Index: libc/include/thread_private.h
===================================================================
RCS file: /cvs/src/lib/libc/include/thread_private.h,v
diff -u -p -r1.37 thread_private.h
--- libc/include/thread_private.h	18 Aug 2024 02:25:51 -0000	1.37
+++ libc/include/thread_private.h	26 Sep 2024 00:38:01 -0000
@@ -406,6 +406,14 @@ struct pthread {
 /*
  * Internal functions exported from libc's thread bits for use by libpthread
  */
+
+#define SPIN_COUNT 128
+#if defined(__i386__) || defined(__amd64__)
+#define SPIN_WAIT() asm volatile("pause": : : "memory")
+#else
+#define SPIN_WAIT() do { } while (0)
+#endif
+
 void	_spinlock(volatile _atomic_lock_t *);
 int	_spinlocktry(volatile _atomic_lock_t *);
 void	_spinunlock(volatile _atomic_lock_t *);
Index: libc/thread/rthread.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread.c,v
diff -u -p -r1.9 rthread.c
--- libc/thread/rthread.c	12 Oct 2020 22:06:51 -0000	1.9
+++ libc/thread/rthread.c	26 Sep 2024 00:38:01 -0000
@@ -21,6 +21,7 @@
 #include 
 #include 
+#include <sys/sysctl.h>
 
 #include 
 #include 
 
@@ -34,6 +35,7 @@
 int _rthread_debug_level;
 
 static int _threads_inited;
+static int _threads_ncpus;
 
 struct pthread _initial_thread = {
 	.flags_lock = _SPINLOCK_UNLOCKED,
@@ -43,11 +45,50 @@ struct pthread _initial_thread = {
 /*
  * internal support functions
  */
+
+/*
+ * Wait for the spinlock to become unlocked.
+ *
+ * On uniprocessor systems it is pointless to spin waiting for
+ * another thread to release the lock because this thread occupies
+ * the only CPU, preventing the thread holding the lock from running
+ * and leaving the critical section.
+ *
+ * On multiprocessor systems we spin, but not forever, since there may
+ * still be more threads than CPUs and more progress might be made if
+ * we can get the other thread to run.
+ */
+static inline void
+_spinlock_wait(volatile _atomic_lock_t *lock)
+{
+	do {
+		if (_threads_ncpus > 1) {
+			unsigned int spin;
+
+			for (spin = 0; spin < SPIN_COUNT; spin++) {
+				SPIN_WAIT();
+				if (*lock == _ATOMIC_LOCK_UNLOCKED)
+					return;
+			}
+		}
+
+		sched_yield();
+	} while (*lock != _ATOMIC_LOCK_UNLOCKED);
+}
+
 void
 _spinlock(volatile _atomic_lock_t *lock)
 {
+	if (_threads_ncpus == 0) {
+		static const int mib[] = { CTL_HW, HW_NCPU };
+		size_t ncpuslen = sizeof(_threads_ncpus);
+
+		if (sysctl(mib, 2, &_threads_ncpus, &ncpuslen, NULL, 0) == -1)
+			_threads_ncpus = 1;
+	}
+
 	while (_atomic_lock(lock))
-		sched_yield();
+		_spinlock_wait(lock);
 	membar_enter_after_atomic();
 }
 DEF_STRONG(_spinlock);
Index: libc/thread/rthread_mutex.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread_mutex.c,v
diff -u -p -r1.6 rthread_mutex.c
--- libc/thread/rthread_mutex.c	20 Sep 2024 02:00:46 -0000	1.6
+++ libc/thread/rthread_mutex.c	26 Sep 2024 00:38:01 -0000
@@ -36,13 +36,6 @@ enum {
 	CONTENDED	= 2,	/* threads waiting for this mutex */
 };
 
-#define SPIN_COUNT 128
-#if defined(__i386__) || defined(__amd64__)
-#define SPIN_WAIT() asm volatile("pause": : : "memory")
-#else
-#define SPIN_WAIT() do { } while (0)
-#endif
-
 static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
 
 int
Index: librthread/rthread_rwlock.c
===================================================================
RCS file: /cvs/src/lib/librthread/rthread_rwlock.c,v
diff -u -p -r1.13 rthread_rwlock.c
--- librthread/rthread_rwlock.c	3 Mar 2019 18:39:10 -0000	1.13
+++ librthread/rthread_rwlock.c	26 Sep 2024 00:38:03 -0000
@@ -31,13 +31,6 @@
 #define WAITING		0x80000000
 #define COUNT(v)	((v) & WRITER)
 
-#define SPIN_COUNT 128
-#if defined(__i386__) || defined(__amd64__)
-#define SPIN_WAIT() asm volatile("pause": : : "memory")
-#else
-#define SPIN_WAIT() do { } while (0)
-#endif
-
 static _atomic_lock_t rwlock_init_lock = _SPINLOCK_UNLOCKED;
 
 int
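For readers outside the libc tree, the sketch below restates the same spin-then-yield strategy in self-contained form. It uses C11 atomics instead of libc's internal _atomic_lock()/_ATOMIC_LOCK_UNLOCKED, and the names spin_lock_t, spin_lock(), spin_wait() and spin_unlock() are invented for the illustration; only the shape of the logic (query hw.ncpu once, spin up to SPIN_COUNT times with a pause hint on MP systems, otherwise fall back to sched_yield()) follows the diff above.

/*
 * Sketch only: spin-then-yield with C11 atomics.  The lock type and
 * function names are hypothetical, not the libc internals.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <sched.h>
#include <stdatomic.h>

#define SPIN_COUNT 128
#if defined(__i386__) || defined(__amd64__)
#define SPIN_WAIT() asm volatile("pause": : : "memory")
#else
#define SPIN_WAIT() do { } while (0)
#endif

typedef atomic_int spin_lock_t;		/* 0 = unlocked, 1 = locked */

static int ncpus;			/* cached hw.ncpu, 0 = not yet queried */

/* Wait until the lock looks free, spinning first on MP systems. */
static void
spin_wait(spin_lock_t *lock)
{
	do {
		if (ncpus > 1) {
			unsigned int spin;

			for (spin = 0; spin < SPIN_COUNT; spin++) {
				SPIN_WAIT();
				if (atomic_load_explicit(lock,
				    memory_order_relaxed) == 0)
					return;
			}
		}

		/* Single CPU, or we spun long enough: let the holder run. */
		sched_yield();
	} while (atomic_load_explicit(lock, memory_order_relaxed) != 0);
}

void
spin_lock(spin_lock_t *lock)
{
	/* First caller asks the kernel how many CPUs are present. */
	if (ncpus == 0) {
		const int mib[2] = { CTL_HW, HW_NCPU };
		size_t len = sizeof(ncpus);

		if (sysctl(mib, 2, &ncpus, &len, NULL, 0) == -1)
			ncpus = 1;
	}

	/* The exchange returns the previous value: nonzero = already held. */
	while (atomic_exchange_explicit(lock, 1, memory_order_acquire) != 0)
		spin_wait(lock);
}

void
spin_unlock(spin_lock_t *lock)
{
	atomic_store_explicit(lock, 0, memory_order_release);
}

Note that in the diff itself the uncontended fast path is unchanged: _spinlock() still does a single _atomic_lock() followed by membar_enter_after_atomic(), and _spinlock_wait() only runs after that first attempt has failed, so the extra logic costs nothing when the lock is free.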