Index: include/thread_private.h
===================================================================
RCS file: /cvs/src/lib/libc/include/thread_private.h,v
diff -u -p -r1.37 thread_private.h
--- include/thread_private.h	18 Aug 2024 02:25:51 -0000	1.37
+++ include/thread_private.h	12 Jul 2025 08:47:27 -0000
@@ -292,6 +292,12 @@ TAILQ_HEAD(pthread_queue, pthread);
 
 #ifdef FUTEX
 
+/*
+ * CAS based implementations
+ */
+
+#define __NO_CMTX_CAS
+
 struct pthread_mutex {
 	volatile unsigned int lock;
 	int type;
@@ -312,6 +318,10 @@ struct pthread_rwlock {
 
 #else
 
+/*
+ * spinlock based implementations
+ */
+
 struct pthread_mutex {
 	_atomic_lock_t lock;
 	struct pthread_queue lockers;
@@ -336,6 +346,40 @@ struct pthread_rwlock {
 };
 #endif /* FUTEX */
 
+/* libc mutex */
+
+#define __CMTX_UNLOCKED		0
+#define __CMTX_LOCKED		1
+#define __CMTX_CONTENDED	2
+
+#ifdef __CMTX_CAS
+struct __cmtx {
+	volatile unsigned int	lock;
+};
+
+#define __CMTX_INITIALIZER() {			\
+	.lock = __CMTX_UNLOCKED,		\
+}
+#else /* __CMTX_CAS */
+struct __cmtx {
+	_atomic_lock_t		spin;
+	volatile unsigned int	lock;
+};
+
+#define __CMTX_INITIALIZER() {			\
+	.spin = _SPINLOCK_UNLOCKED,		\
+	.lock = __CMTX_UNLOCKED,		\
+}
+#endif /* __CMTX_CAS */
+
+/* libc recursive mutex */
+
+struct __rcmtx {
+	volatile pthread_t	owner;
+	struct __cmtx		mtx;
+	unsigned int		depth;
+};
+
 struct pthread_mutex_attr {
 	int ma_type;
 	int ma_protocol;
@@ -409,6 +453,16 @@ struct pthread {
 void	_spinlock(volatile _atomic_lock_t *);
 int	_spinlocktry(volatile _atomic_lock_t *);
 void	_spinunlock(volatile _atomic_lock_t *);
+
+void	__cmtx_init(struct __cmtx *);
+int	__cmtx_enter_try(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+void	__rcmtx_init(struct __rcmtx *);
+int	__rcmtx_enter_try(struct __rcmtx *);
+void	__rcmtx_enter(struct __rcmtx *);
+void	__rcmtx_leave(struct __rcmtx *);
 void	_rthread_debug(int, const char *, ...)
 		__attribute__((__format__ (printf, 2, 3)));
Index: thread/rthread.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread.c,v
diff -u -p -r1.9 rthread.c
--- thread/rthread.c	12 Oct 2020 22:06:51 -0000	1.9
+++ thread/rthread.c	12 Jul 2025 08:47:27 -0000
@@ -20,12 +20,14 @@
  */
 
 #include
+#include
 
 #include
 #include
 #include
 #include
 #include
+#include
 
 #include "rthread.h"
@@ -69,6 +71,222 @@ _spinunlock(volatile _atomic_lock_t *loc
 	*lock = _ATOMIC_LOCK_UNLOCKED;
 }
 DEF_STRONG(_spinunlock);
+
+#ifdef __CMTX_CAS
+
+/*
+ * CAS+futex locks
+ */
+
+void
+__cmtx_init(struct __cmtx *cmtx)
+{
+	cmtx->lock = __CMTX_UNLOCKED;
+}
+
+int
+__cmtx_enter_try(struct __cmtx *cmtx)
+{
+	if (atomic_cas_uint(&cmtx->lock,
+	    __CMTX_UNLOCKED, __CMTX_LOCKED) == __CMTX_UNLOCKED) {
+		membar_enter_after_atomic();
+		return (1);
+	}
+
+	return (0);
+}
+
+void
+__cmtx_enter(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+	int spins;
+
+	locked = atomic_cas_uint(&cmtx->lock,
+	    __CMTX_UNLOCKED, __CMTX_LOCKED);
+	if (locked == __CMTX_UNLOCKED) {
+		membar_enter_after_atomic();
+		return;
+	}
+
+	/* add adaptive spin here */
+
+	do {
+		switch (locked) {
+		case __CMTX_LOCKED:
+			locked = atomic_cas_uint(&cmtx->lock,
+			    __CMTX_LOCKED, __CMTX_CONTENDED);
+			if (locked == __CMTX_UNLOCKED)
+				break;
+
+			/* lock is LOCKED -> CONTENDED or was CONTENDED */
+			/* FALLTHROUGH */
+		case __CMTX_CONTENDED:
+			futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
+			    __CMTX_CONTENDED, NULL, NULL);
+			break;
+		}
+
+		locked = atomic_cas_uint(&cmtx->lock,
+		    __CMTX_UNLOCKED, __CMTX_CONTENDED);
+	} while (locked != __CMTX_UNLOCKED);
+
+	membar_enter_after_atomic();
+}
+
+void
+__cmtx_leave(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	membar_exit_before_atomic();
+	locked = atomic_cas_uint(&cmtx->lock,
+	    __CMTX_LOCKED, __CMTX_UNLOCKED);
+	if (locked != __CMTX_LOCKED) {
+		assert(locked != __CMTX_UNLOCKED);
+		cmtx->lock = __CMTX_UNLOCKED;
+		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+	}
+}
+
+#else /* __CMTX_CAS */
+
+/*
+ * spinlock+futex locks
+ */
+
+void
+__cmtx_init(struct __cmtx *cmtx)
+{
+	cmtx->spin = _SPINLOCK_UNLOCKED;
+	cmtx->lock = __CMTX_UNLOCKED;
+}
+
+int
+__cmtx_enter_try(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	if (locked == __CMTX_UNLOCKED)
+		cmtx->lock = __CMTX_LOCKED;
+	_spinunlock(&cmtx->spin);
+
+	/* spinlocks provide enough membars */
+
+	return (locked == __CMTX_UNLOCKED);
+}
+
+void
+__cmtx_enter(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	switch (locked) {
+	case __CMTX_UNLOCKED:
+		cmtx->lock = __CMTX_LOCKED;
+		break;
+	case __CMTX_LOCKED:
+		cmtx->lock = __CMTX_CONTENDED;
+		break;
+	}
+	_spinunlock(&cmtx->spin);
+
+	while (locked != __CMTX_UNLOCKED) {
+		futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
+		    __CMTX_CONTENDED, NULL, NULL);
+
+		_spinlock(&cmtx->spin);
+		locked = cmtx->lock;
+		switch (locked) {
+		case __CMTX_UNLOCKED:
+		case __CMTX_LOCKED:
+			cmtx->lock = __CMTX_CONTENDED;
+			break;
+		}
+		_spinunlock(&cmtx->spin);
+	}
+
+	/* spinlocks provide enough membars */
+}
+
+void
+__cmtx_leave(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	/* spinlocks provide enough membars */
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	cmtx->lock = __CMTX_UNLOCKED;
+	_spinunlock(&cmtx->spin);
+
+	if (locked != __CMTX_LOCKED) {
+		assert(locked != __CMTX_UNLOCKED);
+		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+	}
+}
+
+#endif /* __CMTX_CAS */
+
+/*
+ * recursive mutex
+ */
+
+void
+__rcmtx_init(struct __rcmtx *rcmtx)
+{
+	__cmtx_init(&rcmtx->mtx);
+	rcmtx->owner = NULL;
+	rcmtx->depth = 0;
+}
+
+int
+__rcmtx_enter_try(struct __rcmtx *rcmtx)
+{
+	pthread_t self = pthread_self();
+
+	if (rcmtx->owner != self) {
+		if (__cmtx_enter_try(&rcmtx->mtx) == 0)
+			return (0);
+		assert(rcmtx->owner == NULL);
+		rcmtx->owner = self;
+		assert(rcmtx->depth == 0);
+	}
+
+	rcmtx->depth++;
+
+	return (1);
+}
+
+void
+__rcmtx_enter(struct __rcmtx *rcmtx)
+{
+	pthread_t self = pthread_self();
+
+	if (rcmtx->owner != self) {
+		__cmtx_enter(&rcmtx->mtx);
+		assert(rcmtx->owner == NULL);
+		rcmtx->owner = self;
+		assert(rcmtx->depth == 0);
+	}
+
+	rcmtx->depth++;
+}
+
+void
+__rcmtx_leave(struct __rcmtx *rcmtx)
+{
+	assert(rcmtx->owner == pthread_self());
+	if (--rcmtx->depth == 0) {
+		rcmtx->owner = NULL;
+		__cmtx_leave(&rcmtx->mtx);
+	}
+}
 
 static void
 _rthread_init(void)
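
For reference, a minimal usage sketch (not part of the diff): how a libc
component could take one of the new locks. Only struct __cmtx,
__CMTX_INITIALIZER(), __cmtx_enter() and __cmtx_leave() come from the patch
above; the consumer names below are made up.

	#include "thread_private.h"

	/* hypothetical consumer: serialize updates to an internal counter */
	static struct __cmtx _example_mtx = __CMTX_INITIALIZER();
	static unsigned int _example_count;

	unsigned int
	_example_bump(void)
	{
		unsigned int v;

		/* sleeps on the futex (or spinlock path) if contended */
		__cmtx_enter(&_example_mtx);
		v = ++_example_count;
		/* releases the lock, waking one waiter if it was contended */
		__cmtx_leave(&_example_mtx);

		return (v);
	}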