Index: shlib_version
===================================================================
RCS file: /cvs/src/lib/libc/shlib_version,v
diff -u -p -r1.222 shlib_version
--- shlib_version	12 Aug 2024 20:56:55 -0000	1.222
+++ shlib_version	12 Jul 2025 06:22:33 -0000
@@ -1,4 +1,4 @@
 major=100
-minor=3
+minor=4
 # note: If changes were made to include/thread_private.h or if system calls
 # were added/changed then librthread/shlib_version must also be updated.
Index: include/thread_private.h
===================================================================
RCS file: /cvs/src/lib/libc/include/thread_private.h,v
diff -u -p -r1.37 thread_private.h
--- include/thread_private.h	18 Aug 2024 02:25:51 -0000	1.37
+++ include/thread_private.h	12 Jul 2025 06:22:34 -0000
@@ -292,6 +292,12 @@ TAILQ_HEAD(pthread_queue, pthread);
 
 #ifdef FUTEX
 
+/*
+ * CAS based implementations
+ */
+
+#define __CMTX_CAS
+
 struct pthread_mutex {
 	volatile unsigned int lock;
 	int type;
@@ -312,6 +318,10 @@ struct pthread_rwlock {
 
 #else
 
+/*
+ * spinlock based implementations
+ */
+
 struct pthread_mutex {
 	_atomic_lock_t lock;
 	struct pthread_queue lockers;
@@ -336,6 +346,40 @@ struct pthread_rwlock {
 };
 #endif /* FUTEX */
 
+/* libc mutex */
+
+#define __CMTX_UNLOCKED		0
+#define __CMTX_LOCKED		1
+#define __CMTX_CONTENDED	2
+
+#ifdef __CMTX_CAS
+struct __cmtx {
+	volatile unsigned int lock;
+};
+
+#define __CMTX_INITIALIZER() {			\
+	.lock = __CMTX_UNLOCKED,		\
+}
+#else /* __CMTX_CAS */
+struct __cmtx {
+	_atomic_lock_t spin;
+	volatile unsigned int lock;
+};
+
+#define __CMTX_INITIALIZER() {			\
+	.spin = _SPINLOCK_UNLOCKED,		\
+	.lock = __CMTX_UNLOCKED,		\
+}
+#endif /* __CMTX_CAS */
+
+/* libc recursive mutex */
+
+struct __rcmtx {
+	volatile pthread_t owner;
+	struct __cmtx mtx;
+	unsigned int depth;
+};
+
 struct pthread_mutex_attr {
 	int ma_type;
 	int ma_protocol;
@@ -409,6 +453,16 @@ struct pthread {
 void	_spinlock(volatile _atomic_lock_t *);
 int	_spinlocktry(volatile _atomic_lock_t *);
 void	_spinunlock(volatile _atomic_lock_t *);
+
+void	__cmtx_init(struct __cmtx *);
+int	__cmtx_enter_try(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+void	__rcmtx_init(struct __rcmtx *);
+int	__rcmtx_enter_try(struct __rcmtx *);
+void	__rcmtx_enter(struct __rcmtx *);
+void	__rcmtx_leave(struct __rcmtx *);
 
 void	_rthread_debug(int, const char *, ...)
 	__attribute__((__format__ (printf, 2, 3)));
Index: stdio/fclose.c
===================================================================
RCS file: /cvs/src/lib/libc/stdio/fclose.c,v
diff -u -p -r1.13 fclose.c
--- stdio/fclose.c	3 Jun 2025 14:15:53 -0000	1.13
+++ stdio/fclose.c	12 Jul 2025 06:22:34 -0000
@@ -57,8 +57,8 @@ fclose(FILE *fp)
 	if (HASLB(fp))
 		FREELB(fp);
 	fp->_r = fp->_w = 0;	/* Mess up if reaccessed. */
-	fp->_flags = 0;		/* Release this FILE for reuse. */
 	FUNLOCKFILE(fp);
+	fp->_flags = 0;		/* Release this FILE for reuse. */
 	return (r);
 }
 DEF_STRONG(fclose);
Index: stdio/fileext.h
===================================================================
RCS file: /cvs/src/lib/libc/stdio/fileext.h,v
diff -u -p -r1.2 fileext.h
--- stdio/fileext.h	17 Jun 2005 20:40:32 -0000	1.2
+++ stdio/fileext.h	12 Jul 2025 06:22:34 -0000
@@ -35,6 +35,7 @@
 struct __sfileext {
 	struct	__sbuf _ub;		/* ungetc buffer */
 	struct wchar_io_data _wcio;	/* wide char io status */
+	struct __rcmtx _lock;		/* f{lock,trylock,unlock}file */
 };
 
 #define _EXT(fp) ((struct __sfileext *)((fp)->_ext._base))
@@ -45,6 +46,7 @@ do { \
 	_UB(fp)._base = NULL; \
 	_UB(fp)._size = 0; \
 	WCIO_INIT(fp); \
+	__rcmtx_init(&_EXT(fp)->_lock); \
 } while (0)
 
 #define _FILEEXT_SETUP(f, fext) \
Index: stdio/flockfile.c
===================================================================
RCS file: /cvs/src/lib/libc/stdio/flockfile.c,v
diff -u -p -r1.9 flockfile.c
--- stdio/flockfile.c	7 May 2016 19:05:22 -0000	1.9
+++ stdio/flockfile.c	12 Jul 2025 06:22:34 -0000
@@ -3,19 +3,29 @@
 #include <stdio.h>
 #include "local.h"
 
+static inline struct __rcmtx *
+frcmtx(FILE *fp)
+{
+	return &_EXT(fp)->_lock;
+}
+
 void
 flockfile(FILE *fp)
 {
-	FLOCKFILE(fp);
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		__rcmtx_enter(rcm);
+	}
 }
 DEF_WEAK(flockfile);
 
-
 int
 ftrylockfile(FILE *fp)
 {
-	if (_thread_cb.tc_ftrylockfile != NULL)
-		return (_thread_cb.tc_ftrylockfile(fp));
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		return __rcmtx_enter_try(rcm);
+	}
 
 	return 0;
 }
@@ -24,6 +34,9 @@ DEF_WEAK(ftrylockfile);
 void
 funlockfile(FILE *fp)
 {
-	FUNLOCKFILE(fp);
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		__rcmtx_leave(rcm);
+	}
 }
 DEF_WEAK(funlockfile);
Index: stdio/local.h
===================================================================
RCS file: /cvs/src/lib/libc/stdio/local.h,v
diff -u -p -r1.25 local.h
--- stdio/local.h	23 May 2016 00:21:48 -0000	1.25
+++ stdio/local.h	12 Jul 2025 06:22:34 -0000
@@ -39,8 +39,8 @@
 #include
 
 #include "wcio.h"
-#include "fileext.h"
 #include "thread_private.h"
+#include "fileext.h"
 
 __BEGIN_HIDDEN_DECLS
 void	_cleanup(void);
@@ -95,13 +95,5 @@ __END_HIDDEN_DECLS
 	(fp)->_lb._base = NULL;	\
 }
 
-#define FLOCKFILE(fp) \
-	do { \
-		if (_thread_cb.tc_flockfile != NULL) \
-			_thread_cb.tc_flockfile(fp); \
-	} while (0)
-#define FUNLOCKFILE(fp) \
-	do { \
-		if (_thread_cb.tc_funlockfile != NULL) \
-			_thread_cb.tc_funlockfile(fp); \
-	} while (0)
+#define FLOCKFILE(fp)	flockfile(fp)
+#define FUNLOCKFILE(fp)	funlockfile(fp)
Index: thread/Makefile.inc
===================================================================
RCS file: /cvs/src/lib/libc/thread/Makefile.inc,v
diff -u -p -r1.19 Makefile.inc
--- thread/Makefile.inc	6 Feb 2020 03:13:45 -0000	1.19
+++ thread/Makefile.inc	12 Jul 2025 06:22:34 -0000
@@ -8,7 +8,6 @@ SRCS+=	callbacks.c atfork.c
 SRCS+=	rthread.c \
 	rthread_condattr.c \
 	rthread_debug.c \
-	rthread_file.c \
 	rthread_libc.c \
 	rthread_once.c \
 	rthread_tls.c \
Index: thread/callbacks.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/callbacks.c,v
diff -u -p -r1.4 callbacks.c
--- thread/callbacks.c	5 Sep 2017 02:40:54 -0000	1.4
+++ thread/callbacks.c	12 Jul 2025 06:22:34 -0000
@@ -51,9 +51,6 @@ _thread_set_callbacks(const struct threa
 	 * here when we actually need to prep for doing MT.
	 */
 	_thread_cb.tc_canceled = _thread_canceled;
-	_thread_cb.tc_flockfile = _thread_flockfile;
-	_thread_cb.tc_ftrylockfile = _thread_ftrylockfile;
-	_thread_cb.tc_funlockfile = _thread_funlockfile;
 	_thread_cb.tc_malloc_lock = _thread_malloc_lock;
 	_thread_cb.tc_malloc_unlock = _thread_malloc_unlock;
 	_thread_cb.tc_atexit_lock = _thread_atexit_lock;
Index: thread/rthread.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread.c,v
diff -u -p -r1.9 rthread.c
--- thread/rthread.c	12 Oct 2020 22:06:51 -0000	1.9
+++ thread/rthread.c	12 Jul 2025 06:22:34 -0000
@@ -20,12 +20,14 @@
  */
 
 #include
+#include
 
 #include
 #include
 #include
 #include
 
+#include
 
 #include "rthread.h"
 
@@ -69,6 +71,221 @@ _spinunlock(volatile _atomic_lock_t *loc
 	*lock = _ATOMIC_LOCK_UNLOCKED;
 }
 DEF_STRONG(_spinunlock);
+
+#ifdef __CMTX_CAS
+
+/*
+ * CAS+futex locks
+ */
+
+void
+__cmtx_init(struct __cmtx *cmtx)
+{
+	cmtx->lock = __CMTX_UNLOCKED;
+}
+
+int
+__cmtx_enter_try(struct __cmtx *cmtx)
+{
+	if (atomic_cas_uint(&cmtx->lock,
+	    __CMTX_UNLOCKED, __CMTX_LOCKED) == __CMTX_UNLOCKED) {
+		membar_enter_after_atomic();
+		return (1);
+	}
+
+	return (0);
+}
+
+void
+__cmtx_enter(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	locked = atomic_cas_uint(&cmtx->lock,
+	    __CMTX_UNLOCKED, __CMTX_LOCKED);
+	if (locked == __CMTX_UNLOCKED) {
+		membar_enter_after_atomic();
+		return;
+	}
+
+	/* add adaptive spin here */
+
+	do {
+		switch (locked) {
+		case __CMTX_LOCKED:
+			locked = atomic_cas_uint(&cmtx->lock,
+			    __CMTX_LOCKED, __CMTX_CONTENDED);
+			if (locked == __CMTX_UNLOCKED)
+				break;
+
+			/* lock is LOCKED -> CONTENDED or was CONTENDED */
+			/* FALLTHROUGH */
+		case __CMTX_CONTENDED:
+			futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
+			    __CMTX_CONTENDED, NULL, NULL);
+			break;
+		}
+
+		locked = atomic_cas_uint(&cmtx->lock,
+		    __CMTX_UNLOCKED, __CMTX_CONTENDED);
+	} while (locked != __CMTX_UNLOCKED);
+
+	membar_enter_after_atomic();
+}
+
+void
+__cmtx_leave(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	membar_exit_before_atomic();
+	locked = atomic_cas_uint(&cmtx->lock,
+	    __CMTX_LOCKED, __CMTX_UNLOCKED);
+	if (locked != __CMTX_LOCKED) {
+		assert(locked != __CMTX_UNLOCKED);
+		cmtx->lock = __CMTX_UNLOCKED;
+		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+	}
+}
+
+#else /* __CMTX_CAS */
+
+/*
+ * spinlock+futex locks
+ */
+
+void
+__cmtx_init(struct __cmtx *cmtx)
+{
+	cmtx->spin = _SPINLOCK_UNLOCKED;
+	cmtx->lock = __CMTX_UNLOCKED;
+}
+
+int
+__cmtx_enter_try(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	if (locked == __CMTX_UNLOCKED)
+		cmtx->lock = __CMTX_LOCKED;
+	_spinunlock(&cmtx->spin);
+
+	/* spinlocks provide enough membars */
+
+	return (locked == __CMTX_UNLOCKED);
+}
+
+void
+__cmtx_enter(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	switch (locked) {
+	case __CMTX_UNLOCKED:
+		cmtx->lock = __CMTX_LOCKED;
+		break;
+	case __CMTX_LOCKED:
+		cmtx->lock = __CMTX_CONTENDED;
+		break;
+	}
+	_spinunlock(&cmtx->spin);
+
+	while (locked != __CMTX_UNLOCKED) {
+		futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
+		    __CMTX_CONTENDED, NULL, NULL);
+
+		_spinlock(&cmtx->spin);
+		locked = cmtx->lock;
+		switch (locked) {
+		case __CMTX_UNLOCKED:
+		case __CMTX_LOCKED:
+			cmtx->lock = __CMTX_CONTENDED;
+			break;
+		}
+		_spinunlock(&cmtx->spin);
+	}
+
+	/* spinlocks provide enough membars */
+}
+
+void
+__cmtx_leave(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	/* spinlocks provide enough membars */
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	cmtx->lock = __CMTX_UNLOCKED;
+	_spinunlock(&cmtx->spin);
+
+	if (locked != __CMTX_LOCKED) {
+		assert(locked != __CMTX_UNLOCKED);
+		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+	}
+}
+
+#endif /* __CMTX_CAS */
+
+/*
+ * recursive mutex
+ */
+
+void
+__rcmtx_init(struct __rcmtx *rcmtx)
+{
+	__cmtx_init(&rcmtx->mtx);
+	rcmtx->owner = NULL;
+	rcmtx->depth = 0;
+}
+
+int
+__rcmtx_enter_try(struct __rcmtx *rcmtx)
+{
+	pthread_t self = pthread_self();
+
+	if (rcmtx->owner != self) {
+		if (__cmtx_enter_try(&rcmtx->mtx) == 0)
+			return (0);
+		assert(rcmtx->owner == NULL);
+		rcmtx->owner = self;
+		assert(rcmtx->depth == 0);
+	}
+
+	rcmtx->depth++;
+
+	return (1);
+}
+
+void
+__rcmtx_enter(struct __rcmtx *rcmtx)
+{
+	pthread_t self = pthread_self();
+
+	if (rcmtx->owner != self) {
+		__cmtx_enter(&rcmtx->mtx);
+		assert(rcmtx->owner == NULL);
+		rcmtx->owner = self;
+		assert(rcmtx->depth == 0);
+	}
+
+	rcmtx->depth++;
+}
+
+void
+__rcmtx_leave(struct __rcmtx *rcmtx)
+{
+	assert(rcmtx->owner == pthread_self());
+	if (--rcmtx->depth == 0) {
+		rcmtx->owner = NULL;
+		__cmtx_leave(&rcmtx->mtx);
+	}
+}
 
 static void
 _rthread_init(void)
Index: thread/rthread_libc.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread_libc.c,v
diff -u -p -r1.4 rthread_libc.c
--- thread/rthread_libc.c	6 Jan 2021 19:54:17 -0000	1.4
+++ thread/rthread_libc.c	12 Jul 2025 06:22:34 -0000
@@ -152,24 +152,9 @@ _thread_mutex_destroy(void **mutex)
 /*
  * the malloc lock
  */
-#ifndef FUTEX
-#define MALLOC_LOCK_INITIALIZER(n) { \
-	_SPINLOCK_UNLOCKED, \
-	TAILQ_HEAD_INITIALIZER(malloc_lock[n].lockers), \
-	PTHREAD_MUTEX_DEFAULT, \
-	NULL, \
-	0, \
-	-1 }
-#else
-#define MALLOC_LOCK_INITIALIZER(n) { \
-	_SPINLOCK_UNLOCKED, \
-	PTHREAD_MUTEX_DEFAULT, \
-	NULL, \
-	0, \
-	-1 }
-#endif
+#define MALLOC_LOCK_INITIALIZER(n) __CMTX_INITIALIZER()
 
-static struct pthread_mutex malloc_lock[_MALLOC_MUTEXES] = {
+static struct __cmtx malloc_lock[_MALLOC_MUTEXES] = {
 	MALLOC_LOCK_INITIALIZER(0),
 	MALLOC_LOCK_INITIALIZER(1),
 	MALLOC_LOCK_INITIALIZER(2),
@@ -204,51 +189,16 @@ static struct pthread_mutex malloc_lock[
 	MALLOC_LOCK_INITIALIZER(31)
 };
 
-static pthread_mutex_t malloc_mutex[_MALLOC_MUTEXES] = {
-	&malloc_lock[0],
-	&malloc_lock[1],
-	&malloc_lock[2],
-	&malloc_lock[3],
-	&malloc_lock[4],
-	&malloc_lock[5],
-	&malloc_lock[6],
-	&malloc_lock[7],
-	&malloc_lock[8],
-	&malloc_lock[9],
-	&malloc_lock[10],
-	&malloc_lock[11],
-	&malloc_lock[12],
-	&malloc_lock[13],
-	&malloc_lock[14],
-	&malloc_lock[15],
-	&malloc_lock[16],
-	&malloc_lock[17],
-	&malloc_lock[18],
-	&malloc_lock[19],
-	&malloc_lock[20],
-	&malloc_lock[21],
-	&malloc_lock[22],
-	&malloc_lock[23],
-	&malloc_lock[24],
-	&malloc_lock[25],
-	&malloc_lock[26],
-	&malloc_lock[27],
-	&malloc_lock[28],
-	&malloc_lock[29],
-	&malloc_lock[30],
-	&malloc_lock[31]
-};
-
 void
 _thread_malloc_lock(int i)
 {
-	pthread_mutex_lock(&malloc_mutex[i]);
+	__cmtx_enter(&malloc_lock[i]);
 }
 
 void
 _thread_malloc_unlock(int i)
 {
-	pthread_mutex_unlock(&malloc_mutex[i]);
+	__cmtx_leave(&malloc_lock[i]);
 }
 
 static void
@@ -256,14 +206,8 @@ _thread_malloc_reinit(void)
 {
 	int i;
 
-	for (i = 0; i < _MALLOC_MUTEXES; i++) {
-		malloc_lock[i].lock = _SPINLOCK_UNLOCKED;
-#ifndef FUTEX
-		TAILQ_INIT(&malloc_lock[i].lockers);
-#endif
-		malloc_lock[i].owner = NULL;
-		malloc_lock[i].count = 0;
-	}
+	for (i = 0; i < _MALLOC_MUTEXES; i++)
+		__cmtx_init(&malloc_lock[i]);
 }
 
 /*
@@ -303,18 +247,18 @@ _thread_atfork_unlock(void)
 /*
  * arc4random lock
  */
-static _atomic_lock_t arc4_lock = _SPINLOCK_UNLOCKED;
+static struct __cmtx arc4_lock = __CMTX_INITIALIZER();
 
 void
 _thread_arc4_lock(void)
 {
-	_spinlock(&arc4_lock);
+	__cmtx_enter(&arc4_lock);
 }
 
 void
 _thread_arc4_unlock(void)
 {
-	_spinunlock(&arc4_lock);
+	__cmtx_leave(&arc4_lock);
 }
 
 pid_t
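
For readers skimming the diff, here is a minimal usage sketch of the libc-internal locking API this patch adds. It is not part of the diff, and the names example_lock, example_rlock and example() are made up for illustration; only the __cmtx/__rcmtx calls, __CMTX_INITIALIZER() and their behaviour are taken from the thread_private.h and rthread.c hunks above (enter sleeps on a futex when contended, enter_try returns 1 on success, and an __rcmtx may be re-entered by its owning thread, which is what flockfile()/funlockfile() now rely on):

	#include "thread_private.h"

	/* statically initialized lock, like the malloc and arc4random locks above */
	static struct __cmtx example_lock = __CMTX_INITIALIZER();

	/* recursive lock, like the per-FILE lock embedded in struct __sfileext */
	static struct __rcmtx example_rlock;

	static void
	example(void)
	{
		__cmtx_enter(&example_lock);	/* blocks; futex wait when contended */
		/* ... critical section ... */
		__cmtx_leave(&example_lock);	/* futex wake if someone was waiting */

		__rcmtx_init(&example_rlock);
		__rcmtx_enter(&example_rlock);
		__rcmtx_enter(&example_rlock);		/* owner may recurse; depth is now 2 */
		if (__rcmtx_enter_try(&example_rlock))	/* returns 1 on success */
			__rcmtx_leave(&example_rlock);
		__rcmtx_leave(&example_rlock);
		__rcmtx_leave(&example_rlock);		/* depth drops to 0; lock released */
	}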