Index: include/thread_private.h
===================================================================
RCS file: /cvs/src/lib/libc/include/thread_private.h,v
diff -u -p -r1.37 thread_private.h
--- include/thread_private.h	18 Aug 2024 02:25:51 -0000	1.37
+++ include/thread_private.h	10 Jul 2025 02:35:10 -0000
@@ -292,6 +292,14 @@ TAILQ_HEAD(pthread_queue, pthread);

 #ifdef FUTEX

+/*
+ * CAS based implementations
+ */
+
+struct __cmtx {
+	volatile unsigned int lock;
+};
+
 struct pthread_mutex {
 	volatile unsigned int lock;
 	int type;
@@ -312,6 +320,15 @@ struct pthread_rwlock {

 #else

+/*
+ * spinlock based implementations
+ */
+
+struct __cmtx {
+	_atomic_lock_t spin;
+	volatile unsigned int lock;
+};
+
 struct pthread_mutex {
 	_atomic_lock_t lock;
 	struct pthread_queue lockers;
@@ -336,6 +353,12 @@ struct pthread_rwlock {
 };
 #endif /* FUTEX */

+struct __rcmtx {
+	volatile pthread_t owner;
+	struct __cmtx mtx;
+	unsigned int depth;
+};
+
 struct pthread_mutex_attr {
 	int ma_type;
 	int ma_protocol;
@@ -409,6 +432,16 @@ struct pthread {
 void	_spinlock(volatile _atomic_lock_t *);
 int	_spinlocktry(volatile _atomic_lock_t *);
 void	_spinunlock(volatile _atomic_lock_t *);
+
+void	__cmtx_init(struct __cmtx *);
+int	__cmtx_enter_try(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+void	__rcmtx_init(struct __rcmtx *);
+int	__rcmtx_enter_try(struct __rcmtx *);
+void	__rcmtx_enter(struct __rcmtx *);
+void	__rcmtx_leave(struct __rcmtx *);

 void	_rthread_debug(int, const char *, ...)
 			__attribute__((__format__ (printf, 2, 3)));
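The header above only declares the new API, so a sketch of the intended usage may help review. The example below is illustrative and not part of the diff; every name in it is made up. __cmtx is a flat sleeping mutex, while __rcmtx tracks an owner and a depth so the owning thread can reenter without deadlocking:

/*
 * Hypothetical libc-internal users of the new primitives;
 * illustrative only, not part of this patch.
 */
#include "thread_private.h"

static struct __cmtx stats_mtx;	/* flat mutex: owner must not reenter */
static struct __rcmtx log_mtx;	/* recursive: owner may reenter */
static unsigned long long stats;

void
stats_setup(void)
{
	/* both flavours need a one-time init before first use */
	__cmtx_init(&stats_mtx);
	__rcmtx_init(&log_mtx);
}

void
stats_bump(void)
{
	__cmtx_enter(&stats_mtx);	/* sleeps if contended */
	stats++;
	__cmtx_leave(&stats_mtx);	/* wakes one waiter if contended */
}

static void
log_line(const char *s)
{
	__rcmtx_enter(&log_mtx);	/* fine if this thread already holds it */
	(void)s;			/* ... emit s ... */
	__rcmtx_leave(&log_mtx);	/* released only when depth returns to 0 */
}

void
log_pair(const char *a, const char *b)
{
	__rcmtx_enter(&log_mtx);	/* depth 1: take ownership */
	log_line(a);			/* depth 2 inside, no deadlock */
	log_line(b);
	__rcmtx_leave(&log_mtx);	/* depth 0: lock actually released */
}

Note that the owner test in __rcmtx_enter() is unlocked: it can only compare equal in the thread that set it, so the recursive path never has to touch the underlying __cmtx.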
Index: stdio/fclose.c
===================================================================
RCS file: /cvs/src/lib/libc/stdio/fclose.c,v
diff -u -p -r1.13 fclose.c
--- stdio/fclose.c	3 Jun 2025 14:15:53 -0000	1.13
+++ stdio/fclose.c	10 Jul 2025 02:35:10 -0000
@@ -57,8 +57,8 @@ fclose(FILE *fp)
 	if (HASLB(fp))
 		FREELB(fp);
 	fp->_r = fp->_w = 0;	/* Mess up if reaccessed. */
-	fp->_flags = 0;		/* Release this FILE for reuse. */
 	FUNLOCKFILE(fp);
+	fp->_flags = 0;		/* Release this FILE for reuse. */
 	return (r);
 }
 DEF_STRONG(fclose);
Index: stdio/fileext.h
===================================================================
RCS file: /cvs/src/lib/libc/stdio/fileext.h,v
diff -u -p -r1.2 fileext.h
--- stdio/fileext.h	17 Jun 2005 20:40:32 -0000	1.2
+++ stdio/fileext.h	10 Jul 2025 02:35:10 -0000
@@ -35,6 +35,7 @@
 struct __sfileext {
 	struct	__sbuf _ub; /* ungetc buffer */
 	struct wchar_io_data _wcio;	/* wide char io status */
+	struct __rcmtx _lock;		/* f{lock,trylock,unlock}file */
 };

 #define _EXT(fp) ((struct __sfileext *)((fp)->_ext._base))
@@ -45,6 +46,7 @@ do { \
 	_UB(fp)._base = NULL; \
 	_UB(fp)._size = 0; \
 	WCIO_INIT(fp); \
+	__rcmtx_init(&_EXT(fp)->_lock); \
 } while (0)

 #define _FILEEXT_SETUP(f, fext) \
Index: stdio/flockfile.c
===================================================================
RCS file: /cvs/src/lib/libc/stdio/flockfile.c,v
diff -u -p -r1.9 flockfile.c
--- stdio/flockfile.c	7 May 2016 19:05:22 -0000	1.9
+++ stdio/flockfile.c	10 Jul 2025 02:35:10 -0000
@@ -3,19 +3,29 @@
 #include <stdio.h>
 #include "local.h"

+static inline struct __rcmtx *
+frcmtx(FILE *fp)
+{
+	return &_EXT(fp)->_lock;
+}
+
 void
 flockfile(FILE *fp)
 {
-	FLOCKFILE(fp);
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		__rcmtx_enter(rcm);
+	}
 }
 DEF_WEAK(flockfile);

-
 int
 ftrylockfile(FILE *fp)
 {
-	if (_thread_cb.tc_ftrylockfile != NULL)
-		return (_thread_cb.tc_ftrylockfile(fp));
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		return __rcmtx_enter_try(rcm);
+	}
 	return 0;
 }
 DEF_WEAK(ftrylockfile);
@@ -24,6 +34,9 @@ DEF_WEAK(ftrylockfile);
 void
 funlockfile(FILE *fp)
 {
-	FUNLOCKFILE(fp);
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		__rcmtx_leave(rcm);
+	}
 }
 DEF_WEAK(funlockfile);
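The caller-visible contract here is the POSIX one: flockfile() may nest within a thread, funlockfile() releases only at the outermost call, and the *_unlocked stdio functions assume the caller holds the FILE lock. A minimal, hypothetical example of what the per-FILE __rcmtx has to support:

#include <stdio.h>

/*
 * Writes s atomically with respect to other threads using fp.
 * emit() may be called with or without the lock already held by
 * this thread, so flockfile() recursion must not deadlock.
 */
static void
emit(FILE *fp, const char *s)
{
	flockfile(fp);			/* depth +1 */
	while (*s != '\0')
		putc_unlocked((unsigned char)*s++, fp);
	funlockfile(fp);		/* depth -1; dropped at depth 0 */
}

void
emit_pair(FILE *fp, const char *a, const char *b)
{
	flockfile(fp);			/* hold across both lines */
	emit(fp, a);			/* reenters: owner matches */
	emit(fp, b);
	funlockfile(fp);
}

This nesting is exactly why the FILE lock is an __rcmtx rather than a plain __cmtx. It is also why fclose() above now clears _flags only after FUNLOCKFILE(): once _flags goes to zero the FILE is free for reuse, and another thread could be handed the same slot and reinitialize its lock while the closing thread still holds it.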
Index: stdio/local.h
===================================================================
RCS file: /cvs/src/lib/libc/stdio/local.h,v
diff -u -p -r1.25 local.h
--- stdio/local.h	23 May 2016 00:21:48 -0000	1.25
+++ stdio/local.h	10 Jul 2025 02:35:10 -0000
@@ -39,8 +39,8 @@
 #include <wchar.h>

 #include "wcio.h"
-#include "fileext.h"
 #include "thread_private.h"
+#include "fileext.h"

 __BEGIN_HIDDEN_DECLS
 void	_cleanup(void);
@@ -95,13 +95,5 @@ __END_HIDDEN_DECLS
 	(fp)->_lb._base = NULL;	\
 }

-#define FLOCKFILE(fp)	\
-	do { \
-		if (_thread_cb.tc_flockfile != NULL) \
-			_thread_cb.tc_flockfile(fp); \
-	} while (0)
-#define FUNLOCKFILE(fp)	\
-	do { \
-		if (_thread_cb.tc_funlockfile != NULL) \
-			_thread_cb.tc_funlockfile(fp); \
-	} while (0)
+#define FLOCKFILE(fp)	flockfile(fp)
+#define FUNLOCKFILE(fp)	funlockfile(fp)
Index: thread/Makefile.inc
===================================================================
RCS file: /cvs/src/lib/libc/thread/Makefile.inc,v
diff -u -p -r1.19 Makefile.inc
--- thread/Makefile.inc	6 Feb 2020 03:13:45 -0000	1.19
+++ thread/Makefile.inc	10 Jul 2025 02:35:10 -0000
@@ -8,7 +8,6 @@ SRCS+=	callbacks.c atfork.c
 SRCS+=	rthread.c \
 	rthread_condattr.c \
 	rthread_debug.c \
-	rthread_file.c \
 	rthread_libc.c \
 	rthread_once.c \
 	rthread_tls.c \
Index: thread/callbacks.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/callbacks.c,v
diff -u -p -r1.4 callbacks.c
--- thread/callbacks.c	5 Sep 2017 02:40:54 -0000	1.4
+++ thread/callbacks.c	10 Jul 2025 02:35:10 -0000
@@ -51,9 +51,6 @@ _thread_set_callbacks(const struct threa
 	 * here when we actually need to prep for doing MT.
 	 */
 	_thread_cb.tc_canceled = _thread_canceled;
-	_thread_cb.tc_flockfile = _thread_flockfile;
-	_thread_cb.tc_ftrylockfile = _thread_ftrylockfile;
-	_thread_cb.tc_funlockfile = _thread_funlockfile;
 	_thread_cb.tc_malloc_lock = _thread_malloc_lock;
 	_thread_cb.tc_malloc_unlock = _thread_malloc_unlock;
 	_thread_cb.tc_atexit_lock = _thread_atexit_lock;
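With the callbacks removed, stdio locking no longer bounces through the _thread_cb function pointers: flockfile() tests __isthreaded and calls the __rcmtx routines directly, and rthread_file.c goes away. The implementation follows in rthread.c below; the contract it must satisfy is plain mutual exclusion between arbitrary threads. A hypothetical smoke test for that contract (not part of the diff, and assuming something in-tree can see libc's internal thread_private.h):

/*
 * Hypothetical stress test for the __cmtx contract.
 */
#include <assert.h>
#include <pthread.h>
#include "thread_private.h"

#define NTHREADS	4
#define NLOOPS		100000

static struct __cmtx mtx;
static int counter;

static void *
worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < NLOOPS; i++) {
		__cmtx_enter(&mtx);
		counter++;		/* increments must never be lost */
		__cmtx_leave(&mtx);
	}
	return NULL;
}

int
main(void)
{
	pthread_t t[NTHREADS];
	int i;

	__cmtx_init(&mtx);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	assert(counter == NTHREADS * NLOOPS);
	return 0;
}

Both variants below have to pass the same test; the #ifdef FUTEX split only changes how waiters are tracked and put to sleep.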
Index: thread/rthread.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread.c,v
diff -u -p -r1.9 rthread.c
--- thread/rthread.c	12 Oct 2020 22:06:51 -0000	1.9
+++ thread/rthread.c	10 Jul 2025 02:35:10 -0000
@@ -20,12 +20,14 @@
  */

 #include <sys/types.h>
+#include <sys/atomic.h>

 #include <errno.h>
 #include <pthread.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <sys/futex.h>

 #include "rthread.h"
@@ -69,6 +71,225 @@ _spinunlock(volatile _atomic_lock_t *loc
 	*lock = _ATOMIC_LOCK_UNLOCKED;
 }
 DEF_STRONG(_spinunlock);
+
+#define CMTX_UNLOCKED	0
+#define CMTX_LOCKED	1
+#define CMTX_CONTENDED	2
+
+#ifdef FUTEX
+
+/*
+ * CAS+futex locks
+ */
+
+void
+__cmtx_init(struct __cmtx *cmtx)
+{
+	cmtx->lock = CMTX_UNLOCKED;
+}
+
+int
+__cmtx_enter_try(struct __cmtx *cmtx)
+{
+	if (atomic_cas_uint(&cmtx->lock,
+	    CMTX_UNLOCKED, CMTX_LOCKED) == CMTX_UNLOCKED) {
+		membar_enter_after_atomic();
+		return (1);
+	}
+
+	return (0);
+}
+
+void
+__cmtx_enter(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	locked = atomic_cas_uint(&cmtx->lock,
+	    CMTX_UNLOCKED, CMTX_LOCKED);
+	if (locked == CMTX_UNLOCKED) {
+		membar_enter_after_atomic();
+		return;
+	}
+
+	/* add adaptive spin here */
+
+	do {
+		switch (locked) {
+		case CMTX_LOCKED:
+			locked = atomic_cas_uint(&cmtx->lock,
+			    CMTX_LOCKED, CMTX_CONTENDED);
+			if (locked == CMTX_UNLOCKED)
+				break;
+
+			/* lock is LOCKED -> CONTENDED or was CONTENDED */
+			/* FALLTHROUGH */
+		case CMTX_CONTENDED:
+			futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
+			    CMTX_CONTENDED, NULL, NULL);
+			break;
+		}
+
+		locked = atomic_cas_uint(&cmtx->lock,
+		    CMTX_UNLOCKED, CMTX_CONTENDED);
+	} while (locked != CMTX_UNLOCKED);
+
+	membar_enter_after_atomic();
+}
+
+void
+__cmtx_leave(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	membar_exit_before_atomic();
+	locked = atomic_cas_uint(&cmtx->lock,
+	    CMTX_LOCKED, CMTX_UNLOCKED);
+	if (locked != CMTX_LOCKED) {
+		assert(locked != CMTX_UNLOCKED);
+		cmtx->lock = CMTX_UNLOCKED;
+		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+	}
+}
+
+#else /* FUTEX */
+
+/*
+ * spinlock+futex locks
+ */
+
+void
+__cmtx_init(struct __cmtx *cmtx)
+{
+	cmtx->spin = _SPINLOCK_UNLOCKED;
+	cmtx->lock = CMTX_UNLOCKED;
+}
+
+int
+__cmtx_enter_try(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	if (locked == CMTX_UNLOCKED)
+		cmtx->lock = CMTX_LOCKED;
+	_spinunlock(&cmtx->spin);
+
+	/* spinlocks provide enough membars */
+
+	return (locked == CMTX_UNLOCKED);
+}
+
+void
+__cmtx_enter(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	switch (locked) {
+	case CMTX_UNLOCKED:
+		cmtx->lock = CMTX_LOCKED;
+		break;
+	case CMTX_LOCKED:
+		cmtx->lock = CMTX_CONTENDED;
+		break;
+	}
+	_spinunlock(&cmtx->spin);
+
+	while (locked != CMTX_UNLOCKED) {
+		futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
+		    CMTX_CONTENDED, NULL, NULL);
+
+		_spinlock(&cmtx->spin);
+		locked = cmtx->lock;
+		switch (locked) {
+		case CMTX_UNLOCKED:
+		case CMTX_LOCKED:
+			cmtx->lock = CMTX_CONTENDED;
+			break;
+		}
+		_spinunlock(&cmtx->spin);
+	}
+
+	/* spinlocks provide enough membars */
+}
+
+void
+__cmtx_leave(struct __cmtx *cmtx)
+{
+	unsigned int locked;
+
+	/* spinlocks provide enough membars */
+
+	_spinlock(&cmtx->spin);
+	locked = cmtx->lock;
+	cmtx->lock = CMTX_UNLOCKED;
+	_spinunlock(&cmtx->spin);
+
+	if (locked != CMTX_LOCKED) {
+		assert(locked != CMTX_UNLOCKED);
+		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+	}
+}
+
+#endif /* FUTEX */
+
+/*
+ * recursive mutex
+ */
+
+void
+__rcmtx_init(struct __rcmtx *rcmtx)
+{
+	__cmtx_init(&rcmtx->mtx);
+	rcmtx->owner = NULL;
+	rcmtx->depth = 0;
+}
+
+int
+__rcmtx_enter_try(struct __rcmtx *rcmtx)
+{
+	pthread_t self = pthread_self();
+
+	if (rcmtx->owner != self) {
+		if (__cmtx_enter_try(&rcmtx->mtx) == 0)
+			return (0);
+		assert(rcmtx->owner == NULL);
+		rcmtx->owner = self;
+		assert(rcmtx->depth == 0);
+	}
+
+	rcmtx->depth++;
+
+	return (1);
+}
+
+void
+__rcmtx_enter(struct __rcmtx *rcmtx)
+{
+	pthread_t self = pthread_self();
+
+	if (rcmtx->owner != self) {
+		__cmtx_enter(&rcmtx->mtx);
+		assert(rcmtx->owner == NULL);
+		rcmtx->owner = self;
+		assert(rcmtx->depth == 0);
+	}
+
+	rcmtx->depth++;
+}
+
+void
+__rcmtx_leave(struct __rcmtx *rcmtx)
+{
+	assert(rcmtx->owner == pthread_self());
+	if (--rcmtx->depth == 0) {
+		rcmtx->owner = NULL;
+		__cmtx_leave(&rcmtx->mtx);
+	}
+}

 static void
 _rthread_init(void)
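One loose end: the CAS+futex __cmtx_enter() above carries an "add adaptive spin here" marker. A possible shape for that, purely as a sketch: spin briefly with CAS before the first futex sleep, on the theory that stdio hold times are short. CMTX_SPIN and SPINWAIT() are invented names, and the real tuning (including whether to spin only while the owner is on CPU, which libc cannot cheaply know) is left open exactly as the TODO says:

/*
 * Sketch only: a bounded CAS spin that could sit where the
 * "add adaptive spin here" comment is in __cmtx_enter().
 */
#define CMTX_SPIN	64			/* assumed budget, needs tuning */
#define SPINWAIT()	do { } while (0)	/* e.g. a CPU pause hint */

static inline unsigned int
cmtx_spin(struct __cmtx *cmtx)
{
	unsigned int locked = CMTX_LOCKED;
	int i;

	for (i = 0; i < CMTX_SPIN; i++) {
		locked = atomic_cas_uint(&cmtx->lock,
		    CMTX_UNLOCKED, CMTX_LOCKED);
		if (locked == CMTX_UNLOCKED)
			break;		/* acquired; caller issues the membar */
		SPINWAIT();
	}
	return (locked);	/* caller falls through to the futex path */
}

On a successful spin the caller still owes the membar_enter_after_atomic() that the fast path issues, and bounding the loop keeps the worst case at one wasted spin burst before the usual futex sleep.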