Index: include/thread_private.h
===================================================================
RCS file: /cvs/src/lib/libc/include/thread_private.h,v
diff -u -p -r1.37 thread_private.h
--- include/thread_private.h	18 Aug 2024 02:25:51 -0000	1.37
+++ include/thread_private.h	16 May 2025 01:38:01 -0000
@@ -288,6 +288,21 @@ struct __sem {
 	int shared;
 };
 
+struct __cmtx {
+	_atomic_lock_t	spin;
+	uint32_t	lock;
+	pthread_t	owner;
+	unsigned int	waiting;
+};
+
+struct __rcmtx {
+	_atomic_lock_t	spin;
+	uint32_t	lock;
+	pthread_t	owner;
+	unsigned int	depth;
+	unsigned int	waiting;
+};
+
 TAILQ_HEAD(pthread_queue, pthread);
 
 #ifdef FUTEX
@@ -406,6 +421,14 @@ struct pthread {
 /*
  * Internal functions exported from libc's thread bits for use by libpthread
  */
+
+#define SPIN_COUNT	128
+#if defined(__i386__) || defined(__amd64__)
+#define SPIN_WAIT()	asm volatile("pause": : : "memory")
+#else
+#define SPIN_WAIT()	do { } while (0)
+#endif
+
 void	_spinlock(volatile _atomic_lock_t *);
 int	_spinlocktry(volatile _atomic_lock_t *);
 void	_spinunlock(volatile _atomic_lock_t *);
@@ -414,6 +437,34 @@ void	_rthread_debug(int, const char *, .
 		__attribute__((__format__ (printf, 2, 3)));
 pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
 void	_thread_finalize(void);
+
+/*
+ * simple (non-recursive) mutex for libc/libpthread to use internally
+ */
+void	__cmtx_init(struct __cmtx *);
+void	__cmtx_enter(struct __cmtx *);
+void	__cmtx_leave(struct __cmtx *);
+
+#define __CMTX_INITIALIZER(_cm) {		\
+	.spin		= _SPINLOCK_UNLOCKED,	\
+	.owner		= NULL,			\
+	.waiting	= 0,			\
+}
+
+/*
+ * recursive mutex for libc/libpthread to use internally
+ */
+void	__rcmtx_init(struct __rcmtx *);
+int	__rcmtx_enter_try(struct __rcmtx *);
+void	__rcmtx_enter(struct __rcmtx *);
+void	__rcmtx_leave(struct __rcmtx *);
+
+#define __RCMTX_INITIALIZER(_rcm) {		\
+	.spin		= _SPINLOCK_UNLOCKED,	\
+	.owner		= NULL,			\
+	.depth		= 0,			\
+	.waiting	= 0,			\
+}
 
 /*
  * Threading syscalls not declared in system headers
Index: stdio/fileext.h
===================================================================
RCS file: /cvs/src/lib/libc/stdio/fileext.h,v
diff -u -p -r1.2 fileext.h
--- stdio/fileext.h	17 Jun 2005 20:40:32 -0000	1.2
+++ stdio/fileext.h	16 May 2025 01:38:02 -0000
@@ -35,6 +35,7 @@
 struct __sfileext {
 	struct	__sbuf _ub;		/* ungetc buffer */
 	struct wchar_io_data _wcio;	/* wide char io status */
+	struct __rcmtx _lock;		/* f{lock,trylock,unlock}file */
 };
 
 #define _EXT(fp) ((struct __sfileext *)((fp)->_ext._base))
@@ -45,6 +46,7 @@ do { \
 	_UB(fp)._base = NULL; \
 	_UB(fp)._size = 0; \
 	WCIO_INIT(fp); \
+	__rcmtx_init(&_EXT(fp)->_lock); \
 } while (0)
 
 #define _FILEEXT_SETUP(f, fext) \
Index: stdio/flockfile.c
===================================================================
RCS file: /cvs/src/lib/libc/stdio/flockfile.c,v
diff -u -p -r1.9 flockfile.c
--- stdio/flockfile.c	7 May 2016 19:05:22 -0000	1.9
+++ stdio/flockfile.c	16 May 2025 01:38:02 -0000
@@ -3,19 +3,29 @@
 #include <stdio.h>
 #include "local.h"
 
+static inline struct __rcmtx *
+frcmtx(FILE *fp)
+{
+	return &_EXT(fp)->_lock;
+}
+
 void
 flockfile(FILE *fp)
 {
-	FLOCKFILE(fp);
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		__rcmtx_enter(rcm);
+	}
 }
 DEF_WEAK(flockfile);
 
-
 int
 ftrylockfile(FILE *fp)
 {
-	if (_thread_cb.tc_ftrylockfile != NULL)
-		return (_thread_cb.tc_ftrylockfile(fp));
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		return __rcmtx_enter_try(rcm) ? 0 : -1;
+	}
 
 	return 0;
 }
@@ -24,6 +34,9 @@ DEF_WEAK(ftrylockfile);
 void
 funlockfile(FILE *fp)
 {
-	FUNLOCKFILE(fp);
+	if (__isthreaded) {
+		struct __rcmtx *rcm = frcmtx(fp);
+		__rcmtx_leave(rcm);
+	}
 }
 DEF_WEAK(funlockfile);
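
Note that __rcmtx_enter_try() returns 1 on success, while POSIX requires
ftrylockfile() to return zero on success and nonzero on failure, hence the
inversion above. The FILE lock is now a recursive mutex (__rcmtx) living in
the per-FILE __sfileext, so flockfile()/funlockfile() nest within the owning
thread and stdio's own internal locking composes with the caller's. A minimal
usage sketch (illustration only, not part of the diff; emit_pair() is a
hypothetical caller):

	#include <stdio.h>

	static void
	emit_pair(FILE *fp, int a, int b)
	{
		flockfile(fp);		/* depth 1: this thread owns fp */
		fprintf(fp, "%d ", a);	/* stdio relocks: depth 2 -> 1 */
		fprintf(fp, "%d\n", b);	/* no other thread can interleave */
		funlockfile(fp);	/* depth 0: a waiter may be woken */
	}
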
Index: stdio/local.h
===================================================================
RCS file: /cvs/src/lib/libc/stdio/local.h,v
diff -u -p -r1.25 local.h
--- stdio/local.h	23 May 2016 00:21:48 -0000	1.25
+++ stdio/local.h	16 May 2025 01:38:02 -0000
@@ -39,8 +39,8 @@
 #include <wchar.h>
 
 #include "wcio.h"
-#include "fileext.h"
 #include "thread_private.h"
+#include "fileext.h"
 
 __BEGIN_HIDDEN_DECLS
 void	_cleanup(void);
@@ -95,13 +95,5 @@ __END_HIDDEN_DECLS
 	(fp)->_lb._base = NULL;	\
 }
 
-#define FLOCKFILE(fp)	\
-	do {	\
-		if (_thread_cb.tc_flockfile != NULL)	\
-			_thread_cb.tc_flockfile(fp);	\
-	} while (0)
-#define FUNLOCKFILE(fp)	\
-	do {	\
-		if (_thread_cb.tc_funlockfile != NULL)	\
-			_thread_cb.tc_funlockfile(fp);	\
-	} while (0)
+#define FLOCKFILE(fp)	flockfile(fp)
+#define FUNLOCKFILE(fp)	funlockfile(fp)
Index: thread/Makefile.inc
===================================================================
RCS file: /cvs/src/lib/libc/thread/Makefile.inc,v
diff -u -p -r1.19 Makefile.inc
--- thread/Makefile.inc	6 Feb 2020 03:13:45 -0000	1.19
+++ thread/Makefile.inc	16 May 2025 01:38:02 -0000
@@ -8,7 +8,6 @@ SRCS+=	callbacks.c atfork.c
 SRCS+=	rthread.c \
 	rthread_condattr.c \
 	rthread_debug.c \
-	rthread_file.c \
 	rthread_libc.c \
 	rthread_once.c \
 	rthread_tls.c \
Index: thread/callbacks.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/callbacks.c,v
diff -u -p -r1.4 callbacks.c
--- thread/callbacks.c	5 Sep 2017 02:40:54 -0000	1.4
+++ thread/callbacks.c	16 May 2025 01:38:02 -0000
@@ -51,9 +51,6 @@ _thread_set_callbacks(const struct threa
 	 * here when we actually need to prep for doing MT.
 	 */
 	_thread_cb.tc_canceled = _thread_canceled;
-	_thread_cb.tc_flockfile = _thread_flockfile;
-	_thread_cb.tc_ftrylockfile = _thread_ftrylockfile;
-	_thread_cb.tc_funlockfile = _thread_funlockfile;
 	_thread_cb.tc_malloc_lock = _thread_malloc_lock;
 	_thread_cb.tc_malloc_unlock = _thread_malloc_unlock;
 	_thread_cb.tc_atexit_lock = _thread_atexit_lock;
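
With the tc_flockfile/tc_ftrylockfile/tc_funlockfile callbacks gone,
FLOCKFILE()/FUNLOCKFILE() are direct calls into flockfile()/funlockfile(),
which now do the __isthreaded check themselves. In outline, stdio routines
keep working unchanged; a sketch modelled on fgetc() (based on the macros
above, not code carried in this diff):

	int
	fgetc(FILE *fp)
	{
		int c;

		FLOCKFILE(fp);		/* no-op until __isthreaded is set */
		c = __sgetc(fp);	/* the existing stdio fast path */
		FUNLOCKFILE(fp);

		return (c);
	}
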
Index: thread/rthread.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread.c,v
diff -u -p -r1.9 rthread.c
--- thread/rthread.c	12 Oct 2020 22:06:51 -0000	1.9
+++ thread/rthread.c	16 May 2025 01:38:02 -0000
@@ -21,6 +21,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -32,6 +33,7 @@
 #define RTHREAD_ENV_DEBUG	"RTHREAD_DEBUG"
 
 int _rthread_debug_level;
+unsigned int _ncpus = 0;	/* XXX init */
 
 static int _threads_inited;
@@ -43,11 +45,43 @@ struct pthread _initial_thread = {
 /*
  * internal support functions
  */
+
+/*
+ * Wait for the spinlock to become unlocked.
+ *
+ * On uniprocessor systems it is pointless to spin waiting for
+ * another thread to release the lock: this thread occupies the
+ * only CPU, so the holder cannot run and leave the critical
+ * section until we yield.
+ *
+ * On multiprocessor systems we spin, but only for a bounded
+ * number of iterations: there may still be more runnable threads
+ * than CPUs, and yielding may let the lock holder run sooner.
+ */
+
+static inline void
+_spinlock_wait(volatile _atomic_lock_t *lock)
+{
+	do {
+		if (_ncpus != 1) {
+			unsigned int spin;
+
+			for (spin = 0; spin < SPIN_COUNT; spin++) {
+				SPIN_WAIT();
+				if (*lock == _ATOMIC_LOCK_UNLOCKED)
+					return;
+			}
+		}
+
+		sched_yield();
+	} while (*lock != _ATOMIC_LOCK_UNLOCKED);
+}
+
 void
 _spinlock(volatile _atomic_lock_t *lock)
 {
 	while (_atomic_lock(lock))
-		sched_yield();
+		_spinlock_wait(lock);
 	membar_enter_after_atomic();
 }
 DEF_STRONG(_spinlock);
@@ -69,6 +103,153 @@ _spinunlock(volatile _atomic_lock_t *loc
 	*lock = _ATOMIC_LOCK_UNLOCKED;
 }
 DEF_STRONG(_spinunlock);
+
+/*
+ * libc internal mutex
+ */
+
+void
+__cmtx_init(struct __cmtx *cm)
+{
+	cm->spin = _SPINLOCK_UNLOCKED;
+	cm->lock = 0;
+	cm->owner = NULL;
+	cm->waiting = 0;
+}
+
+void
+__cmtx_enter(struct __cmtx *cm)
+{
+	pthread_t self = pthread_self();
+	pthread_t owner;
+
+	_spinlock(&cm->spin);
+	owner = cm->owner;
+	if (owner == NULL) {
+		cm->owner = self;
+		cm->lock = 1;
+	} else
+		cm->waiting++;
+	_spinunlock(&cm->spin);
+
+	if (owner == NULL) {
+		/* the spinlock ops provided enough membars */
+		return;
+	}
+
+	do {
+		futex(&cm->lock, FUTEX_WAIT_PRIVATE, 1, NULL, NULL);
+
+		_spinlock(&cm->spin);
+		owner = cm->owner;
+		if (owner == NULL) {
+			cm->waiting--;
+			cm->owner = self;
+			cm->lock = 1;
+		}
+		_spinunlock(&cm->spin);
+	} while (owner != NULL);
+
+	/* the spinlock ops provided enough membars */
+}
+
+void
+__cmtx_leave(struct __cmtx *cm)
+{
+	unsigned int waiting;
+
+	_spinlock(&cm->spin);
+	waiting = cm->waiting;
+	cm->owner = NULL;
+	cm->lock = 0;
+	_spinunlock(&cm->spin);	/* this provides membar_exit() */
+
+	if (waiting)
+		futex(&cm->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+}
+
+void
+__rcmtx_init(struct __rcmtx *rcm)
+{
+	rcm->spin = _SPINLOCK_UNLOCKED;
+	rcm->lock = 0;
+	rcm->owner = NULL;
+	rcm->depth = 0;
+	rcm->waiting = 0;
+}
+
+int
+__rcmtx_enter_try(struct __rcmtx *rcm)
+{
+	pthread_t self = pthread_self();
+	pthread_t owner;
+
+	_spinlock(&rcm->spin);
+	owner = rcm->owner;
+	if (owner == NULL) {
+		rcm->owner = owner = self;
+		rcm->lock = 1;
+	}
+	_spinunlock(&rcm->spin);
+
+	if (owner != self)
+		return (0);
+
+	rcm->depth++;
+
+	return (1);
+}
+
+void
+__rcmtx_enter(struct __rcmtx *rcm)
+{
+	pthread_t self = pthread_self();
+	pthread_t owner;
+
+	_spinlock(&rcm->spin);
+	owner = rcm->owner;
+	if (owner == NULL) {
+		rcm->owner = owner = self;
+		rcm->lock = 1;
+	} else if (owner != self)
+		rcm->waiting++;
+	_spinunlock(&rcm->spin);
+
+	while (owner != self) {
+		futex(&rcm->lock, FUTEX_WAIT_PRIVATE, 1, NULL, NULL);
+
+		_spinlock(&rcm->spin);
+		owner = rcm->owner;
+		if (owner == NULL) {
+			rcm->owner = owner = self;
+			rcm->lock = 1;
+			rcm->waiting--;
+		}
+		_spinunlock(&rcm->spin);
+	}
+
+	/* the spinlock ops provided enough membars */
+
+	rcm->depth++;
+}
+
+void
+__rcmtx_leave(struct __rcmtx *rcm)
+{
+	unsigned int waiting;
+
+	if (--rcm->depth > 0)
+		return;
+
+	_spinlock(&rcm->spin);
+	waiting = rcm->waiting;
+	rcm->lock = 0;
+	rcm->owner = NULL;
+	_spinunlock(&rcm->spin);	/* this provides membar_exit() */
+
+	if (waiting)
+		futex(&rcm->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
+}
 
 static void
 _rthread_init(void)
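
The __cmtx protocol uses the spinlock only to guard the owner/waiting
bookkeeping; sleeping and wakeup happen on the lock word via futex(2), so
the uncontended enter and leave paths are one short spinlock critical
section each. A usage sketch (hypothetical consumer, not part of the diff):

	static struct __cmtx counter_mtx = __CMTX_INITIALIZER(counter_mtx);
	static unsigned long counter;

	unsigned long
	counter_next(void)
	{
		unsigned long v;

		__cmtx_enter(&counter_mtx);	/* futex-sleeps if contended */
		v = counter++;
		__cmtx_leave(&counter_mtx);	/* wakes at most one waiter */

		return (v);
	}
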
Index: thread/rthread_file.c
===================================================================
RCS file: thread/rthread_file.c
diff -N thread/rthread_file.c
--- thread/rthread_file.c	27 Dec 2022 17:10:06 -0000	1.3
+++ /dev/null	1 Jan 1970 00:00:00 -0000
@@ -1,305 +0,0 @@
-/*	$OpenBSD: rthread_file.c,v 1.3 2022/12/27 17:10:06 jmc Exp $ */
-/*
- * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *	This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD: uthread_file.c,v 1.9 1999/08/28 00:03:32 peter Exp $
- *
- * POSIX stdio FILE locking functions. These assume that the locking
- * is only required at FILE structure level, not at file descriptor
- * level too.
- *
- */
-
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#include "rthread.h"
-#include "rthread_cb.h"
-
-/*
- * The FILE lock structure. The FILE *fp is locked if the owner is
- * not NULL. If not locked, the file lock structure can be
- * reassigned to a different file by setting fp.
- */
-struct	file_lock {
-	LIST_ENTRY(file_lock)	entry;	/* Entry if file list.	*/
-	FILE			*fp;	/* The target file.	*/
-	struct pthread_queue	lockers;
-	pthread_t		owner;
-	int			count;
-};
-
-/*
- * The number of file lock lists into which the file pointer is
- * hashed. Ideally, the FILE structure size would have been increased,
- * but this causes incompatibility, so separate data structures are
- * required.
- */
-#define NUM_HEADS	128
-
-/*
- * This macro casts a file pointer to a long integer and right
- * shifts this by the number of bytes in a pointer. The shifted
- * value is then remaindered using the maximum number of hash
- * entries to produce and index into the array of static lock
- * structures. If there is a collision, a linear search of the
- * dynamic list of locks linked to each static lock is performed.
- */
-#define file_idx(_p)	((int)((((uintptr_t) _p) >> sizeof(void *)) % NUM_HEADS))
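
An aside on the hash being removed here: file_idx() shifts the FILE pointer
right by sizeof(void *) -- that is, by 8 bits on LP64, the number of bytes in
a pointer rather than the number of bits, exactly as the comment above says --
then reduces it modulo NUM_HEADS. A standalone illustration (hypothetical
addresses, not part of the diff):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_HEADS 128
	#define file_idx(_p) \
	    ((int)((((uintptr_t) _p) >> sizeof(void *)) % NUM_HEADS))

	int
	main(void)
	{
		uintptr_t p;

		/* hypothetical FILE addresses spaced 0x200 apart */
		for (p = 0x7f0000001000UL; p < 0x7f0000002000UL; p += 0x200)
			printf("%#lx -> bucket %d\n", (unsigned long)p,
			    file_idx((void *)p));

		return (0);
	}
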
-
-/*
- * Global array of file locks. The first lock for each hash bucket is
- * allocated statically in the hope that there won't be too many
- * collisions that require a malloc and an element added to the list.
- */
-static struct static_file_lock {
-	LIST_HEAD(file_list_head, file_lock) head;
-	struct	file_lock	fl;
-} flh[NUM_HEADS];
-
-/* Lock for accesses to the hash table: */
-static	_atomic_lock_t	hash_lock	= _SPINLOCK_UNLOCKED;
-
-/*
- * Find a lock structure for a FILE, return NULL if the file is
- * not locked:
- */
-static
-struct file_lock *
-find_lock(int idx, FILE *fp)
-{
-	struct file_lock *p;
-
-	/* Check if the file is locked using the static structure: */
-	if (flh[idx].fl.fp == fp && flh[idx].fl.owner != NULL)
-		/* Return a pointer to the static lock: */
-		p = &flh[idx].fl;
-	else {
-		/* Point to the first dynamic lock: */
-		p = LIST_FIRST(&flh[idx].head);
-
-		/*
-		 * Loop through the dynamic locks looking for the
-		 * target file:
-		 */
-		while (p != NULL && (p->fp != fp || p->owner == NULL))
-			/* Not this file, try the next: */
-			p = LIST_NEXT(p, entry);
-	}
-	return(p);
-}
-
-/*
- * Lock a file, assuming that there is no lock structure currently
- * assigned to it.
- */
-static
-struct file_lock *
-do_lock(int idx, FILE *fp)
-{
-	struct file_lock *p;
-
-	/* Check if the static structure is not being used: */
-	if (flh[idx].fl.owner == NULL) {
-		/* Return a pointer to the static lock: */
-		p = &flh[idx].fl;
-	}
-	else {
-		/* Point to the first dynamic lock: */
-		p = LIST_FIRST(&flh[idx].head);
-
-		/*
-		 * Loop through the dynamic locks looking for a
-		 * lock structure that is not being used:
-		 */
-		while (p != NULL && p->owner != NULL)
-			/* This one is used, try the next: */
-			p = LIST_NEXT(p, entry);
-	}
-
-	/*
-	 * If an existing lock structure has not been found,
-	 * allocate memory for a new one:
-	 */
-	if (p == NULL && (p = (struct file_lock *)
-	    malloc(sizeof(struct file_lock))) != NULL) {
-		/* Add the new element to the list: */
-		LIST_INSERT_HEAD(&flh[idx].head, p, entry);
-	}
-
-	/* Check if there is a lock structure to acquire: */
-	if (p != NULL) {
-		/* Acquire the lock for the running thread: */
-		p->fp = fp;
-		p->owner = pthread_self();
-		p->count = 1;
-		TAILQ_INIT(&p->lockers);
-	}
-	return(p);
-}
-
-void
-_thread_flockfile(FILE * fp)
-{
-	int	idx = file_idx(fp);
-	struct	file_lock	*p;
-	pthread_t	self = pthread_self();
-
-	/* Lock the hash table: */
-	_spinlock(&hash_lock);
-
-	/* Get a pointer to any existing lock for the file: */
-	if ((p = find_lock(idx, fp)) == NULL) {
-		/*
-		 * The file is not locked, so this thread can
-		 * grab the lock:
-		 */
-		do_lock(idx, fp);
-
-	/*
-	 * The file is already locked, so check if the
-	 * running thread is the owner:
-	 */
-	} else if (p->owner == self) {
-		/*
-		 * The running thread is already the
-		 * owner, so increment the count of
-		 * the number of times it has locked
-		 * the file:
-		 */
-		p->count++;
-	} else {
-		/*
-		 * The file is locked for another thread.
-		 * Append this thread to the queue of
-		 * threads waiting on the lock.
-		 */
-		TAILQ_INSERT_TAIL(&p->lockers,self,waiting);
-		while (p->owner != self) {
-			__thrsleep(self, 0, NULL, &hash_lock, NULL);
-			_spinlock(&hash_lock);
-		}
-	}
-
-	/* Unlock the hash table: */
-	_spinunlock(&hash_lock);
-}
-
-int
-_thread_ftrylockfile(FILE * fp)
-{
-	int	ret = -1;
-	int	idx = file_idx(fp);
-	struct	file_lock	*p;
-
-	/* Lock the hash table: */
-	_spinlock(&hash_lock);
-
-	/* Get a pointer to any existing lock for the file: */
-	if ((p = find_lock(idx, fp)) == NULL) {
-		/*
-		 * The file is not locked, so this thread can
-		 * grab the lock:
-		 */
-		p = do_lock(idx, fp);
-
-	/*
-	 * The file is already locked, so check if the
-	 * running thread is the owner:
-	 */
-	} else if (p->owner == pthread_self()) {
-		/*
-		 * The running thread is already the
-		 * owner, so increment the count of
-		 * the number of times it has locked
-		 * the file:
-		 */
-		p->count++;
-	} else {
-		/*
-		 * The file is locked for another thread,
-		 * so this try fails.
-		 */
-		p = NULL;
-	}
-
-	/* Unlock the hash table: */
-	_spinunlock(&hash_lock);
-
-	/* Check if the lock was obtained: */
-	if (p != NULL)
-		/* Return success: */
-		ret = 0;
-
-	return (ret);
-}
-
-void
-_thread_funlockfile(FILE * fp)
-{
-	int	idx = file_idx(fp);
-	struct	file_lock	*p;
-
-	/* Lock the hash table: */
-	_spinlock(&hash_lock);
-
-	/*
-	 * Get a pointer to the lock for the file and check that
-	 * the running thread is the one with the lock:
-	 */
-	if ((p = find_lock(idx, fp)) != NULL && p->owner == pthread_self()) {
-		/*
-		 * Check if this thread has locked the FILE
-		 * more than once:
-		 */
-		if (--p->count == 0) {
-			/* Get the new owner of the lock: */
-			if ((p->owner = TAILQ_FIRST(&p->lockers)) != NULL) {
-				/* Pop the thread off the queue: */
-				TAILQ_REMOVE(&p->lockers,p->owner,waiting);
-
-				/*
-				 * This is the first lock for the new
-				 * owner:
-				 */
-				p->count = 1;
-
-				__thrwakeup(p->owner, 1);
-			}
-		}
-	}
-
-	/* Unlock the hash table: */
-	_spinunlock(&hash_lock);
-}
Index: thread/rthread_mutex.c
===================================================================
RCS file: /cvs/src/lib/libc/thread/rthread_mutex.c,v
diff -u -p -r1.6 rthread_mutex.c
--- thread/rthread_mutex.c	20 Sep 2024 02:00:46 -0000	1.6
+++ thread/rthread_mutex.c	16 May 2025 01:38:02 -0000
@@ -36,13 +36,6 @@ enum {
 	CONTENDED	= 2,	/* threads waiting for this mutex */
 };
 
-#define SPIN_COUNT	128
-#if defined(__i386__) || defined(__amd64__)
-#define SPIN_WAIT()	asm volatile("pause": : : "memory")
-#else
-#define SPIN_WAIT()	do { } while (0)
-#endif
-
 static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
 
 int
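
SPIN_COUNT and SPIN_WAIT() move into thread_private.h (above) so that
rthread_mutex.c and the new _spinlock_wait() share a single definition.
In isolation, the bounded busy-wait they implement looks like this (a
sketch under the definitions above; spin_trywait() is a hypothetical
name, not part of the diff):

	static int
	spin_trywait(volatile _atomic_lock_t *lock)
	{
		unsigned int spin;

		for (spin = 0; spin < SPIN_COUNT; spin++) {
			SPIN_WAIT();	/* "pause" + memory clobber on x86 */
			if (*lock == _ATOMIC_LOCK_UNLOCKED)
				return (1);	/* looks free; retry the CAS */
		}

		return (0);	/* still held; caller should yield */
	}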