Index: sys/spinlock.h
===================================================================
RCS file: sys/spinlock.h
diff -N sys/spinlock.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ sys/spinlock.h	3 May 2017 12:52:48 -0000
@@ -0,0 +1,75 @@
+/* $OpenBSD$ */
+
+/*
+ * Copyright (c) 2017 David Gwynne
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SYS_SPINLOCK_H_
+#define _SYS_SPINLOCK_H_
+
+#include
+#include
+
+#ifndef SPINLOCK_MD
+struct spinlock {
+	volatile unsigned int lock;
+};
+#endif
+
+#ifdef _KERNEL
+
+#define SPINLOCK_UNLOCKED	1
+#define SPINLOCK_LOCKED		0
+
+#ifndef SPINLOCK_SPIN_HOOK
+#define SPINLOCK_SPIN_HOOK	(void)0
+#endif
+
+#ifndef SPINLOCK_INITIALIZER
+#define SPINLOCK_INITIALIZER()	{ .lock = SPINLOCK_UNLOCKED }
+#endif
+
+#ifndef spinlock_init
+#define spinlock_init(_l) do {					\
+	(_l)->lock = SPINLOCK_UNLOCKED;				\
+} while (/* CONSTCOND */ 0)
+#endif
+
+#ifndef spinlock_enter_try
+#define spinlock_enter_try(_l)					\
+	atomic_swap_uint(&(_l)->lock, SPINLOCK_LOCKED)
+#endif
+
+#ifndef spinlock_spin
+#define spinlock_spin(_l)	((_l)->lock == SPINLOCK_LOCKED)
+#endif
+
+#ifndef spinlock_leave
+#define spinlock_leave(_l) do {					\
+	(_l)->lock = SPINLOCK_UNLOCKED;				\
+} while (/* CONSTCOND */ 0)
+#endif
+
+#define spinlock_enter(_l) do {					\
+	while (!spinlock_enter_try(_l)) {			\
+		do {						\
+			SPINLOCK_SPIN_HOOK;			\
+		} while (spinlock_spin(_l));			\
+	}							\
+} while (/* CONSTCOND */ 0)
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_SPINLOCK_H_ */
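Each primitive above is wrapped in #ifndef, so a machine-dependent header only has to supply the pieces it wants to override and inherits the rest. A rough sketch of what such an override could look like follows; the header contents and the cpu_relax_hypothetical() hook are invented for illustration, only the hook names (SPINLOCK_MD, spinlock_enter_try, SPINLOCK_SPIN_HOOK, ...) come from sys/spinlock.h above:

	/* hypothetical machine-dependent spinlock bits (illustration only) */
	#define SPINLOCK_MD		/* this port defines struct spinlock itself */
	struct spinlock {
		volatile unsigned int lock;	/* mirrors the MI layout here */
	};

	/* take the lock with a compare-and-swap instead of the MI atomic swap */
	#define spinlock_enter_try(_l)					\
		(atomic_cas_uint(&(_l)->lock, SPINLOCK_UNLOCKED,	\
		    SPINLOCK_LOCKED) == SPINLOCK_UNLOCKED)

	/* let the busy-wait loop relax the cpu between polls */
	#define SPINLOCK_SPIN_HOOK	cpu_relax_hypothetical()

Anything the port leaves undefined (spinlock_init, spinlock_spin, spinlock_leave, SPINLOCK_INITIALIZER) falls back to the MI versions.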
Index: kern/kern_lock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_lock.c,v
retrieving revision 1.49
diff -u -p -r1.49 kern_lock.c
--- kern/kern_lock.c	20 Apr 2017 15:06:47 -0000	1.49
+++ kern/kern_lock.c	3 May 2017 12:52:48 -0000
@@ -39,6 +39,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 
 #ifdef MP_LOCKDEBUG
@@ -86,6 +88,115 @@ int
 _kernel_lock_held(void)
 {
 	return (__mp_lock_held(&kernel_lock));
+}
+
+void
+__mtx_init(struct mutex *mtx, int wantipl)
+{
+	mtx->mtx_owner = NULL;
+#ifdef MULTIPROCESSOR
+	spinlock_init(&mtx->mtx_lock);
+#endif
+	mtx->mtx_wantipl = __MUTEX_IPL(wantipl);
+	mtx->mtx_oldipl = IPL_NONE;
+	mtx->mtx_flags = 1;
+}
+
+#ifdef MULTIPROCESSOR
+
+void
+__mtx_enter(struct mutex *mtx)
+{
+	while (__mtx_enter_try(mtx) == 0) {
+		do {
+			SPINLOCK_SPIN_HOOK;
+		} while (spinlock_spin(&mtx->mtx_lock));
+	}
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+	struct cpu_info *ci = curcpu();
+	int s;
+
+	KASSERTMSG(mtx->mtx_flags, "mtx %p: flag not set", mtx);
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		s = splraise(mtx->mtx_wantipl);
+
+	KASSERTMSG(mtx->mtx_owner != ci, "mtx %p: locking against myself", mtx);
+
+	if (spinlock_enter_try(&mtx->mtx_lock)) {
+		mtx->mtx_owner = ci;
+		if (mtx->mtx_wantipl != IPL_NONE)
+			mtx->mtx_oldipl = s;
+#ifdef DIAGNOSTIC
+		ci->ci_mutex_level++;
+#endif
+		membar_enter();
+
+		return (1);
+	}
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		splx(s);
+
+	return (0);
+}
+
+#else /* MULTIPROCESSOR */
+
+void
+__mtx_enter(struct mutex *mtx)
+{
+	struct cpu_info *ci = curcpu();
+
+	KASSERTMSG(mtx->mtx_owner != ci, "mtx %p: locking against myself", mtx);
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+	mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+	ci->ci_mutex_level++;
+#endif
+}
+
+int
+__mtx_enter_try(struct mutex *mtx)
+{
+	__mtx_enter(mtx);
+	return (1);
+}
+#endif
+
+void
+__mtx_leave(struct mutex *mtx)
+{
+	int ipl;
+	int s;
+
+	MUTEX_ASSERT_LOCKED(mtx);
+
+#ifdef DIAGNOSTIC
+	curcpu()->ci_mutex_level--;
+#endif
+	ipl = mtx->mtx_wantipl;
+	s = mtx->mtx_oldipl;
+	mtx->mtx_owner = NULL;
+#ifdef MULTIPROCESSOR
+	{
+		/* avoid reload of mtx from argument after the membar */
+		struct spinlock *lock = &mtx->mtx_lock;
+		membar_exit();
+		spinlock_leave(lock);
+	}
+#endif
+
+	if (ipl != IPL_NONE)
+		splx(s);
 }
 
 #ifdef WITNESS
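The membar_enter() after a successful spinlock_enter_try() and the membar_exit() before spinlock_leave() are what order the critical section against the lock word on MP kernels. From a consumer's point of view nothing changes. A minimal usage sketch, assuming a made-up driver (the softc, foo_attach() and foo_bump() are invented for illustration; mtx_init, mtx_enter, mtx_leave and IPL_NET are existing interfaces):

	struct foo_softc {
		struct mutex	sc_mtx;
		unsigned int	sc_count;
	};

	void
	foo_attach(struct foo_softc *sc)
	{
		/* interrupts at or below IPL_NET stay blocked while held */
		mtx_init(&sc->sc_mtx, IPL_NET);
	}

	void
	foo_bump(struct foo_softc *sc)
	{
		mtx_enter(&sc->sc_mtx);		/* busy-waits if another cpu holds it */
		sc->sc_count++;
		mtx_leave(&sc->sc_mtx);
	}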
Index: arch/hppa/include/mplock.h
===================================================================
RCS file: /cvs/src/sys/arch/hppa/include/mplock.h,v
retrieving revision 1.1
diff -u -p -r1.1 mplock.h
--- arch/hppa/include/mplock.h	25 Mar 2010 14:26:21 -0000	1.1
+++ arch/hppa/include/mplock.h	3 May 2017 12:52:47 -0000
@@ -27,6 +27,10 @@
 #ifndef _MACHINE_MPLOCK_H_
 #define _MACHINE_MPLOCK_H_
 
+struct spinlock {
+	volatile unsigned int lock[4];
+};
+
 /*
  * Really simple spinlock implementation with recursive capabilities.
  * Correctness is paramount, no fancyness allowed.
 */
@@ -51,6 +55,62 @@ int __mp_release_all_but_one(struct __mp
 void __mp_acquire_count(struct __mp_lock *, int);
 int __mp_lock_held(struct __mp_lock *);
 
-#endif
+#define SPINLOCK_INITIALIZER() {				\
+	.lock = {						\
+		SPINLOCK_UNLOCKED,				\
+		SPINLOCK_UNLOCKED,				\
+		SPINLOCK_UNLOCKED,				\
+		SPINLOCK_UNLOCKED,				\
+	},							\
+}
+
+#define spinlock_init(_l)					\
+do {								\
+	(_l)->lock[0] = SPINLOCK_UNLOCKED;			\
+	(_l)->lock[1] = SPINLOCK_UNLOCKED;			\
+	(_l)->lock[2] = SPINLOCK_UNLOCKED;			\
+	(_l)->lock[3] = SPINLOCK_UNLOCKED;			\
+} while (/* CONSTCOND */ 0)
+
+/* Note: lock must be 16-byte aligned. */
+#define __spinlock_lock(_l) \
+	((volatile unsigned int *)(((vaddr_t)(_l)->lock + 0xf) & ~0xf))
+
+static inline unsigned int
+__spinlock_enter_try(struct spinlock *l)
+{
+	volatile unsigned int *lock = __spinlock_lock(l);
+	unsigned int ret;
+
+	__asm volatile (
+		"ldcws	0(%2), %0"
+		: "=&r" (ret), "+m" (*lock)
+		: "r" (lock)
+	);
+
+	return (ret);
+}
+
+#define spinlock_enter_try(_l)	__spinlock_enter_try(_l)
+
+static inline int
+__spinlock_spin(struct spinlock *l)
+{
+	volatile unsigned int *lock = __spinlock_lock(l);
+
+	return (*lock == SPINLOCK_LOCKED);
+}
+
+#define spinlock_spin(_l)	__spinlock_spin(_l)
+
+static inline void
+__spinlock_leave(struct spinlock *l)
+{
+	volatile unsigned int *lock = __spinlock_lock(l);
+
+	*lock = SPINLOCK_UNLOCKED;
+}
+
+#define spinlock_leave(_l)	__spinlock_leave(_l)
+
+#endif /* _KERNEL */
 
 #endif /* !_MACHINE_MPLOCK_H */
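hppa's only atomic memory operation is ldcw/ldcws, "load and clear word": it reads a 16-byte aligned word and writes zero to it in one step. That is why the MI header picks SPINLOCK_UNLOCKED as 1 and SPINLOCK_LOCKED as 0; a nonzero result from ldcws means the lock was free and is now ours, and releasing is a plain store of 1. The lock[4] array plus __spinlock_lock() only exist to manufacture the required alignment. A worked example of the rounding, using a made-up address:

	/*
	 * If lock[] happened to start at 0x1008, the array would cover
	 * 0x1008-0x1017 and __spinlock_lock() would yield
	 *	(0x1008 + 0xf) & ~0xf == 0x1010
	 * which is 16-byte aligned and still lies within the array,
	 * satisfying what ldcw/ldcws requires.
	 */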
Index: arch/sparc64/conf/files.sparc64
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/conf/files.sparc64,v
retrieving revision 1.148
diff -u -p -r1.148 files.sparc64
--- arch/sparc64/conf/files.sparc64	8 Jan 2016 15:54:13 -0000	1.148
+++ arch/sparc64/conf/files.sparc64	3 May 2017 12:52:48 -0000
@@ -330,7 +330,6 @@ file	arch/sparc64/sparc64/lock_machdep.c
 file	arch/sparc64/sparc64/machdep.c
 file	arch/sparc64/sparc64/mdesc.c		sun4v
 file	arch/sparc64/sparc64/mem.c
-file	arch/sparc64/sparc64/mutex.S
 file	arch/sparc64/sparc64/openprom.c
 file	arch/sparc64/sparc64/openfirm.c
 file	arch/sparc64/sparc64/ofw_machdep.c
Index: arch/sparc64/include/mplock.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/include/mplock.h,v
retrieving revision 1.3
diff -u -p -r1.3 mplock.h
--- arch/sparc64/include/mplock.h	14 Mar 2014 01:20:44 -0000	1.3
+++ arch/sparc64/include/mplock.h	3 May 2017 12:52:48 -0000
@@ -48,6 +48,11 @@ int __mp_release_all_but_one(struct __mp
 void __mp_acquire_count(struct __mp_lock *, int);
 int __mp_lock_held(struct __mp_lock *);
 
+void __mp_lock_spin_hook(void);
+#define SPINLOCK_SPIN_HOOK __mp_lock_spin_hook()
 #endif
+
+#define spinlock_enter_try(_l) \
+	atomic_cas_uint(&(_l)->lock, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED)
 
 #endif /* !_MACHINE_MPLOCK_H */
Index: arch/sparc64/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/include/mutex.h,v
retrieving revision 1.5
diff -u -p -r1.5 mutex.h
--- arch/sparc64/include/mutex.h	20 Apr 2017 13:57:30 -0000	1.5
+++ arch/sparc64/include/mutex.h	3 May 2017 12:52:48 -0000
@@ -28,17 +28,6 @@
 #ifndef _MACHINE_MUTEX_H_
 #define _MACHINE_MUTEX_H_
 
-#include
-
-struct mutex {
-	volatile void *mtx_owner;	/* mutex.S relies upon this being first */
-	int mtx_wantipl;
-	int mtx_oldipl;
-#ifdef WITNESS
-	struct lock_object mtx_lock_obj;
-#endif
-};
-
 /*
  * To prevent lock ordering problems with the kernel lock, we need to
  * make sure we block all interrupts that can grab the kernel lock.
@@ -46,6 +35,7 @@ struct mutex {
  * raise the interrupt priority level to the highest level that has
  * interrupts that grab the kernel lock.
  */
+
 #ifdef MULTIPROCESSOR
 #define __MUTEX_IPL(ipl) \
 	(((ipl) > IPL_NONE && (ipl) < IPL_SERIAL) ? IPL_SERIAL : (ipl))
@@ -53,33 +43,4 @@ struct mutex {
 #define __MUTEX_IPL(ipl) (ipl)
 #endif
 
-#ifdef WITNESS
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), 0, MTX_LO_INITIALIZER(name, flags) }
-#else
-#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
-	{ NULL, __MUTEX_IPL((ipl)), 0 }
-#endif
-
-void __mtx_init(struct mutex *, int);
-#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
-
-#ifdef DIAGNOSTIC
-#define MUTEX_ASSERT_LOCKED(mtx) do {					\
-	if ((mtx)->mtx_owner != curcpu())				\
-		panic("mutex %p not held in %s", (mtx), __func__);	\
-} while (0)
-
-#define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
-	if ((mtx)->mtx_owner == curcpu())				\
-		panic("mutex %p held in %s", (mtx), __func__);		\
-} while (0)
-#else
-#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
-#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
-#endif
-
-#define MUTEX_LOCK_OBJECT(mtx)	(&(mtx)->mtx_lock_obj)
-#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldipl
-
-#endif
+#endif /* _MACHINE_MUTEX_H_ */
Index: arch/sparc64/sparc64/genassym.cf
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/sparc64/genassym.cf,v
retrieving revision 1.36
diff -u -p -r1.36 genassym.cf
--- arch/sparc64/sparc64/genassym.cf	8 Oct 2016 05:42:38 -0000	1.36
+++ arch/sparc64/sparc64/genassym.cf	3 May 2017 12:52:48 -0000
@@ -86,7 +86,6 @@
 include
 endif
 include
 include
-include
 include
 
@@ -277,11 +276,6 @@ export	SUN4V_TLB_MODIFY
 export	SUN4V_TLB_REAL_W
 export	SUN4V_TLB_EXEC
 export	SUN4V_TLB_W
-
-struct	mutex
-member	mtx_wantipl
-member	mtx_oldipl
-member	mtx_owner
 
 export	CPU_SUN4U
 export	CPU_SUN4V
Index: arch/sparc64/sparc64/lock_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/sparc64/lock_machdep.c,v
retrieving revision 1.14
diff -u -p -r1.14 lock_machdep.c
--- arch/sparc64/sparc64/lock_machdep.c	30 Apr 2017 16:45:45 -0000	1.14
+++ arch/sparc64/sparc64/lock_machdep.c	3 May 2017 12:52:48 -0000
@@ -58,7 +58,7 @@ extern int __mp_lock_spinout;
  * On SPARC64 VI and its successors we execute the processor-specific
  * sleep instruction.
  */
-static __inline void
+void
 __mp_lock_spin_hook(void)
 {
 	__asm volatile(
@@ -83,8 +83,6 @@ __mp_lock_spin_hook(void)
 	"	 .previous		\n"
 	: : : "memory");
 }
-
-#define SPINLOCK_SPIN_HOOK __mp_lock_spin_hook()
 
 static __inline void
 __mp_lock_spin(struct __mp_lock *mpl, u_int me)
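sparc64 keeps the generic struct spinlock but overrides spinlock_enter_try() with atomic_cas_uint(), presumably because compare-and-swap is the natural atomic there, and it exports the existing sleep-instruction hook as SPINLOCK_SPIN_HOOK. For this encoding the cas form is equivalent to the MI swap. A side-by-side model, where try_swap() and try_cas() are illustrative names and not part of the diff:

	static inline unsigned int
	try_swap(struct spinlock *l)		/* models the MI default */
	{
		/* store LOCKED unconditionally; the old value (1 if it was
		 * UNLOCKED) says whether we won */
		return (atomic_swap_uint(&l->lock, SPINLOCK_LOCKED));
	}

	static inline unsigned int
	try_cas(struct spinlock *l)		/* models the sparc64 override */
	{
		/* only stores when the word still reads UNLOCKED; again the
		 * old value doubles as the return value */
		return (atomic_cas_uint(&l->lock, SPINLOCK_UNLOCKED,
		    SPINLOCK_LOCKED));
	}

Both rely on SPINLOCK_UNLOCKED being nonzero, so the old lock word can be used directly as the boolean result.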
Index: sys/mutex.h
===================================================================
RCS file: /cvs/src/sys/sys/mutex.h,v
retrieving revision 1.8
diff -u -p -r1.8 mutex.h
--- sys/mutex.h	20 Apr 2017 13:57:30 -0000	1.8
+++ sys/mutex.h	3 May 2017 12:52:48 -0000
@@ -28,6 +28,9 @@
 #ifndef _SYS_MUTEX_H_
 #define _SYS_MUTEX_H_
 
+#include
+#include
+
 /*
  * A mutex is:
  *  - owned by a cpu.
@@ -42,7 +45,54 @@
  * "mtx_enter(foo); mtx_enter(bar); mtx_leave(foo); mtx_leave(bar);"
  */
 
-#include
+struct mutex {
+	struct cpu_info *mtx_owner;
+	struct spinlock mtx_lock;
+	int mtx_wantipl;
+	int mtx_oldipl;
+	int mtx_flags;
+
+#ifdef WITNESS
+	struct lock_object mtx_lock_obj;
+#endif
+};
+
+#ifdef WITNESS
+#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) {		\
+	.mtx_lock = SPINLOCK_INITIALIZER(),			\
+	.mtx_owner = NULL,					\
+	.mtx_wantipl = __MUTEX_IPL(ipl),			\
+	.mtx_oldipl = IPL_NONE,					\
+	.mtx_flags = 1,						\
+	.mtx_lock_obj = MTX_LO_INITIALIZER(name, flags),	\
+}
+#else
+#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) {		\
+	.mtx_lock = SPINLOCK_INITIALIZER(),			\
+	.mtx_owner = NULL,					\
+	.mtx_wantipl = __MUTEX_IPL(ipl),			\
+	.mtx_oldipl = IPL_NONE,					\
+	.mtx_flags = 1,						\
+}
+#endif
+
+#ifdef DIAGNOSTIC
+#define MUTEX_ASSERT_LOCKED(mtx) do {				\
+	if ((mtx)->mtx_owner != curcpu())			\
+		panic("mutex %p not held in %s", (mtx), __func__); \
+} while (0)
+
+#define MUTEX_ASSERT_UNLOCKED(mtx) do {				\
+	if ((mtx)->mtx_owner == curcpu())			\
+		panic("mutex %p held in %s", (mtx), __func__);	\
+} while (0)
+#else
+#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
+#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
+#endif
+
+#define MUTEX_LOCK_OBJECT(mtx)	(&(mtx)->mtx_lock_obj)
+#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldipl
 
 #define MTX_LO_FLAGS(flags) \
 	((!((flags) & MTX_NOWITNESS) ? LO_WITNESS : 0) | \
@@ -64,13 +114,7 @@
 #define MUTEX_INITIALIZER(ipl) \
 	MUTEX_INITIALIZER_FLAGS(ipl, NULL, 0)
 
-/*
- * Some architectures need to do magic for the ipl, so they need a macro.
- */
-#ifndef _mtx_init
-void _mtx_init(struct mutex *, int);
-#endif
-
+void __mtx_init(struct mutex *, int);
 void __mtx_enter(struct mutex *);
 int __mtx_enter_try(struct mutex *);
 void __mtx_leave(struct mutex *);
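With the initializer and assertion macros now living in sys/mutex.h, a statically allocated mutex is declared the same way on every architecture. A small example ("example_mtx" is an illustrative name; MUTEX_INITIALIZER() and IPL_HIGH are existing interfaces):

	/* expands via MUTEX_INITIALIZER_FLAGS(IPL_HIGH, NULL, 0) to the
	 * designated initializer above: unlocked spinlock, no owner,
	 * mtx_wantipl = __MUTEX_IPL(IPL_HIGH), mtx_oldipl = IPL_NONE,
	 * mtx_flags = 1 */
	struct mutex example_mtx = MUTEX_INITIALIZER(IPL_HIGH);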