Index: hppa/lock_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/hppa/hppa/lock_machdep.c,v
retrieving revision 1.8
diff -u -p -r1.8 lock_machdep.c
--- hppa/lock_machdep.c	11 Feb 2015 01:58:57 -0000	1.8
+++ hppa/lock_machdep.c	27 Apr 2015 14:15:59 -0000
@@ -77,20 +77,14 @@ extern int __mp_lock_spinout;
 static __inline void
 __mp_lock_spin(struct __mp_lock *mpl)
 {
-#ifndef MP_LOCKDEBUG
-	while (mpl->mpl_count != 0)
-		SPINLOCK_SPIN_HOOK;
-#else
-	int ticks = __mp_lock_spinout;
+	u_long spins = ~0UL;
 
-	while (mpl->mpl_count != 0 && --ticks > 0)
+	while (mpl->mpl_count != 0) {
 		SPINLOCK_SPIN_HOOK;
 
-	if (ticks == 0) {
-		db_printf("__mp_lock(%p): lock spun out", mpl);
-		Debugger();
+		if (--spins == 0)
+			panic("%s: too many spins", __func__);
 	}
-#endif
 }
 
 void
@@ -152,7 +146,7 @@ __mp_unlock(struct __mp_lock *mpl)
 int
 __mp_release_all(struct __mp_lock *mpl)
 {
-	int rv = mpl->mpl_count - 1;
+	int rv;
 	int s;
 
 #ifdef MP_LOCKDEBUG
@@ -164,9 +158,10 @@ __mp_release_all(struct __mp_lock *mpl)
 #endif
 
 	s = hppa_intr_disable();
+	rv = mpl->mpl_count - 1;
 	mpl->mpl_cpu = NULL;
-	__asm volatile("sync" ::: "memory");
 	mpl->mpl_count = 0;
+	__asm volatile("sync" ::: "memory");
 	hppa_intr_enable(s);
 
 	return (rv);
@@ -175,7 +170,8 @@ __mp_release_all(struct __mp_lock *mpl)
 int
 __mp_release_all_but_one(struct __mp_lock *mpl)
 {
-	int rv = mpl->mpl_count - 2;
+	int rv;
+	int s;
 
 #ifdef MP_LOCKDEBUG
 	if (mpl->mpl_cpu != curcpu()) {
@@ -185,7 +181,10 @@ __mp_release_all_but_one(struct __mp_loc
 	}
 #endif
 
+	s = hppa_intr_disable();
+	rv = mpl->mpl_count - 2;
 	mpl->mpl_count = 2;
+	hppa_intr_enable(s);
 
 	return (rv);
 }
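
For reference, the resulting __mp_release_all() looks like this (assembled
from the hunks above, with the MP_LOCKDEBUG check left out). The point of
the reordering: rv is now sampled after hppa_intr_disable(), so an
interrupt that takes and releases the lock in between can no longer skew
the returned recursion count, and the "sync" now sits after the mpl_count
store, so both stores are fenced before interrupts come back on:

int
__mp_release_all(struct __mp_lock *mpl)
{
	int rv;
	int s;

	s = hppa_intr_disable();
	rv = mpl->mpl_count - 1;	/* sampled with interrupts off */
	mpl->mpl_cpu = NULL;
	mpl->mpl_count = 0;		/* lock is free from here on */
	__asm volatile("sync" ::: "memory");	/* order both stores */
	hppa_intr_enable(s);

	return (rv);
}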
Index: hppa/mutex.c
===================================================================
RCS file: /cvs/src/sys/arch/hppa/hppa/mutex.c,v
retrieving revision 1.13
diff -u -p -r1.13 mutex.c
--- hppa/mutex.c	17 Jun 2014 15:43:27 -0000	1.13
+++ hppa/mutex.c	27 Apr 2015 13:05:58 -0000
@@ -34,82 +34,115 @@
 #include 
 
-static inline int
-try_lock(struct mutex *mtx)
-{
-	volatile int *lock = (int *)(((vaddr_t)mtx->mtx_lock + 0xf) & ~0xf);
-	volatile register_t ret = 0;
-
-	/* Note: lock must be 16-byte aligned. */
-	asm volatile (
-		"ldcws 0(%2), %0"
-		: "=&r" (ret), "+m" (lock)
-		: "r" (lock)
-	);
+int __mtx_enter_try(struct mutex *);
 
-	return ret;
-}
+#ifdef MULTIPROCESSOR
+/* Note: lock must be 16-byte aligned. */
+#define __mtx_lock(mtx) ((int *)(((vaddr_t)mtx->mtx_lock + 0xf) & ~0xf))
+#endif
 
 void
 __mtx_init(struct mutex *mtx, int wantipl)
 {
+#ifdef MULTIPROCESSOR
 	mtx->mtx_lock[0] = 1;
 	mtx->mtx_lock[1] = 1;
 	mtx->mtx_lock[2] = 1;
 	mtx->mtx_lock[3] = 1;
+#endif
 	mtx->mtx_wantipl = wantipl;
 	mtx->mtx_oldipl = IPL_NONE;
+	mtx->mtx_owner = NULL;
 }
 
+#ifdef MULTIPROCESSOR
+u_int __mtx_spinout = 200000000;
+
 void
 mtx_enter(struct mutex *mtx)
 {
-	int s;
+	u_long spins = ~0UL;
 
-	for (;;) {
-		if (mtx->mtx_wantipl != IPL_NONE)
-			s = splraise(mtx->mtx_wantipl);
-		if (try_lock(mtx)) {
-			membar_enter();
-			if (mtx->mtx_wantipl != IPL_NONE)
-				mtx->mtx_oldipl = s;
-			mtx->mtx_owner = curcpu();
-#ifdef DIAGNOSTIC
-			curcpu()->ci_mutex_level++;
-#endif
-			return;
-		}
-		if (mtx->mtx_wantipl != IPL_NONE)
-			splx(s);
+	while (mtx_enter_try(mtx) == 0) {
+		if (__predict_false(--spins == 0))
+			panic("mtx %p: too many spins", mtx);
 	}
 }
 
 int
 mtx_enter_try(struct mutex *mtx)
 {
+	struct cpu_info *ci = curcpu();
+	volatile int *lock = __mtx_lock(mtx);
+	int ret;
 	int s;
-	
+
 	if (mtx->mtx_wantipl != IPL_NONE)
 		s = splraise(mtx->mtx_wantipl);
-	if (try_lock(mtx)) {
-		membar_enter();
+
+#ifdef DIAGNOSTIC
+	if (__predict_false(mtx->mtx_owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+
+	asm volatile (
+		"ldcws 0(%2), %0"
+		: "=&r" (ret), "+m" (lock)
+		: "r" (lock)
+	);
+
+	if (ret) {
+		mtx->mtx_owner = ci;
 		if (mtx->mtx_wantipl != IPL_NONE)
 			mtx->mtx_oldipl = s;
-		mtx->mtx_owner = curcpu();
 #ifdef DIAGNOSTIC
-		curcpu()->ci_mutex_level++;
+		ci->ci_mutex_level++;
 #endif
-		return 1;
+		membar_enter();
+
+		return (1);
 	}
+
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
 
-	return 0;
+	return (0);
 }
+#else
+void
+mtx_enter(struct mutex *mtx)
+{
+	struct cpu_info *ci = curcpu();
+
+#ifdef DIAGNOSTIC
+	if (__predict_false(mtx->mtx_owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+	mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+	ci->ci_mutex_level++;
+#endif
+}
+
+int
+mtx_enter_try(struct mutex *mtx)
+{
+	mtx_enter(mtx);
+	return (1);
+}
+#endif
 
 void
 mtx_leave(struct mutex *mtx)
{
+#ifdef MULTIPROCESSOR
+	volatile int *lock = __mtx_lock(mtx);
+#endif
 	int s;
 
 	MUTEX_ASSERT_LOCKED(mtx);
@@ -119,12 +152,10 @@ mtx_leave(struct mutex *mtx)
 #endif
 	s = mtx->mtx_oldipl;
 	mtx->mtx_owner = NULL;
+#ifdef MULTIPROCESSOR
+	*lock = 1;
 	membar_exit();
-
-	mtx->mtx_lock[0] = 1;
-	mtx->mtx_lock[1] = 1;
-	mtx->mtx_lock[2] = 1;
-	mtx->mtx_lock[3] = 1;
+#endif
 
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
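
Background on the primitive being open-coded here: PA-RISC's only atomic
operation is ldcws ("load and clear word, short displacement"), which
returns the old value of a word and leaves zero behind, and which requires
the word to be 16-byte aligned. That explains the inverted convention
(1 = unlocked, 0 = locked), the int[4] backing store, and the round-up in
__mtx_lock(): a 16-byte array with at least int alignment always contains
exactly one 16-byte-aligned word, and since which of the four words that
is depends on where the struct lands, __mtx_init() sets all four to 1.
A rough, non-atomic C model (the function names and the vaddr_t typedef
here are illustrative, not kernel code; the real atomicity comes from the
instruction itself):

typedef unsigned long vaddr_t;	/* stand-in for the MD type */

/* Model of what "ldcws 0(lock)" does in a single atomic step. */
static int
ldcws_model(volatile int *lock)
{
	int old = *lock;	/* fetch the previous value... */
	*lock = 0;		/* ...and leave the word cleared */

	return (old);		/* nonzero: we took the lock */
}

/*
 * The round-up done by __mtx_lock(): the first 16-byte boundary at
 * or above mtx_lock[] always falls within its 16 bytes.
 */
static volatile int *
aligned_word(volatile int mtx_lock[4])
{
	return ((volatile int *)(((vaddr_t)mtx_lock + 0xf) & ~0xf));
}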
Index: include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/hppa/include/mutex.h,v
retrieving revision 1.5
diff -u -p -r1.5 mutex.h
--- include/mutex.h	30 Jan 2014 15:18:51 -0000	1.5
+++ include/mutex.h	27 Apr 2015 11:49:18 -0000
@@ -28,12 +28,13 @@
 #ifndef _MACHINE_MUTEX_H_
 #define _MACHINE_MUTEX_H_
 
-#define MUTEX_LOCKED	{ 0, 0, 0, 0 }
 #define MUTEX_UNLOCKED	{ 1, 1, 1, 1 }
 
 /* Note: mtx_lock must be 16-byte aligned. */
 struct mutex {
+#ifdef MULTIPROCESSOR
 	volatile int mtx_lock[4];
+#endif
 	int mtx_wantipl;
 	int mtx_oldipl;
 	void *mtx_owner;
@@ -49,25 +50,23 @@ struct mutex {
 
 #ifdef MULTIPROCESSOR
 #define __MUTEX_IPL(ipl) \
 	(((ipl) > IPL_NONE && (ipl) < IPL_AUDIO) ? IPL_AUDIO : (ipl))
+#define MUTEX_INITIALIZER(ipl) { MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL }
 #else
 #define __MUTEX_IPL(ipl) (ipl)
+#define MUTEX_INITIALIZER(ipl) { __MUTEX_IPL((ipl)), 0, NULL }
 #endif
 
-#define MUTEX_INITIALIZER(ipl) { MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL }
-
 void __mtx_init(struct mutex *, int);
 #define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do {					\
-	if ((mtx)->mtx_lock[0] == 1 && (mtx)->mtx_lock[1] == 1 &&	\
-	    (mtx)->mtx_lock[2] == 1 && (mtx)->mtx_lock[3] == 1)		\
+	if ((mtx)->mtx_owner != curcpu())				\
 		panic("mutex %p not held in %s", (mtx), __func__);	\
 } while (0)
 
 #define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
-	if ((mtx)->mtx_lock[0] != 1 && (mtx)->mtx_lock[1] != 1 &&	\
-	    (mtx)->mtx_lock[2] != 1 && (mtx)->mtx_lock[3] != 1)		\
+	if ((mtx)->mtx_owner == curcpu())				\
 		panic("mutex %p held in %s", (mtx), __func__);		\
 } while (0)
 
 #else
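
On the header side: since mtx_lock now only exists in struct mutex on
MULTIPROCESSOR kernels, MUTEX_INITIALIZER() has to move inside the #ifdef
so each variant matches its struct layout, and the DIAGNOSTIC asserts
switch from inspecting the lock words to comparing mtx_owner against
curcpu(), which works in both configurations. Usage is unchanged either
way; a minimal sketch (IPL_VM and the example names are mine, not part of
the diff):

/* Static initialization picks the right layout at compile time. */
struct mutex example_mtx = MUTEX_INITIALIZER(IPL_VM);

void
example(void)
{
	struct mutex m;

	mtx_init(&m, IPL_VM);	/* wraps __mtx_init() with the clamped IPL */
	mtx_enter(&m);
	/* MUTEX_ASSERT_LOCKED(&m) now passes via mtx_owner == curcpu() */
	mtx_leave(&m);
}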