Index: include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/sgi/include/mutex.h,v
retrieving revision 1.7
diff -u -p -r1.7 mutex.h
--- include/mutex.h	10 Feb 2014 20:30:05 -0000	1.7
+++ include/mutex.h	16 Apr 2015 14:10:30 -0000
@@ -29,10 +29,9 @@
 #define _MACHINE_MUTEX_H_
 
 struct mutex {
-        int mtx_lock;
+        void *mtx_owner;
         int mtx_wantipl;
         int mtx_oldipl;
-        void *mtx_owner;
 };
 
 /*
@@ -49,19 +48,19 @@ struct mutex {
 #define __MUTEX_IPL(ipl) (ipl)
 #endif
 
-#define MUTEX_INITIALIZER(ipl) { 0, __MUTEX_IPL((ipl)), IPL_NONE }
+#define MUTEX_INITIALIZER(ipl) { NULL, __MUTEX_IPL((ipl)), IPL_NONE }
 
 void __mtx_init(struct mutex *, int);
 #define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do {                                  \
-        if ((mtx)->mtx_lock == 0)                                      \
+        if ((mtx)->mtx_owner != curcpu())                              \
                 panic("mutex %p not held in %s", (mtx), __func__);     \
 } while (0)
 
 #define MUTEX_ASSERT_UNLOCKED(mtx) do {                                \
-        if ((mtx)->mtx_lock != 0)                                      \
+        if ((mtx)->mtx_owner == curcpu())                              \
                 panic("mutex %p held in %s", (mtx), __func__);         \
 } while (0)
 #else
Index: sgi/mutex.c
===================================================================
RCS file: /cvs/src/sys/arch/sgi/sgi/mutex.c,v
retrieving revision 1.14
diff -u -p -r1.14 mutex.c
--- sgi/mutex.c	10 Feb 2014 20:30:05 -0000	1.14
+++ sgi/mutex.c	16 Apr 2015 14:10:30 -0000
@@ -28,100 +28,101 @@
 #include <sys/param.h>
 #include <sys/mutex.h>
 #include <sys/systm.h>
+#include <sys/atomic.h>
 
 #include <machine/intr.h>
 
-static inline int
-try_lock(struct mutex *mtx)
-{
-#ifdef MULTIPROCESSOR
-        int tmp, ret = 0;
-
-        asm volatile (
-        ".set noreorder\n"
-        "1:\n"
-        "ll     %0, %2\n"       /* tmp = mtx->mtx_lock */
-        "bnez   %0, 2f\n"
-        " li    %1, 0\n"        /* ret = 0 */
-        "li     %1, 1\n"        /* ret = 1 */
-        "sc     %1, %2\n"       /* mtx->mtx_lock = 1 */
-        "beqz   %1, 1b\n"       /* update failed */
-        " nop\n"
-        "2:\n"
-        ".set reorder\n"
-        : "+r"(tmp), "+r"(ret)
-        : "m"(mtx->mtx_lock));
-
-        return ret;
-#else   /* MULTIPROCESSOR */
-        mtx->mtx_lock = 1;
-        return 1;
-#endif  /* MULTIPROCESSOR */
-}
 
 void
 __mtx_init(struct mutex *mtx, int wantipl)
 {
-        mtx->mtx_lock = 0;
+        mtx->mtx_owner = NULL;
         mtx->mtx_wantipl = wantipl;
         mtx->mtx_oldipl = IPL_NONE;
 }
 
+#ifdef MULTIPROCESSOR
 void
 mtx_enter(struct mutex *mtx)
 {
-        int s;
-
-        for (;;) {
-                if (mtx->mtx_wantipl != IPL_NONE)
-                        s = splraise(mtx->mtx_wantipl);
-                if (try_lock(mtx)) {
-                        if (mtx->mtx_wantipl != IPL_NONE)
-                                mtx->mtx_oldipl = s;
-                        mtx->mtx_owner = curcpu();
-#ifdef DIAGNOSTIC
-                        curcpu()->ci_mutex_level++;
-#endif
-                        return;
-                }
-                if (mtx->mtx_wantipl != IPL_NONE)
-                        splx(s);
-        }
+        while (mtx_enter_try(mtx) == 0)
+                ;
 }
 
 int
 mtx_enter_try(struct mutex *mtx)
 {
+        struct cpu_info *owner, *ci = curcpu();
         int s;
 
         if (mtx->mtx_wantipl != IPL_NONE)
                 s = splraise(mtx->mtx_wantipl);
-        if (try_lock(mtx)) {
+
+        owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
+#ifdef DIAGNOSTIC
+        if (__predict_false(owner == ci))
+                panic("mtx %p: locking against myself", mtx);
+#endif
+        if (owner == NULL) {
                 if (mtx->mtx_wantipl != IPL_NONE)
                         mtx->mtx_oldipl = s;
-                mtx->mtx_owner = curcpu();
 #ifdef DIAGNOSTIC
-                curcpu()->ci_mutex_level++;
+                ci->ci_mutex_level++;
 #endif
-                return 1;
+                membar_enter();
+                return (1);
         }
+
         if (mtx->mtx_wantipl != IPL_NONE)
                 splx(s);
-        return 0;
+
+        return (0);
+}
+#else
+void
+mtx_enter(struct mutex *mtx)
+{
+        struct cpu_info *ci = curcpu();
+
+#ifdef DIAGNOSTIC
+        if (__predict_false(mtx->mtx_owner == ci))
+                panic("mtx %p: locking against myself", mtx);
+#endif
+
+        if (mtx->mtx_wantipl != IPL_NONE)
+                mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+        mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+        ci->ci_mutex_level++;
+#endif
 }
 
+int
+mtx_enter_try(struct mutex *mtx)
+{
+        mtx_enter(mtx);
+        return (1);
+}
+#endif
+
 void
 mtx_leave(struct mutex *mtx)
 {
         int s;
 
         MUTEX_ASSERT_LOCKED(mtx);
+
+#ifdef MULTIPROCESSOR
+        membar_exit();
+#endif
 #ifdef DIAGNOSTIC
         curcpu()->ci_mutex_level--;
 #endif
+
         s = mtx->mtx_oldipl;
         mtx->mtx_owner = NULL;
-        mtx->mtx_lock = 0;
         if (mtx->mtx_wantipl != IPL_NONE)
                 splx(s);
 }
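
For readers following the locking change, below is a minimal userland sketch of the
owner-pointer scheme the diff adopts.  It is not part of the patch: the toy_* names
are invented for illustration, C11 <stdatomic.h> operations stand in for the kernel's
atomic_cas_ptr(), membar_enter() and membar_exit(), and the SPL handling and the
DIAGNOSTIC "locking against myself" panic are omitted.

/*
 * Illustrative sketch only: claim the lock by CASing an owner pointer
 * from NULL to "self", with acquire/release barriers around the
 * critical section, mirroring the structure of the diff above.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct cpu {                            /* stand-in for struct cpu_info */
        int id;
};

struct toy_mutex {
        _Atomic(struct cpu *) owner;    /* NULL while the lock is free */
};

/* Try once to take the lock for "self"; 1 on success, 0 if busy. */
static int
toy_mtx_enter_try(struct toy_mutex *mtx, struct cpu *self)
{
        struct cpu *expected = NULL;

        /* Claim the lock by CASing the owner from NULL to ourselves. */
        if (atomic_compare_exchange_strong_explicit(&mtx->owner, &expected,
            self, memory_order_relaxed, memory_order_relaxed)) {
                /* Acquire barrier, the role membar_enter() plays above. */
                atomic_thread_fence(memory_order_acquire);
                return 1;
        }
        return 0;
}

/* Spin until the try-lock succeeds, mirroring the new mtx_enter(). */
static void
toy_mtx_enter(struct toy_mutex *mtx, struct cpu *self)
{
        while (toy_mtx_enter_try(mtx, self) == 0)
                ;
}

/* Release: barrier first, then clear the owner, as in mtx_leave(). */
static void
toy_mtx_leave(struct toy_mutex *mtx)
{
        atomic_thread_fence(memory_order_release);      /* membar_exit() */
        atomic_store_explicit(&mtx->owner, NULL, memory_order_relaxed);
}

int
main(void)
{
        static struct toy_mutex m;
        struct cpu self = { 0 };        /* pretend this is curcpu() */

        toy_mtx_enter(&m, &self);       /* uncontended, returns at once */
        printf("owner set: %d\n", atomic_load(&m.owner) == &self);
        toy_mtx_leave(&m);
        return 0;
}

Keeping a single mtx_owner word means the same field serves both as the lock state
and as the ownership record used by MUTEX_ASSERT_LOCKED()/MUTEX_ASSERT_UNLOCKED(),
which is why the struct no longer needs a separate mtx_lock.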