Index: arch/hppa/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/hppa/include/mutex.h,v
retrieving revision 1.7
diff -u -p -r1.7 mutex.h
--- arch/hppa/include/mutex.h	20 Apr 2017 13:57:29 -0000	1.7
+++ arch/hppa/include/mutex.h	19 May 2017 01:30:45 -0000
@@ -30,6 +30,68 @@
 
 #include 
 
+struct spinlock {
+	volatile unsigned int lock[4];
+};
+#define SPINLOCK_MD
+
+#ifdef _KERNEL
+/*
+ * spinlock implementation
+ */
+
+#define SPINLOCK_INITIALIZER() { .lock = { 1, 1, 1, 1 } }
+
+static inline void
+__spinlock_init(struct spinlock *l)
+{
+	l->lock[0] = 1;
+	l->lock[1] = 1;
+	l->lock[2] = 1;
+	l->lock[3] = 1;
+}
+#define spinlock_init(_l) __spinlock_init(_l)
+
+/* Note: lock must be 16-byte aligned. */
+#define __spinlock_lock(_l) \
+	((volatile unsigned int *)(((vaddr_t)(_l)->lock + 0xf) & ~0xf))
+
+static inline unsigned int
+__spinlock_enter_try(struct spinlock *l)
+{
+	volatile unsigned int *lock = __spinlock_lock(l);
+	unsigned int ret;
+
+	__asm volatile (
+		"ldcws	0(%2), %0"
+		: "=&r" (ret), "+m" (*lock)
+		: "r" (lock)
+	);
+
+	return (ret);
+}
+#define spinlock_enter_try(_l) __spinlock_enter_try(_l)
+
+static inline int
+__spinlock_busy(struct spinlock *l)
+{
+	volatile unsigned int *lock = __spinlock_lock(l);
+
+	return (*lock == 0);
+}
+#define spinlock_busy(_l) __spinlock_busy(_l)
+
+static inline void
+__spinlock_leave(struct spinlock *l)
+{
+	volatile unsigned int *lock = __spinlock_lock(l);
+
+	*lock = 1;
+}
+#define spinlock_leave(_l) __spinlock_leave(_l)
+
+#endif /* _KERNEL */
+
 #define MUTEX_UNLOCKED	{ 1, 1, 1, 1 }
 
 /* Note: mtx_lock must be 16-byte aligned. */
Index: arch/m88k/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/m88k/include/mutex.h,v
retrieving revision 1.5
diff -u -p -r1.5 mutex.h
--- arch/m88k/include/mutex.h	20 Apr 2017 13:57:29 -0000	1.5
+++ arch/m88k/include/mutex.h	19 May 2017 01:30:47 -0000
@@ -29,6 +29,27 @@
 
 #include 
 
+#ifdef _KERNEL
+/*
+ * this is a bit contorted to avoid the function having to know about
+ * struct spinlock or SPINLOCK_UNLOCKED before they are defined.
+ */
+
+static inline unsigned int
+__spinlock_enter_try(volatile unsigned int *lock, unsigned int locked)
+{
+	unsigned int r = locked;
+
+	__asm volatile("xmem %0, %2, %%r0"
+	    : "+r"(r), "+m"(*lock)
+	    : "r"(lock));
+
+	return (r);
+}
+#define spinlock_enter_try(_l) \
+	__spinlock_enter_try(&(_l)->lock, SPINLOCK_LOCKED)
+#endif /* _KERNEL */
+
 struct mutex {
 	volatile int mtx_lock;	/* mutex.S relies upon this field being first */
 	int mtx_wantipl;
Index: arch/sparc64/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/include/mutex.h,v
retrieving revision 1.5
diff -u -p -r1.5 mutex.h
--- arch/sparc64/include/mutex.h	20 Apr 2017 13:57:30 -0000	1.5
+++ arch/sparc64/include/mutex.h	19 May 2017 01:30:53 -0000
@@ -30,6 +30,11 @@
 
 #include 
 
+#ifdef _KERNEL
+#define spinlock_enter_try(_l) \
+	atomic_cas_uint(&(_l)->lock, SPINLOCK_UNLOCKED, SPINLOCK_LOCKED)
+#endif
+
 struct mutex {
 	volatile void *mtx_owner;	/* mutex.S relies upon this being first */
 	int mtx_wantipl;
Index: sys/mutex.h
===================================================================
RCS file: /cvs/src/sys/sys/mutex.h,v
retrieving revision 1.8
diff -u -p -r1.8 mutex.h
--- sys/mutex.h	20 Apr 2017 13:57:30 -0000	1.8
+++ sys/mutex.h	19 May 2017 01:31:00 -0000
@@ -30,9 +30,9 @@
 
 /*
  * A mutex is:
+ *  - spinning.
  *  - owned by a cpu.
  *  - non-recursive.
- *  - spinning.
  *  - not providing mutual exclusion between processes, only cpus.
  *  - providing interrupt blocking when necessary.
  *
@@ -43,6 +43,61 @@
  */
 
 #include 
+
+#ifndef SPINLOCK_MD
+struct spinlock {
+	volatile unsigned int lock;
+};
+#endif
+
+#ifdef _KERNEL
+
+#define SPINLOCK_UNLOCKED	1
+#define SPINLOCK_LOCKED		0
+
+#ifndef SPINLOCK_SPIN_HOOK
+#define SPINLOCK_SPIN_HOOK	(void)0
+#endif
+
+#ifndef SPINLOCK_INITIALIZER
+#define SPINLOCK_INITIALIZER()	{ .lock = SPINLOCK_UNLOCKED }
+#endif
+
+#ifndef spinlock_init
+#define spinlock_init(_l) \
+do { \
+	(_l)->lock = SPINLOCK_UNLOCKED; \
+} while (/* CONSTCOND */ 0)
+#endif
+
+#ifndef spinlock_enter_try
+#define spinlock_enter_try(_l) \
+	atomic_swap_uint(&(_l)->lock, SPINLOCK_LOCKED)
+#endif
+
+#ifndef spinlock_busy
+#define spinlock_busy(_l)	((_l)->lock == SPINLOCK_LOCKED)
+#endif
+
+#ifndef spinlock_leave
+#define spinlock_leave(_l) \
+do { \
+	(_l)->lock = SPINLOCK_UNLOCKED; \
+} while (/* CONSTCOND */ 0)
+#endif
+
+#define spinlock_spin(_l) \
+do { \
+	SPINLOCK_SPIN_HOOK; \
+} while (spinlock_busy(_l))
+
+#define spinlock_enter(_l) \
+do { \
+	while (!spinlock_enter_try(_l)) \
+		spinlock_spin(_l); \
+} while (/* CONSTCOND */ 0)
+
+#endif /* _KERNEL */
 
 #define MTX_LO_FLAGS(flags) \
 	((!((flags) & MTX_NOWITNESS) ? LO_WITNESS : 0) | \
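
For anyone reading the MI fallbacks in sys/mutex.h without a tree handy, below is a
minimal userland sketch of the same protocol: the word holds SPINLOCK_UNLOCKED (1)
when free, spinlock_enter_try() swaps SPINLOCK_LOCKED (0) in and succeeds when the
old value it gets back is 1, and spinlock_enter() spins on plain reads between
attempts.  The spinlock_* names and constants mirror the diff; the pthread harness
and the compiler __atomic builtins are only stand-ins for atomic_swap_uint(9) and
are not part of the change.

/*
 * Userland illustration of the MI spinlock protocol above.  Not
 * kernel code: __atomic builtins and pthreads stand in for the
 * kernel's atomic_swap_uint(9) and cpus.
 */
#include <stdio.h>
#include <pthread.h>

#define SPINLOCK_UNLOCKED	1
#define SPINLOCK_LOCKED		0

struct spinlock {
	volatile unsigned int lock;
};

static void
spinlock_init(struct spinlock *l)
{
	l->lock = SPINLOCK_UNLOCKED;
}

/* Returns the old value: 1 (success) if we took the lock, 0 if it was busy. */
static unsigned int
spinlock_enter_try(struct spinlock *l)
{
	return (__atomic_exchange_n(&l->lock, SPINLOCK_LOCKED,
	    __ATOMIC_ACQUIRE));
}

static void
spinlock_enter(struct spinlock *l)
{
	while (!spinlock_enter_try(l)) {
		/* spin on plain reads until the lock looks free again */
		while (l->lock == SPINLOCK_LOCKED)
			;
	}
}

static void
spinlock_leave(struct spinlock *l)
{
	__atomic_store_n(&l->lock, SPINLOCK_UNLOCKED, __ATOMIC_RELEASE);
}

static struct spinlock slock;
static unsigned long counter;

static void *
worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		spinlock_enter(&slock);
		counter++;
		spinlock_leave(&slock);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t[4];
	int i;

	spinlock_init(&slock);
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("counter = %lu (expect 400000)\n", counter);
	return (0);
}

Builds with cc -pthread; the counter coming out at 400000 is the mutual exclusion at
work.  The inverted sense of the constants (1 free, 0 held) presumably follows
hppa, where ldcw/ldcws can only swap a zero into the word, so every port can share
the one convention.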
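
One hppa-specific detail worth spelling out: ldcws only operates on a 16-byte
aligned word, which is why the MD struct spinlock carries four words and
__spinlock_lock() rounds the array's address up to the next 16-byte boundary; the
aligned word always lands inside the 16-byte array.  A stand-alone illustration of
that address computation (plain C, with uintptr_t standing in for the kernel's
vaddr_t):

/*
 * Illustration of the address trick in the hppa part of the diff:
 * pick the 16-byte aligned word inside a 4-word (16-byte) array.
 */
#include <stdio.h>
#include <stdint.h>

struct spinlock {
	volatile unsigned int lock[4];
};

static volatile unsigned int *
spinlock_word(struct spinlock *l)
{
	return ((volatile unsigned int *)
	    (((uintptr_t)l->lock + 0xf) & ~(uintptr_t)0xf));
}

int
main(void)
{
	struct spinlock l;
	volatile unsigned int *w = spinlock_word(&l);

	/*
	 * Rounding up moves the pointer by at most 15 bytes, so the
	 * chosen 4-byte word always fits inside the 16-byte array.
	 */
	printf("array at %p, aligned word at %p\n",
	    (void *)l.lock, (void *)w);
	return (0);
}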