Index: hppa/lock_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/hppa/hppa/lock_machdep.c,v
retrieving revision 1.6
diff -u -p -r1.6 lock_machdep.c
--- hppa/lock_machdep.c	17 Jun 2014 00:00:48 -0000	1.6
+++ hppa/lock_machdep.c	22 Sep 2014 06:21:21 -0000
@@ -204,31 +204,3 @@ __mp_lock_held(struct __mp_lock *mpl)
 {
 	return mpl->mpl_cpu == curcpu();
 }
-
-/*
- * Emulate a compare-and-swap instruction for rwlocks, by using a
- * __cpu_simple_lock as a critical section.
- *
- * Since we are only competing against other processors for rwlocks,
- * it is not necessary in this case to disable interrupts to prevent
- * reentrancy on the same processor.
- */
-
-__cpu_simple_lock_t rw_cas_spinlock = __SIMPLELOCK_UNLOCKED;
-
-int
-rw_cas_hppa(volatile unsigned long *p, unsigned long o, unsigned long n)
-{
-	int rc = 0;
-
-	__cpu_simple_lock(&rw_cas_spinlock);
-
-	if (*p != o)
-		rc = 1;
-	else
-		*p = n;
-
-	__cpu_simple_unlock(&rw_cas_spinlock);
-
-	return (rc);
-}
Index: hppa/machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/hppa/hppa/machdep.c,v
retrieving revision 1.235
diff -u -p -r1.235 machdep.c
--- hppa/machdep.c	19 Sep 2014 18:21:14 -0000	1.235
+++ hppa/machdep.c	22 Sep 2014 06:21:21 -0000
@@ -130,7 +130,7 @@ dev_t	bootdev;
 int	physmem, resvmem, resvphysmem, esym;
 
 #ifdef MULTIPROCESSOR
-struct mutex mtx_atomic = MUTEX_INITIALIZER(IPL_NONE);
+__cpu_simple_lock_t atomic_lock = __SIMPLELOCK_UNLOCKED;
 #endif
 
 /*
Index: include/atomic.h
===================================================================
RCS file: /cvs/src/sys/arch/hppa/include/atomic.h,v
retrieving revision 1.7
diff -u -p -r1.7 atomic.h
--- include/atomic.h	17 Jun 2014 19:49:53 -0000	1.7
+++ include/atomic.h	22 Sep 2014 06:21:21 -0000
@@ -7,41 +7,219 @@
 
 #if defined(_KERNEL)
 
-#include <sys/mutex.h>
+typedef volatile u_int __cpu_simple_lock_t __attribute__((__aligned__(16)));
+
+#define __SIMPLELOCK_LOCKED	0
+#define __SIMPLELOCK_UNLOCKED	1
+
+static inline void
+__cpu_simple_lock_init(__cpu_simple_lock_t *l)
+{
+	*l = __SIMPLELOCK_UNLOCKED;
+}
+
+static inline unsigned int
+__cpu_simple_lock_ldcws(__cpu_simple_lock_t *l)
+{
+	unsigned int o;
+
+	asm volatile("ldcws 0(%2), %0" : "=&r" (o), "+m" (l) : "r" (l));
+
+	return (o);
+}
+
+static inline void
+__cpu_simple_lock(__cpu_simple_lock_t *l)
+{
+	while (__cpu_simple_lock_ldcws(l) != __SIMPLELOCK_UNLOCKED)
+		;
+}
+
+static inline int
+__cpu_simple_lock_try(__cpu_simple_lock_t *l)
+{
+	return (__cpu_simple_lock_ldcws(l) == __SIMPLELOCK_UNLOCKED);
+}
+
+static inline void
+__cpu_simple_unlock(__cpu_simple_lock_t *l)
+{
+	*l = __SIMPLELOCK_UNLOCKED;
+}
 
 #ifdef MULTIPROCESSOR
-extern struct mutex mtx_atomic;
-#define ATOMIC_LOCK	mtx_enter(&mtx_atomic)
-#define ATOMIC_UNLOCK	mtx_leave(&mtx_atomic)
+extern __cpu_simple_lock_t atomic_lock;
+#define ATOMIC_LOCK	__cpu_simple_lock(&atomic_lock)
+#define ATOMIC_UNLOCK	__cpu_simple_unlock(&atomic_lock)
 #else
 #define ATOMIC_LOCK
 #define ATOMIC_UNLOCK
 #endif
 
-static __inline void
-atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
+static inline register_t
+atomic_enter(void)
 {
 	register_t eiem;
 
 	__asm volatile("mfctl %%cr15, %0": "=r" (eiem));
 	__asm volatile("mtctl %r0, %cr15");
 	ATOMIC_LOCK;
-	*uip |= v;
+
+	return (eiem);
+}
+
+static inline void
+atomic_leave(register_t eiem)
+{
 	ATOMIC_UNLOCK;
 	__asm volatile("mtctl %0, %%cr15":: "r" (eiem));
 }
 
+static inline unsigned int
+_atomic_cas_uint(volatile unsigned int *uip, unsigned int o,
+    unsigned int n)
+{
+	register_t eiem;
+	unsigned int rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	if (rv == o)
+		*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
+
+static inline unsigned long
+_atomic_cas_ulong(volatile unsigned long *uip, unsigned long o, unsigned long n)
+{
+	register_t eiem;
+	unsigned long rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	if (rv == o)
+		*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))
+
+static inline void *
+_atomic_cas_ptr(volatile void *uip, void *o, void *n)
+{
+	register_t eiem;
+	void * volatile *uipp = (void * volatile *)uip;
+	void *rv;
+
+	eiem = atomic_enter();
+	rv = *uipp;
+	if (rv == o)
+		*uipp = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
+
+static inline unsigned int
+_atomic_swap_uint(volatile unsigned int *uip, unsigned int n)
+{
+	register_t eiem;
+	unsigned int rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_swap_uint(_p, _n) _atomic_swap_uint((_p), (_n))
+
+static inline unsigned long
+_atomic_swap_ulong(volatile unsigned long *uip, unsigned long n)
+{
+	register_t eiem;
+	unsigned long rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_swap_ulong(_p, _n) _atomic_swap_ulong((_p), (_n))
+
+static inline void *
+_atomic_swap_ptr(volatile void *uip, void *n)
+{
+	register_t eiem;
+	void * volatile *uipp = (void * volatile *)uip;
+	void *rv;
+
+	eiem = atomic_enter();
+	rv = *uipp;
+	*uipp = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
+
+static __inline unsigned int
+_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
+{
+	register_t eiem;
+	unsigned int rv;
+
+	eiem = atomic_enter();
+	*uip += v;
+	rv = *uip;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_add_int_nv(_uip, _v) _atomic_add_int_nv((_uip), (_v))
+#define atomic_sub_int_nv(_uip, _v) _atomic_add_int_nv((_uip), 0 - (_v))
+
+static __inline unsigned long
+_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
+{
+	register_t eiem;
+	unsigned long rv;
+
+	eiem = atomic_enter();
+	*uip += v;
+	rv = *uip;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_add_long_nv(_uip, _v) _atomic_add_long_nv((_uip), (_v))
+#define atomic_sub_long_nv(_uip, _v) _atomic_add_long_nv((_uip), 0 - (_v))
+
+static __inline void
+atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
+{
+	register_t eiem;
+
+	eiem = atomic_enter();
+	*uip |= v;
+	atomic_leave(eiem);
+}
+
 static __inline void
 atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
 {
 	register_t eiem;
 
-	__asm volatile("mfctl %%cr15, %0": "=r" (eiem));
-	__asm volatile("mtctl %r0, %cr15");
-	ATOMIC_LOCK;
+	eiem = atomic_enter();
 	*uip &= ~v;
-	ATOMIC_UNLOCK;
-	__asm volatile("mtctl %0, %%cr15":: "r" (eiem));
+	atomic_leave(eiem);
 }
 
 static __inline void
@@ -49,12 +227,9 @@ atomic_setbits_long(volatile unsigned lo
 {
 	register_t eiem;
 
-	__asm volatile("mfctl %%cr15, %0": "=r" (eiem));
-	__asm volatile("mtctl %r0, %cr15");
-	ATOMIC_LOCK;
+	eiem = atomic_enter();
 	*uip |= v;
-	ATOMIC_UNLOCK;
-	__asm volatile("mtctl %0, %%cr15":: "r" (eiem));
+	atomic_leave(eiem);
 }
 
 static __inline void
@@ -62,12 +237,9 @@ atomic_clearbits_long(volatile unsigned
 {
 	register_t eiem;
 
__asm volatile("mfctl %%cr15, %0": "=r" (eiem)); - __asm volatile("mtctl %r0, %cr15"); - ATOMIC_LOCK; + eiem = atomic_enter(); *uip &= ~v; - ATOMIC_UNLOCK; - __asm volatile("mtctl %0, %%cr15":: "r" (eiem)); + atomic_leave(eiem); } /* Index: include/lock.h =================================================================== RCS file: /cvs/src/sys/arch/hppa/include/lock.h,v retrieving revision 1.7 diff -u -p -r1.7 lock.h --- include/lock.h 29 Mar 2014 18:09:29 -0000 1.7 +++ include/lock.h 22 Sep 2014 06:21:21 -0000 @@ -7,48 +7,6 @@ #include -typedef volatile u_int __cpu_simple_lock_t __attribute__((__aligned__(16))); - -#define __SIMPLELOCK_LOCKED 0 -#define __SIMPLELOCK_UNLOCKED 1 - -static __inline__ void -__cpu_simple_lock_init(__cpu_simple_lock_t *l) -{ - *l = __SIMPLELOCK_UNLOCKED; -} - -static __inline__ void -__cpu_simple_lock(__cpu_simple_lock_t *l) -{ - volatile u_int old; - - do { - __asm__ volatile - ("ldcws 0(%2), %0" : "=&r" (old), "+m" (l) : "r" (l)); - } while (old != __SIMPLELOCK_UNLOCKED); -} - -static __inline__ int -__cpu_simple_lock_try(__cpu_simple_lock_t *l) -{ - volatile u_int old; - - __asm__ volatile - ("ldcws 0(%2), %0" : "=&r" (old), "+m" (l) : "r" (l)); - - return (old == __SIMPLELOCK_UNLOCKED); -} - -static __inline__ void -__cpu_simple_unlock(__cpu_simple_lock_t *l) -{ - *l = __SIMPLELOCK_UNLOCKED; -} - -#if defined(_KERNEL) && defined(MULTIPROCESSOR) -int rw_cas_hppa(volatile unsigned long *, unsigned long, unsigned long); -#define rw_cas rw_cas_hppa -#endif +#define rw_cas(p, o, n) (atomic_cas_ulong(p, o, n) != o) #endif /* _MACHINE_LOCK_H_ */