Index: sys/rwlock.h
===================================================================
RCS file: /cvs/src/sys/sys/rwlock.h,v
retrieving revision 1.17
diff -u -p -r1.17 rwlock.h
--- sys/rwlock.h	10 Feb 2015 10:04:27 -0000	1.17
+++ sys/rwlock.h	10 Feb 2015 10:18:20 -0000
@@ -110,10 +110,6 @@ int rw_status(struct rwlock *);
  */
 #define RW_WRITE_OTHER		0x0100UL
 
-#ifndef rw_cas
-int	rw_cas(volatile unsigned long *, unsigned long, unsigned long);
-#endif
-
 /* recursive rwlocks; */
 struct rrwlock {
 	struct rwlock		rrwl_lock;
Index: kern/kern_rwlock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_rwlock.c,v
retrieving revision 1.24
diff -u -p -r1.24 kern_rwlock.c
--- kern/kern_rwlock.c	10 Feb 2015 10:04:27 -0000	1.24
+++ kern/kern_rwlock.c	10 Feb 2015 10:18:20 -0000
@@ -24,11 +24,23 @@
 #include
 #include
 
-#include
-
 /* XXX - temporary measure until proc0 is properly aligned */
 #define RW_PROC(p) (((long)p) & ~RWLOCK_MASK)
 
+#ifdef MULTIPROCESSOR
+#define rw_cas(p, o, n)	(atomic_cas_ulong(p, o, n) != o)
+#else
+static inline int
+rw_cas(volatile unsigned long *p, unsigned long o, unsigned long n)
+{
+	if (*p != o)
+		return (1);
+	*p = n;
+
+	return (0);
+}
+#endif
+
 /*
  * Magic wand for lock operations. Every operation checks if certain
  * flags are set and if they aren't, it increments the lock with some
@@ -123,18 +135,6 @@ rw_exit_write(struct rwlock *rwl)
 	    rw_cas(&rwl->rwl_owner, owner, 0)))
 		rw_exit(rwl);
 }
-
-#ifndef rw_cas
-int
-rw_cas(volatile unsigned long *p, unsigned long o, unsigned long n)
-{
-	if (*p != o)
-		return (1);
-	*p = n;
-
-	return (0);
-}
-#endif
 
 #ifdef DIAGNOSTIC
 /*
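
For reference, the MI rw_cas() above keeps the calling convention of the old
per-arch function: it evaluates to 0 when the swap succeeded and to non-zero
when the lock word had already changed. atomic_cas_ulong(9) returns the value
that was in the word before the operation, so comparing its return value
against the expected old value gives exactly that. The snippet below is only a
minimal userland sketch of that equivalence, not part of the diff; cas_ulong()
is a made-up stand-in for the kernel's atomic_cas_ulong(), implemented here
with the GCC __sync_val_compare_and_swap() builtin.

/*
 * Userland sketch (not kernel code): cas_ulong() mimics atomic_cas_ulong(9)
 * by returning the previous value of *p, so "!= o" is non-zero exactly
 * when the swap did not happen.
 */
#include <stdio.h>

static unsigned long
cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	return (__sync_val_compare_and_swap(p, o, n));
}

#define rw_cas(p, o, n)	(cas_ulong(p, o, n) != o)

int
main(void)
{
	volatile unsigned long owner = 0;

	/* owner is 0, so the swap happens and rw_cas() evaluates to 0 */
	printf("first cas: %d\n", rw_cas(&owner, 0, 42));

	/* owner is now 42, the swap is refused and rw_cas() evaluates to 1 */
	printf("second cas: %d\n", rw_cas(&owner, 0, 7));

	return (0);
}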