Index: macppc/macppc/genassym.cf
===================================================================
RCS file: /cvs/src/sys/arch/macppc/macppc/genassym.cf,v
retrieving revision 1.24
diff -u -p -r1.24 genassym.cf
--- macppc/macppc/genassym.cf	6 Sep 2014 10:45:29 -0000	1.24
+++ macppc/macppc/genassym.cf	2 Jul 2015 10:15:06 -0000
@@ -97,8 +97,3 @@ member ci_disisave
 ifdef DIAGNOSTIC
 member ci_mutex_level
 endif
-
-struct mutex
-member mtx_wantipl
-member mtx_oldcpl
-member mtx_owner
Index: powerpc/conf/files.powerpc
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/conf/files.powerpc,v
retrieving revision 1.51
diff -u -p -r1.51 files.powerpc
--- powerpc/conf/files.powerpc	26 Jun 2015 11:15:32 -0000	1.51
+++ powerpc/conf/files.powerpc	2 Jul 2015 10:15:06 -0000
@@ -13,8 +13,8 @@ file	arch/powerpc/powerpc/process_machde
 file	arch/powerpc/powerpc/sys_machdep.c
 file	arch/powerpc/powerpc/trap.c
 file	arch/powerpc/powerpc/vm_machdep.c
+file	arch/powerpc/powerpc/mutex.c
 file	arch/powerpc/powerpc/lock_machdep.c	multiprocessor
-file	arch/powerpc/powerpc/mutex.S
 file	arch/powerpc/powerpc/intr.c
 file	arch/powerpc/powerpc/softintr.c
Index: powerpc/include/lock.h
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/include/lock.h,v
retrieving revision 1.6
diff -u -p -r1.6 lock.h
--- powerpc/include/lock.h	26 Jun 2015 12:46:13 -0000	1.6
+++ powerpc/include/lock.h	2 Jul 2015 10:15:06 -0000
@@ -37,4 +37,6 @@
 #ifndef _POWERPC_LOCK_H_
 #define _POWERPC_LOCK_H_
 
+#define SPINLOCK_SPIN_HOOK	do { } while (0)
+
 #endif /* _POWERPC_LOCK_H_ */
Index: powerpc/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/include/mutex.h,v
retrieving revision 1.4
diff -u -p -r1.4 mutex.h
--- powerpc/include/mutex.h	29 Mar 2014 18:09:30 -0000	1.4
+++ powerpc/include/mutex.h	2 Jul 2015 10:15:06 -0000
@@ -28,9 +28,9 @@
 #define _POWERPC_MUTEX_H_
 
 struct mutex {
-	int mtx_wantipl;
-	int mtx_oldcpl;
 	volatile void *mtx_owner;
+	int mtx_wantipl;
+	int mtx_oldipl;
 };
 
 /*
@@ -47,7 +47,7 @@ struct mutex {
 #define __MUTEX_IPL(ipl) (ipl)
 #endif
 
-#define MUTEX_INITIALIZER(ipl) { __MUTEX_IPL((ipl)), 0, NULL }
+#define MUTEX_INITIALIZER(ipl) { NULL, __MUTEX_IPL(ipl), IPL_NONE }
 
 void __mtx_init(struct mutex *, int);
 #define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
@@ -67,6 +67,6 @@ void __mtx_init(struct mutex *, int);
 #define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
 #endif
 
-#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldcpl
+#define MUTEX_OLDIPL(mtx)	((mtx)->mtx_oldipl)
 
 #endif
Index: powerpc/powerpc/lock_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/powerpc/lock_machdep.c,v
retrieving revision 1.2
diff -u -p -r1.2 lock_machdep.c
--- powerpc/powerpc/lock_machdep.c	26 Jun 2015 12:46:13 -0000	1.2
+++ powerpc/powerpc/lock_machdep.c	2 Jul 2015 10:15:06 -0000
@@ -42,8 +42,6 @@ __mp_lock_init(struct __mp_lock *lock)
 extern int __mp_lock_spinout;
 #endif
 
-#define SPINLOCK_SPIN_HOOK /**/
-
 static __inline void
 __mp_lock_spin(struct __mp_lock *mpl)
 {
Index: powerpc/powerpc/mutex.S
===================================================================
RCS file: powerpc/powerpc/mutex.S
diff -N powerpc/powerpc/mutex.S
--- powerpc/powerpc/mutex.S	18 Jun 2014 18:42:29 -0000	1.16
+++ /dev/null	1 Jan 1970 00:00:00 -0000
@@ -1,182 +0,0 @@
-/*	$OpenBSD: mutex.S,v 1.16 2014/06/18 18:42:29 kettenis Exp $	*/
-
-/*
- * Copyright (c) 2007 Dale Rahn
- * Copyright (c) 2007 Mark Kettenis
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "assym.h"
-
-#include <machine/asm.h>
-
-/* XXX */
-#define GET_CPUINFO(r)	mfsprg r,0
-
-ENTRY(__mtx_init)
-	li %r5,0
-	stw %r4,MTX_WANTIPL(%r3)
-	stw %r5,MTX_OLDCPL(%r3)
-	stw %r5,MTX_OWNER(%r3)
-	blr
-
-
-ENTRY(mtx_enter)
-	stwu %r1,-16(%r1)		# reserve stack
-	mflr %r0
-	stw %r0,20(%r1)			# save return address
-.L_retry:
-	stw %r3, 12(%r1)
-	lwz %r3,MTX_WANTIPL(%r3)	# load new ipl
-	bl _C_LABEL(splraise)
-	mr %r7, %r3
-	GET_CPUINFO(%r4)
-	lwz %r3,12(%r1)
-	li %r5,MTX_OWNER		# load offset constant
-	lwarx %r6,%r5,%r3		# load reserve owner
-	cmpwi 0,%r6,0			# test owner == 0
-	beq+ 0,.L_mutex_free		# if owner == 0 branch free
-.L_mutex_locked:
-#ifdef DIAGNOSTIC
-	cmpl 0,%r4,%r6
-	beq- .L_mutex_selflocked
-#endif
-	stw %r3,12(%r1)			# save mtx during lcsplx
-	la %r4,12(%r1)
-	stwcx. %r3,0,%r4		# unreserve owner
-	mr %r3,%r7			# move old cpl to arg0
-	bl _C_LABEL(lcsplx)		# call splx on old cpl
-	lwz %r3,12(%r1)
-	b .L_retry
-
-.L_mutex_free:
-	stwcx. %r4,%r5,%r3		# old owner was 0 cond store
-	bne- .L_mutex_locked		# branch if reserve cancelled
-	isync				# memory barrier
-#ifdef DIAGNOSTIC
-	lwz %r6,CI_MUTEX_LEVEL(%r4)
-	addi %r6,%r6,1			# curpcu->ci_mutex_level++
-	stw %r6,CI_MUTEX_LEVEL(%r4)
-#endif
-	stw %r7,MTX_OLDCPL(%r3)		# save old ipl
-	lwz %r0,20(%r1)			# load return address
-	mtlr %r0
-	addi %r1,%r1,16			# restore stack
-	blr
-
-#ifdef DIAGNOSTIC
-.L_mutex_selflocked:
-	mr %r5, %r3
-	lis %r3,.L_paniclocked@ha
-	la %r3,.L_paniclocked@l(%r3)
-	bl panic
-.L_paniclocked:
-	.string "mtx_enter: recursed %x %x\n"
-#endif
-
-
-ENTRY(mtx_enter_try)
-	stwu %r1,-16(%r1)		# reserve stack
-	mflr %r0
-	stw %r0,20(%r1)			# save return address
-	stw %r3, 12(%r1)
-	lwz %r3,MTX_WANTIPL(%r3)	# load new ipl
-	bl _C_LABEL(splraise)
-	mr %r7, %r3
-	GET_CPUINFO(%r4)
-	lwz %r3,12(%r1)
-	li %r5,MTX_OWNER		# load offset constant
-	lwarx %r6,%r5,%r3		# load reserve owner
-	cmpwi 0,%r6,0			# test owner == 0
-	beq+ 0,.L_mutex_try_free	# if owner == 0 branch free
-.L_mutex_try_locked:
-#ifdef DIAGNOSTIC
-	cmpl 0,%r4,%r6
-	beq- .L_mutex_try_selflocked
-#endif
-	stw %r3,12(%r1)			# save mtx during lcsplx
-	la %r4,12(%r1)
-	stwcx. %r3,0,%r4		# unreserve owner
-	mr %r3,%r7			# move old cpl to arg0
-	bl _C_LABEL(lcsplx)		# call splx on old cpl
-
-	lwz %r0,20(%r1)			# load return address
-	mtlr %r0
-	addi %r1,%r1,16			# restore stack
-	li %r3,0			# return zero
-	blr
-
-.L_mutex_try_free:
-	stwcx. %r4,%r5,%r3		# old owner was 0 cond store
-	bne- .L_mutex_try_locked	# branch if reserve cancelled
-	isync				# memory barrier
-#ifdef DIAGNOSTIC
-	lwz %r6,CI_MUTEX_LEVEL(%r4)
-	addi %r6,%r6,1			# curpcu->ci_mutex_level++
-	stw %r6,CI_MUTEX_LEVEL(%r4)
-#endif
-	stw %r7,MTX_OLDCPL(%r3)		# save old ipl
-	lwz %r0,20(%r1)			# load return address
-	mtlr %r0
-	addi %r1,%r1,16			# restore stack
-	li %r3,1			# return nonzero
-	blr
-
-#ifdef DIAGNOSTIC
-.L_mutex_try_selflocked:
-	mr %r5, %r3
-	lis %r3,.L_panictrylocked@ha
-	la %r3,.L_panictrylocked@l(%r3)
-	bl panic
-.L_panictrylocked:
-	.string "mtx_enter_try: recursed %x %x\n"
-#endif
-
-
-ENTRY(mtx_leave)
-#ifdef DIAGNOSTIC
-	lwz %r6,MTX_OWNER(%r3)
-	cmpwi 0,%r6,0			# test owner == 0
-
-	beq- .L_mutex_notlocked
-#endif
-	li %r4,0
-	lwz %r5,MTX_OLDCPL(%r3)
-	stw %r4,MTX_OLDCPL(%r3)
-	sync				# memory barrier
-	stw %r4,MTX_OWNER(%r3)
-	GET_CPUINFO(%r4)
-#ifdef DIAGNOSTIC
-	lwz %r6,CI_MUTEX_LEVEL(%r4)
-	addi %r6,%r6,-1			# curpcu->ci_mutex_level--
-	stw %r6,CI_MUTEX_LEVEL(%r4)
-#endif
-	mr %r3,%r5
-	lwz %r5,CI_CPL(%r4)
-	cmpl 0,%r3,%r5
-	beq 1f
-	b _C_LABEL(lcsplx)
-1:
-	blr
-
-#ifdef DIAGNOSTIC
-.L_mutex_notlocked:
-	GET_CPUINFO(%r4)
-	mr %r5, %r3
-	lis %r3,.L_panicnotlocked@ha
-	la %r3,.L_panicnotlocked@l(%r3)
-	bl panic
-.L_panicnotlocked:
-	.string "mtx_leave: not locked %x %x\n"
-#endif
Index: powerpc/powerpc/mutex.c
===================================================================
RCS file: powerpc/powerpc/mutex.c
diff -N powerpc/powerpc/mutex.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ powerpc/powerpc/mutex.c	2 Jul 2015 10:15:06 -0000
@@ -0,0 +1,150 @@
+/*	$OpenBSD: mutex.c,v 1.14 2015/04/17 12:38:54 dlg Exp $	*/
+
+/*
+ * Copyright (c) 2004 Artur Grabowski
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mutex.h>
+#include <sys/atomic.h>
+
+#include <machine/intr.h>
+#include <machine/lock.h>
+
+#include <ddb/db_output.h>
+
+void
+__mtx_init(struct mutex *mtx, int ipl)
+{
+	mtx->mtx_owner = NULL;
+	mtx->mtx_wantipl = ipl;
+	mtx->mtx_oldipl = IPL_NONE;
+}
+
+#ifdef MULTIPROCESSOR
+#if defined(MP_LOCKDEBUG)
+#ifndef DDB
+#error "MP_LOCKDEBUG requires DDB"
+#endif
+
+/* CPU-dependent timing, this needs to be settable from ddb. */
+extern int __mp_lock_spinout;
+#endif
+
+void
+mtx_enter(struct mutex *mtx)
+{
+#if defined(MP_LOCKDEBUG)
+	int ticks = __mp_lock_spinout;
+#endif
+
+	while (mtx_enter_try(mtx) == 0) {
+		SPINLOCK_SPIN_HOOK;
+
+#if defined(MP_LOCKDEBUG)
+		if (--ticks == 0) {
+			db_printf("%s: %p lock spun out", __func__, mtx);
+			Debugger();
+		}
+#endif
+	}
+}
+
+int
+mtx_enter_try(struct mutex *mtx)
+{
+	struct cpu_info *owner, *ci = curcpu();
+	int s;
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		s = splraise(mtx->mtx_wantipl);
+
+	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
+#ifdef DIAGNOSTIC
+	if (__predict_false(owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+	if (owner == NULL) {
+		if (mtx->mtx_wantipl != IPL_NONE)
+			mtx->mtx_oldipl = s;
+#ifdef DIAGNOSTIC
+		ci->ci_mutex_level++;
+#endif
+		membar_enter();
+		return (1);
+	}
+
+	if (mtx->mtx_wantipl != IPL_NONE)
+		splx(s);
+
+	return (0);
+}
+#else
+void
+mtx_enter(struct mutex *mtx)
+{
+	struct cpu_info *ci = curcpu();
+
+#ifdef DIAGNOSTIC
+	if (__predict_false(mtx->mtx_owner == ci))
+		panic("mtx %p: locking against myself", mtx);
+#endif
+	if (mtx->mtx_wantipl != IPL_NONE)
+		mtx->mtx_oldipl = splraise(mtx->mtx_wantipl);
+
+	mtx->mtx_owner = ci;
+
+#ifdef DIAGNOSTIC
+	ci->ci_mutex_level++;
+#endif
+}
+
+int
+mtx_enter_try(struct mutex *mtx)
+{
+	mtx_enter(mtx);
+	return (1);
+}
+#endif
+
+void
+mtx_leave(struct mutex *mtx)
+{
+	int s;
+
+	MUTEX_ASSERT_LOCKED(mtx);
+
+#ifdef MULTIPROCESSOR
+	membar_exit();
+#endif
+#ifdef DIAGNOSTIC
+	curcpu()->ci_mutex_level--;
+#endif
+
+	s = mtx->mtx_oldipl;
+	mtx->mtx_owner = NULL;
+	if (mtx->mtx_wantipl != IPL_NONE)
+		splx(s);
+}
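
For reference, not part of the diff: a minimal sketch of how kernel code consumes the mutex API touched above. The example_softc structure and the example_attach()/example_intr() names are hypothetical; the mtx_init()/mtx_enter()/mtx_leave() interface itself is unchanged by the assembly-to-C conversion, only the struct mutex member order behind MUTEX_INITIALIZER() and the mtx_oldcpl to mtx_oldipl rename differ.

/*
 * Usage sketch (hypothetical example_* names, not from this diff).
 * MUTEX_INITIALIZER() now fills the members in the new order:
 * owner, wantipl, oldipl.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>

/* Statically initialized mutex that blocks interrupts up to IPL_VM. */
struct mutex example_mtx = MUTEX_INITIALIZER(IPL_VM);

struct example_softc {
	struct mutex	sc_mtx;
	unsigned int	sc_count;
};

void
example_attach(struct example_softc *sc)
{
	/* Run-time initialization; IPL_NET blocks network interrupts. */
	mtx_init(&sc->sc_mtx, IPL_NET);
	sc->sc_count = 0;
}

int
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	/*
	 * On MULTIPROCESSOR kernels mtx_enter() raises the IPL to
	 * mtx_wantipl and spins, via mtx_enter_try()'s atomic_cas_ptr()
	 * on mtx_owner, until it wins the lock; mtx_leave() clears the
	 * owner and restores the saved IPL.
	 */
	mtx_enter(&sc->sc_mtx);
	sc->sc_count++;
	mtx_leave(&sc->sc_mtx);

	return (1);
}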