Index: arch/amd64/amd64/lock_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/lock_machdep.c,v
retrieving revision 1.10
diff -u -p -r1.10 lock_machdep.c
--- arch/amd64/amd64/lock_machdep.c	19 Mar 2016 11:34:22 -0000	1.10
+++ arch/amd64/amd64/lock_machdep.c	24 Feb 2017 03:33:00 -0000
@@ -19,6 +19,8 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/atomic.h>
+#include <sys/mplockdebug.h>
 
 #include <machine/atomic.h>
 #include <machine/lock.h>
@@ -30,36 +32,41 @@ void
 __mp_lock_init(struct __mp_lock *mpl)
 {
 	memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus));
+	mpl->mpl_owner = NULL;
 	mpl->mpl_users = 0;
 	mpl->mpl_ticket = 0;
 }
 
-#if defined(MP_LOCKDEBUG)
-#ifndef DDB
-#error "MP_LOCKDEBUG requires DDB"
-#endif
-
+#ifdef MP_LOCKDEBUG
 /* CPU-dependent timing, needs this to be settable from ddb. */
 extern int __mp_lock_spinout;
 #endif
 
-static __inline void
+static inline void
 __mp_lock_spin(struct __mp_lock *mpl, u_int me)
 {
-#ifndef MP_LOCKDEBUG
-	while (mpl->mpl_ticket != me)
-		SPINLOCK_SPIN_HOOK;
-#else
-	int nticks = __mp_lock_spinout;
+#ifdef MP_LOCKDEBUG
+	struct cpu_info *owner = NULL;
+	unsigned int spins = __mp_lock_spinout;
+#endif
 
-	while (mpl->mpl_ticket != me && --nticks > 0)
-		SPINLOCK_SPIN_HOOK;
+	while (mpl->mpl_ticket != me) {
+#ifdef MP_LOCKDEBUG
+		struct cpu_info *them = mpl->mpl_owner;
 
+		if (owner != them) {
+			owner = them;
+			spins = __mp_lock_spinout;
+		} else if (--spins == 0) {
+			/* check for deadlock */
+			lock_check(owner);
+			spins = __mp_lock_spinout;
+		}
+#endif
 
-	if (nticks == 0) {
-		db_printf("__mp_lock(%p): lock spun out", mpl);
-		Debugger();
+		SPINLOCK_SPIN_HOOK;
 	}
-#endif
+
+	mpl->mpl_owner = curcpu();
 }
 
 static inline u_int
@@ -80,8 +87,10 @@ __mp_lock(struct __mp_lock *mpl)
 	long rf = read_rflags();
 
 	disable_intr();
-	if (cpu->mplc_depth++ == 0)
+	if (cpu->mplc_depth++ == 0) {
+		lock_enter(mpl, LOCK_TYPE_MPLOCK);
 		cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1);
+	}
 	write_rflags(rf);
 
 	__mp_lock_spin(mpl, cpu->mplc_ticket);
@@ -101,8 +110,11 @@ __mp_unlock(struct __mp_lock *mpl)
 #endif
 
 	disable_intr();
-	if (--cpu->mplc_depth == 0)
+	if (--cpu->mplc_depth == 0) {
+		mpl->mpl_owner = NULL;
 		mpl->mpl_ticket++;
+		lock_leave(mpl, LOCK_TYPE_MPLOCK);
+	}
 	write_rflags(rf);
 }
 
@@ -116,7 +128,9 @@ __mp_release_all(struct __mp_lock *mpl)
 	disable_intr();
 	rv = cpu->mplc_depth;
 	cpu->mplc_depth = 0;
+	mpl->mpl_owner = NULL;
 	mpl->mpl_ticket++;
+	lock_leave(mpl, LOCK_TYPE_MPLOCK);
 	write_rflags(rf);
 
 	return (rv);
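A note on the watchdog in __mp_lock_spin() above: the spinout budget only
counts down while the observed owner stays the same. Any change of hands
resets the counter, so a busy but live lock never trips lock_check(). The
following is a standalone userland sketch of just that counter logic, with
kernel types stubbed out and all names invented; it is an illustration, not
kernel code.

#include <stdio.h>

struct cpu_info { int unit; };

static const int spinout = 3;	/* tiny budget for the demo */

static void
lock_check(struct cpu_info *owner)
{
	printf("  -> possible deadlock against cpu%d\n", owner->unit);
}

int
main(void)
{
	struct cpu_info cpus[2] = { { 0 }, { 1 } };
	/* owner observed on each spin: cpu0 briefly, then cpu1 stuck */
	struct cpu_info *seen[] = {
		&cpus[0], &cpus[0],
		&cpus[1], &cpus[1], &cpus[1], &cpus[1], &cpus[1],
	};
	struct cpu_info *owner = NULL;
	int spins = spinout;
	size_t i;

	for (i = 0; i < sizeof(seen) / sizeof(seen[0]); i++) {
		struct cpu_info *them = seen[i];

		if (owner != them) {
			owner = them;		/* progress: reset budget */
			spins = spinout;
		} else if (--spins == 0) {
			lock_check(owner);	/* budget burned on one owner */
			spins = spinout;
		}
		printf("spin %zu: owner cpu%d, budget %d\n",
		    i, them->unit, spins);
	}
	return (0);
}

Compiled on its own this prints the budget draining only while cpu1 appears
stuck, which is the behaviour the kernel loop relies on.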
Index: arch/amd64/amd64/mutex.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/mutex.S,v
retrieving revision 1.9
diff -u -p -r1.9 mutex.S
--- arch/amd64/amd64/mutex.S	2 Jun 2013 01:55:52 -0000	1.9
+++ arch/amd64/amd64/mutex.S	24 Feb 2017 03:33:00 -0000
@@ -1,157 +0,0 @@
-/*	$OpenBSD: mutex.S,v 1.9 2013/06/02 01:55:52 kettenis Exp $	*/
-
-/*
- * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "assym.h"
-
-#include <machine/asm.h>
-#include <machine/intr.h>
-#include <machine/param.h>
-#include <machine/psl.h>
-#include <machine/specialreg.h>
-#include <machine/frameasm.h>
-
-/*
- * Yeah, we don't really need to implement mtx_init here, but let's keep
- * all the functions in the same place.
- */
-ENTRY(__mtx_init)
-	movl	%esi, MTX_WANTIPL(%rdi)
-	movl	$0, MTX_OLDIPL(%rdi)
-	movq	$0, MTX_OWNER(%rdi)
-	ret
-
-ENTRY(mtx_enter)
-1:	movl	MTX_WANTIPL(%rdi), %eax
-	movq	CPUVAR(SELF), %rcx
-	movl	CPU_INFO_ILEVEL(%rcx), %edx	# oipl = cpl;
-	cmpl	%eax, %edx			# if (cpl < mtx->mtx_wantipl)
-	cmovge	%edx, %eax
-	movl	%eax, CPU_INFO_ILEVEL(%rcx)	# cpl = mtx->mtx_wantipl;
-	/*
-	 * %edx - the old ipl
-	 * %rcx - curcpu()
-	 */
-	xorq	%rax, %rax
-#ifdef MULTIPROCESSOR
-	lock
-#endif
-	cmpxchgq %rcx, MTX_OWNER(%rdi)		# test_and_set(mtx->mtx_owner)
-	jne	2f
-	movl	%edx, MTX_OLDIPL(%rdi)
-#ifdef DIAGNOSTIC
-	incl	CPU_INFO_MUTEX_LEVEL(%rcx)
-#endif
-	ret
-
-	/* We failed to obtain the lock. splx, spin and retry. */
-2:	pushq	%rdi
-	movl	%edx, %edi
-	call	_C_LABEL(spllower)
-	popq	%rdi
-#ifdef DIAGNOSTIC
-	movq	CPUVAR(SELF), %rcx
-	cmpq	MTX_OWNER(%rdi), %rcx
-	je	4f
-#endif
-3:
-	movq	MTX_OWNER(%rdi), %rax
-	testq	%rax, %rax
-	jz	1b
-	jmp	3b
-#ifdef DIAGNOSTIC
-4:	movq	$5f, %rdi
-	call	_C_LABEL(panic)
-5:	.asciz	"mtx_enter: locking against myself"
-#endif
-
-ENTRY(mtx_enter_try)
-1:	movl	MTX_WANTIPL(%rdi), %eax
-	movq	CPUVAR(SELF), %rcx
-	movl	CPU_INFO_ILEVEL(%rcx), %edx	# oipl = cpl;
-	cmpl	%eax, %edx			# if (cpl < mtx->mtx_wantipl)
-	cmovge	%edx, %eax
-	movl	%eax, CPU_INFO_ILEVEL(%rcx)	# cpl = mtx->mtx_wantipl;
-	/*
-	 * %edx - the old ipl
-	 * %rcx - curcpu()
-	 */
-	xorq	%rax, %rax
-#ifdef MULTIPROCESSOR
-	lock
-#endif
-	cmpxchgq %rcx, MTX_OWNER(%rdi)		# test_and_set(mtx->mtx_owner)
-	jne	2f
-	movl	%edx, MTX_OLDIPL(%rdi)
-#ifdef DIAGNOSTIC
-	incl	CPU_INFO_MUTEX_LEVEL(%rcx)
-#endif
-	movq	$1, %rax
-	ret
-
-	/* We failed to obtain the lock. splx and return 0. */
-2:	pushq	%rdi
-	movl	%edx, %edi
-	call	_C_LABEL(spllower)
-	popq	%rdi
-#ifdef DIAGNOSTIC
-	movq	CPUVAR(SELF), %rcx
-	cmpq	MTX_OWNER(%rdi), %rcx
-	je	3f
-#endif
-	xorq	%rax, %rax
-	ret
-
-#ifdef DIAGNOSTIC
-3:	movq	$4f, %rdi
-	call	_C_LABEL(panic)
-4:	.asciz	"mtx_enter_try: locking against myself"
-#endif
-
-
-ENTRY(mtx_leave)
-	movq	%rdi, %rax
-#ifdef DIAGNOSTIC
-	movq	CPUVAR(SELF), %rcx
-	cmpq	MTX_OWNER(%rax), %rcx
-	jne	2f
-	decl	CPU_INFO_MUTEX_LEVEL(%rcx)
-#endif
-	xorq	%rcx, %rcx
-	movl	MTX_OLDIPL(%rax), %edi
-	movl	%ecx, MTX_OLDIPL(%rax)
-	movq	%rcx, MTX_OWNER(%rax)
-	cmpl	%edi, CPUVAR(ILEVEL)
-	je	1f
-	call	_C_LABEL(spllower)
-1:
-	ret
-
-#ifdef DIAGNOSTIC
-2:	movq	$3f, %rdi
-	call	_C_LABEL(panic)
-3:	.asciz	"mtx_leave: lock not held"
-#endif
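For reference, the retired assembly above folded splraise() and the owner
test-and-set into one routine around a locked cmpxchgq. The C replacement in
the next file expresses the same acquire/release protocol with
atomic_cas_ptr() plus membar_enter()/membar_exit(). A rough userland
equivalent written against C11 stdatomic (an analogy only, not the kernel
API) looks like this:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

struct mutex {
	_Atomic(void *) owner;
};

static int
try_acquire(struct mutex *m, void *self)
{
	void *expected = NULL;

	/* the test-and-set the old cmpxchgq performed */
	return (atomic_compare_exchange_strong_explicit(&m->owner,
	    &expected, self, memory_order_acquire, memory_order_relaxed));
}

static void
release(struct mutex *m)
{
	/* the release pairs with the acquire above, like membar_exit() */
	atomic_store_explicit(&m->owner, NULL, memory_order_release);
}

int
main(void)
{
	struct mutex m = { NULL };
	int self;

	assert(try_acquire(&m, &self));
	assert(!try_acquire(&m, &self));	/* already owned */
	release(&m);
	assert(try_acquire(&m, &self));
	return (0);
}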
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +void +__mtx_init(struct mutex *mtx, int wantipl) +{ + mtx->mtx_owner = NULL; + mtx->mtx_oldipl = IPL_NONE; + mtx->mtx_wantipl = wantipl; +} + +#ifdef MULTIPROCESSOR +extern int __mp_lock_spinout; + +void +mtx_enter(struct mutex *mtx) +{ +#ifdef MP_LOCKDEBUG + struct cpu_info *owner = NULL; + unsigned int spins = __mp_lock_spinout; +#endif + + while (mtx_enter_try(mtx) == 0) { +#ifdef MP_LOCKDEBUG + struct cpu_info *them = mtx->mtx_owner; + if (owner != them) { + owner = them; + spins = __mp_lock_spinout; + } else if (--spins == 0) { + /* check for deadlock */ + lock_check(owner); + spins = __mp_lock_spinout; + } +#endif + + SPINLOCK_SPIN_HOOK; + } +} + +int +mtx_enter_try(struct mutex *mtx) +{ + struct cpu_info *owner, *ci = curcpu(); + int s; + + if (mtx->mtx_wantipl != IPL_NONE) + s = splraise(mtx->mtx_wantipl); + + lock_enter(mtx, LOCK_TYPE_MUTEX); + owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci); +#ifdef DIAGNOSTIC + if (__predict_false(owner == ci)) + panic("mtx %p: locking against myself", mtx); +#endif + if (owner == NULL) { + membar_enter(); + if (mtx->mtx_wantipl != IPL_NONE) + mtx->mtx_oldipl = s; +#ifdef DIAGNOSTIC + ci->ci_mutex_level++; +#endif + return (1); + } + + lock_leave(mtx, LOCK_TYPE_MUTEX); + if (mtx->mtx_wantipl != IPL_NONE) + splx(s); + + return (0); +} +#else +void +mtx_enter(struct mutex *mtx) +{ + struct cpu_info *ci = curcpu(); + +#ifdef DIAGNOSTIC + if (__predict_false(mtx->mtx_owner == ci)) + panic("mtx %p: locking against myself", mtx); +#endif + if (mtx->mtx_wantipl != IPL_NONE) + mtx->mtx_oldipl = splraise(mtx->mtx_wantipl); + + mtx->mtx_owner = ci; + +#ifdef DIAGNOSTIC + ci->ci_mutex_level++; +#endif +} + +int +mtx_enter_try(struct mutex *mtx) +{ + mtx_enter(mtx); + return (1); +} +#endif + +void +mtx_leave(struct mutex *mtx) +{ + int s; + + MUTEX_ASSERT_LOCKED(mtx); + +#ifdef DIAGNOSTIC + curcpu()->ci_mutex_level--; +#endif + + s = mtx->mtx_oldipl; +#ifdef MULTIPROCESSOR + membar_exit(); + lock_leave(mtx, LOCK_TYPE_MUTEX); +#endif + mtx->mtx_owner = NULL; + if (mtx->mtx_wantipl != IPL_NONE) + splx(s); +} Index: arch/amd64/conf/GENERIC.MP =================================================================== RCS file: /cvs/src/sys/arch/amd64/conf/GENERIC.MP,v retrieving revision 1.11 diff -u -p -r1.11 GENERIC.MP --- arch/amd64/conf/GENERIC.MP 3 Sep 2014 07:44:33 -0000 1.11 +++ arch/amd64/conf/GENERIC.MP 24 Feb 2017 03:33:00 -0000 @@ -3,6 +3,6 @@ include "arch/amd64/conf/GENERIC" option MULTIPROCESSOR -#option MP_LOCKDEBUG +option MP_LOCKDEBUG cpu* at mainbus? 
Index: arch/amd64/conf/files.amd64
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/files.amd64,v
retrieving revision 1.87
diff -u -p -r1.87 files.amd64
--- arch/amd64/conf/files.amd64	21 Jan 2017 10:58:15 -0000	1.87
+++ arch/amd64/conf/files.amd64	24 Feb 2017 03:33:00 -0000
@@ -26,7 +26,7 @@ file	arch/amd64/amd64/fpu.c
 file	arch/amd64/amd64/softintr.c
 file	arch/amd64/amd64/i8259.c
 file	arch/amd64/amd64/cacheinfo.c
-file	arch/amd64/amd64/mutex.S
+file	arch/amd64/amd64/mutex.c
 file	arch/amd64/amd64/vector.S
 file	arch/amd64/amd64/copy.S
 file	arch/amd64/amd64/spl.S
Index: arch/amd64/include/mplock.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/mplock.h,v
retrieving revision 1.3
diff -u -p -r1.3 mplock.h
--- arch/amd64/include/mplock.h	14 Mar 2014 02:08:57 -0000	1.3
+++ arch/amd64/include/mplock.h	24 Feb 2017 03:33:00 -0000
@@ -34,6 +34,7 @@ struct __mp_lock_cpu {
 
 struct __mp_lock {
 	struct __mp_lock_cpu	mpl_cpus[MAXCPUS];
+	struct cpu_info		*mpl_owner;
 	volatile u_int		mpl_ticket;
 	u_int			mpl_users;
 };
Index: arch/amd64/include/mutex.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/mutex.h,v
retrieving revision 1.7
diff -u -p -r1.7 mutex.h
--- arch/amd64/include/mutex.h	29 Mar 2014 18:09:28 -0000	1.7
+++ arch/amd64/include/mutex.h	24 Feb 2017 03:33:00 -0000
@@ -30,7 +30,7 @@
 struct mutex {
 	int mtx_wantipl;
 	int mtx_oldipl;
-	volatile void *mtx_owner;
+	void *mtx_owner;
 };
 
 /*
Index: ddb/db_command.c
===================================================================
RCS file: /cvs/src/sys/ddb/db_command.c,v
retrieving revision 1.71
diff -u -p -r1.71 db_command.c
--- ddb/db_command.c	19 Apr 2016 12:23:25 -0000	1.71
+++ ddb/db_command.c	24 Feb 2017 03:33:00 -0000
@@ -505,6 +505,17 @@ db_extent_print_cmd(db_expr_t addr, int
 	extent_print_all();
 }
 
+#ifdef MP_LOCKDEBUG
+void _lock_stack_print(int (*)(const char *, ...));
+
+/*ARGSUSED*/
+void
+db_locks_print_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *modif)
+{
+	_lock_stack_print(db_printf);
+}
+#endif
+
 /*ARGSUSED*/
 void
 db_pool_print_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *modif)
@@ -562,6 +573,9 @@ struct db_command db_show_cmds[] = {
 	{ "breaks",	db_listbreak_cmd,	0,	NULL },
 	{ "buf",	db_buf_print_cmd,	0,	NULL },
 	{ "extents",	db_extent_print_cmd,	0,	NULL },
+#ifdef MP_LOCKDEBUG
+	{ "locks",	db_locks_print_cmd,	0,	NULL },
+#endif
 	{ "malloc",	db_malloc_print_cmd,	0,	NULL },
 	{ "map",	db_map_print_cmd,	0,	NULL },
 	{ "mbuf",	db_mbuf_print_cmd,	0,	NULL },
Index: kern/kern_lock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_lock.c,v
retrieving revision 1.47
diff -u -p -r1.47 kern_lock.c
--- kern/kern_lock.c	19 Jun 2016 11:54:33 -0000	1.47
+++ kern/kern_lock.c	24 Feb 2017 03:33:00 -0000
@@ -46,6 +46,9 @@ int __mp_lock_spinout = 200000000;
 #endif
 
 #if defined(MULTIPROCESSOR)
+#include <sys/mplockdebug.h>
+#include <machine/cpu.h>
+
 /*
  * Functions for manipulating the kernel_lock.  We put them here
  * so that they show up in profiles.
@@ -82,4 +85,109 @@ _kernel_lock_held(void)
 {
 	return (__mp_lock_held(&kernel_lock));
 }
+
+#ifdef MP_LOCKDEBUG
+
+/*
+ * allocate a lock stack for all possible cpus up front rather than
+ * use bootable cpumem. the main reason for this is cpus are hatched
+ * and use locks before we reach a point in boot where we can call
+ * cpumem_malloc_ncpus.
+ */
+struct lock_stack _lock_stacks[MAXCPUS];
+
+void
+lock_idle(void)
+{
+	struct lock_stack *ls;
+
+	ls = &_lock_stacks[cpu_number()];
+	KASSERTMSG(ls->ls_index == 0,
+	    "cpu%d idle with cpu locks held", (int)cpu_number());
+}
+
+void
+lock_check(struct cpu_info *them)
+{
+	struct cpu_info *self = curcpu();
+	struct lock_stack *src;
+	struct lock_stack *tgt;
+	unsigned int src_idx, tgt_idx;
+	vaddr_t lock;
+
+	KASSERTMSG(self != them, "cpu%d: cannot deadlock against self",
+	    CPU_INFO_UNIT(self));
+
+	src = &_lock_stacks[CPU_INFO_UNIT(self)];
+	tgt = &_lock_stacks[CPU_INFO_UNIT(them)];
+
+	/* look for the lock tgt is trying to acquire */
+	tgt_idx = tgt->ls_index;
+	if (tgt_idx == 0)
+		return;
+
+	lock = tgt->ls_entries[tgt_idx - 1];
+
+	/* check to see if we own the lock they want */
+	for (src_idx = 0; src_idx < src->ls_index; src_idx++) {
+		if (src->ls_entries[src_idx] != lock)
+			continue;
+
+		/* we may have a winner */
+
+		if (tgt->ls_index != tgt_idx ||
+		    lock != tgt->ls_entries[tgt_idx - 1]) {
+			/* tgt has made progress */
+			return;
+		}
+
+		printf("potential deadlock between cpu%u and cpu%u\n",
+		    CPU_INFO_UNIT(self), CPU_INFO_UNIT(them));
+		Debugger();
+	}
+}
+
+#include <machine/db_machdep.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_output.h>
+#include <ddb/db_interface.h>
+
+static inline const char *
+_lock_type_name(unsigned long type)
+{
+	switch (type) {
+	case LOCK_TYPE_MPLOCK:
+		return ("mplock");
+	case LOCK_TYPE_MUTEX:
+		return ("mutex");
+	}
+
+	return ("(unknown!)");
+}
+
+void
+_lock_stack_print(int (*pr)(const char *, ...))
+{
+	unsigned int cpu = cpu_number();
+	struct lock_stack *ls;
+	unsigned int index;
+	vaddr_t lock;
+
+	ls = &_lock_stacks[cpu];
+
+	(*pr)("lock stack at %p on cpu%u\n", ls, cpu);
+
+	for (index = 0; index < ls->ls_index; index++) {
+		lock = ls->ls_entries[index];
+
+		(*pr)("%u: %s ", index,
+		    _lock_type_name(lock & LOCK_TYPE_MASK));
+		db_printsym((db_expr_t)(lock & ~LOCK_TYPE_MASK),
+		    DB_STGY_XTRN, pr);
+		(*pr)("\n");
+	}
+}
+
+#endif /* MP_LOCKDEBUG */
+
 #endif /* MULTIPROCESSOR */
Index: kern/kern_synch.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_synch.c,v
retrieving revision 1.138
diff -u -p -r1.138 kern_synch.c
--- kern/kern_synch.c	31 Jan 2017 12:16:20 -0000	1.138
+++ kern/kern_synch.c	24 Feb 2017 03:33:00 -0000
@@ -50,6 +50,7 @@
 #include <sys/pool.h>
 #include <sys/refcnt.h>
 #include <sys/atomic.h>
+#include <sys/mplockdebug.h>
 #include <machine/spinlock.h>
 #include <ddb/db_output.h>
 
@@ -207,6 +208,7 @@ msleep(const volatile void *ident, struc
 	 */
 	spl = MUTEX_OLDIPL(mtx);
 	MUTEX_OLDIPL(mtx) = splsched();
+	lock_swap();
 	mtx_leave(mtx);
 
 	sleep_finish(&sls, 1);
Index: kern/sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.46
diff -u -p -r1.46 sched_bsd.c
--- kern/sched_bsd.c	14 Feb 2017 10:31:15 -0000	1.46
+++ kern/sched_bsd.c	24 Feb 2017 03:33:00 -0000
@@ -47,6 +47,7 @@
 #include <sys/resourcevar.h>
 #include <uvm/uvm_extern.h>
 #include <sys/sched.h>
+#include <sys/mplockdebug.h>
 
 #ifdef KTRACE
 #include <sys/ktrace.h>
@@ -354,9 +355,10 @@ mi_switch(void)
 	 * Release the kernel_lock, as we are about to yield the CPU.
 	 */
 	sched_count = __mp_release_all_but_one(&sched_lock);
-	if (__mp_lock_held(&kernel_lock))
+	if (__mp_lock_held(&kernel_lock)) {
+		lock_swap();
 		hold_count = __mp_release_all(&kernel_lock);
-	else
+	} else
 		hold_count = 0;
 #endif
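lock_swap() exists because lock_leave() insists on strict LIFO release
order, while mi_switch() above holds [kernel_lock, sched_lock] and has to
drop kernel_lock first. Swapping the top two bookkeeping entries keeps the
checker satisfied without touching the locks themselves. A tiny standalone
model, names invented:

#include <assert.h>

#define LOCK_STACK 7

static void *stack[LOCK_STACK];
static unsigned int depth;

static void
enter(void *l)
{
	assert(depth < LOCK_STACK);
	stack[depth++] = l;
}

static void
leave(void *l)
{
	assert(depth > 0 && stack[depth - 1] == l);	/* LIFO only */
	depth--;
}

static void
swap_top(void)
{
	void *tmp;

	assert(depth >= 2);
	tmp = stack[depth - 2];
	stack[depth - 2] = stack[depth - 1];
	stack[depth - 1] = tmp;
}

int
main(void)
{
	int kernel_lock, sched_lock;

	enter(&kernel_lock);
	enter(&sched_lock);
	swap_top();		/* as mi_switch() does via lock_swap() */
	leave(&kernel_lock);	/* now legal: it is on top */
	leave(&sched_lock);
	return (0);
}

The same reasoning covers the msleep() hunk earlier: the mutex is released
while the kernel lock entry still sits below it on the stack.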
+ */ + +#ifndef _SYS_MPLOCK_DEBUG_H_ +#define _SYS_MPLOCK_DEBUG_H_ + +#ifdef MP_LOCKDEBUG + +#include /* for CACHELINESIZE */ + +#define LOCK_STACK 7 + +/* this gets stuffed in the low bits of the stack values */ +#define LOCK_TYPE_MASK 1UL +#define LOCK_TYPE_MUTEX 0UL +#define LOCK_TYPE_MPLOCK 1UL + +struct lock_stack { + vaddr_t ls_entries[LOCK_STACK]; + unsigned int ls_index; +} __aligned(CACHELINESIZE); + +static inline void +lock_enter(void *lock, unsigned long type) +{ + extern struct lock_stack _lock_stacks[]; + struct lock_stack *ls; + unsigned int index; + + ls = &_lock_stacks[cpu_number()]; + index = ls->ls_index++; + KASSERTMSG(index < LOCK_STACK, "too many locks"); + ls->ls_entries[index] = ((vaddr_t)lock | type); +} + +static inline void +lock_swap(void) +{ + extern struct lock_stack _lock_stacks[]; + struct lock_stack *ls; + unsigned int index; + vaddr_t *entries; + vaddr_t tmp; + + ls = &_lock_stacks[cpu_number()]; + index = ls->ls_index; + KASSERTMSG(index >= 2, "not enough locks to swap"); + index -= 2; + entries = &ls->ls_entries[index]; + + tmp = entries[0]; + entries[0] = entries[1]; + entries[1] = tmp; +} + +static inline void +lock_leave(void *lock, unsigned long type) +{ + extern struct lock_stack _lock_stacks[]; + struct lock_stack *ls; + unsigned int index; + + ls = &_lock_stacks[cpu_number()]; + index = ls->ls_index - 1; + if (ls->ls_entries[index] != ((vaddr_t)lock | type)) + panic("lock %p released out of order", lock); + ls->ls_index = index; +} + +void lock_check(struct cpu_info *); +void lock_idle(void); + +#else /* MP_LOCKDEBUG */ + +#define lock_enter(_lock, _type) /* nothing */ +#define lock_leave(_lock, _type) /* nothing */ +#define lock_swap() /* nothing */ +#define lock_idle() /* nothing */ + +#endif /* MP_LOCKDEBUG */ + +#endif /* _SYS_MPLOCKDEBUG_H_ */ Index: arch/alpha/alpha/lock_machdep.c =================================================================== RCS file: /cvs/src/sys/arch/alpha/alpha/lock_machdep.c,v retrieving revision 1.4 diff -u -p -r1.4 lock_machdep.c --- arch/alpha/alpha/lock_machdep.c 19 Mar 2016 11:34:22 -0000 1.4 +++ arch/alpha/alpha/lock_machdep.c 24 Feb 2017 03:33:00 -0000 @@ -19,6 +19,7 @@ #include #include +#include #include #include @@ -67,22 +68,26 @@ __mp_lock_init(struct __mp_lock *lock) static inline void __mp_lock_spin(struct __mp_lock *mpl) { -#ifndef MP_LOCKDEBUG - while (mpl->mpl_count != 0) - SPINLOCK_SPIN_HOOK; -#else - int nticks = __mp_lock_spinout; - if (!CPU_IS_PRIMARY(curcpu())) - nticks += nticks; +#ifdef MP_LOCKDEBUG + volatile struct cpu_info *owner = NULL; + unsigned int spins = __mp_lock_spinout; +#endif - while (mpl->mpl_count != 0 && --nticks > 0) - SPINLOCK_SPIN_HOOK; + while (mpl->mpl_count != 0) { +#ifdef MP_LOCKDEBUG + volatile struct cpu_info *them = mpl->mpl_cpu; + if (owner != them) { + owner = them; + spins = __mp_lock_spinout; + } else if (--spins == 0) { + /* check for deadlock */ + lock_check((struct cpu_info *)owner); + spins = __mp_lock_spinout; + } +#endif - if (nticks == 0) { - db_printf("__mp_lock(%p): lock spun out", mpl); - Debugger(); + SPINLOCK_SPIN_HOOK; } -#endif } void @@ -102,22 +107,24 @@ __mp_lock(struct __mp_lock *mpl) * safer to clear it and besides, setting mpl_count to 2 on the * first lock makes most of this code much simpler. 
Index: arch/alpha/alpha/lock_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/alpha/alpha/lock_machdep.c,v
retrieving revision 1.4
diff -u -p -r1.4 lock_machdep.c
--- arch/alpha/alpha/lock_machdep.c	19 Mar 2016 11:34:22 -0000	1.4
+++ arch/alpha/alpha/lock_machdep.c	24 Feb 2017 03:33:00 -0000
@@ -19,6 +19,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/mplockdebug.h>
 
 #include <machine/atomic.h>
 #include <machine/cpu.h>
@@ -67,22 +68,26 @@ __mp_lock_init(struct __mp_lock *lock)
 static inline void
 __mp_lock_spin(struct __mp_lock *mpl)
 {
-#ifndef MP_LOCKDEBUG
-	while (mpl->mpl_count != 0)
-		SPINLOCK_SPIN_HOOK;
-#else
-	int nticks = __mp_lock_spinout;
-	if (!CPU_IS_PRIMARY(curcpu()))
-		nticks += nticks;
+#ifdef MP_LOCKDEBUG
+	volatile struct cpu_info *owner = NULL;
+	unsigned int spins = __mp_lock_spinout;
+#endif
 
-	while (mpl->mpl_count != 0 && --nticks > 0)
-		SPINLOCK_SPIN_HOOK;
+	while (mpl->mpl_count != 0) {
+#ifdef MP_LOCKDEBUG
+		volatile struct cpu_info *them = mpl->mpl_cpu;
 
+		if (owner != them) {
+			owner = them;
+			spins = __mp_lock_spinout;
+		} else if (--spins == 0) {
+			/* check for deadlock */
+			lock_check((struct cpu_info *)owner);
+			spins = __mp_lock_spinout;
+		}
+#endif
 
-	if (nticks == 0) {
-		db_printf("__mp_lock(%p): lock spun out", mpl);
-		Debugger();
+		SPINLOCK_SPIN_HOOK;
 	}
-#endif
 }
 
 void
@@ -102,22 +107,24 @@ __mp_lock(struct __mp_lock *mpl)
 	 * safer to clear it and besides, setting mpl_count to 2 on the
 	 * first lock makes most of this code much simpler.
 	 */
 
+	s = splhigh();
+	lock_enter(mpl, LOCK_TYPE_MPLOCK);
 	while (1) {
-		s = splhigh();
-		if (__cpu_cas(&mpl->mpl_count, 0, 1) == 0) {
+		if (__cpu_cas(&mpl->mpl_count, 0, 2) == 0) {
 			alpha_mb();
 			mpl->mpl_cpu = ci;
-		}
-
-		if (mpl->mpl_cpu == ci) {
+			break;
+		} else if (mpl->mpl_cpu == ci) {
+			lock_leave(mpl, LOCK_TYPE_MPLOCK);
 			mpl->mpl_count++;
-			splx(s);
 			break;
+		} else {
+			splx(s);
+			__mp_lock_spin(mpl);
+			s = splhigh();
 		}
-		splx(s);
-
-		__mp_lock_spin(mpl);
 	}
+	splx(s);
 }
 
 void
@@ -136,6 +143,7 @@ __mp_unlock(struct __mp_lock *mpl)
 	if (--mpl->mpl_count == 1) {
 		mpl->mpl_cpu = NULL;
 		alpha_mb();
+		lock_leave(mpl, LOCK_TYPE_MPLOCK);
 		mpl->mpl_count = 0;
 	}
 	splx(s);
@@ -157,6 +165,7 @@ __mp_release_all(struct __mp_lock *mpl)
 	s = splhigh();
 	mpl->mpl_cpu = NULL;
 	alpha_mb();
+	lock_leave(mpl, LOCK_TYPE_MPLOCK);
 	mpl->mpl_count = 0;
 	splx(s);
Index: arch/alpha/alpha/mutex.c
===================================================================
RCS file: /cvs/src/sys/arch/alpha/alpha/mutex.c,v
retrieving revision 1.16
diff -u -p -r1.16 mutex.c
--- arch/alpha/alpha/mutex.c	13 Jun 2016 01:26:14 -0000	1.16
+++ arch/alpha/alpha/mutex.c	24 Feb 2017 03:33:00 -0000
@@ -29,12 +29,11 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/mutex.h>
+#include <sys/mplockdebug.h>
 
 #include <sys/atomic.h>
 #include <machine/intr.h>
 
-#include <ddb/db_output.h>
-
 void
 __mtx_init(struct mutex *mtx, int wantipl)
 {
@@ -47,8 +46,27 @@ __mtx_init(struct mutex *mtx, int wantip
 void
 mtx_enter(struct mutex *mtx)
 {
-	while (mtx_enter_try(mtx) == 0)
+#ifdef MP_LOCKDEBUG
+	extern int __mp_lock_spinout;
+	struct cpu_info *owner = NULL;
+	unsigned int spins = __mp_lock_spinout;
+#endif
+
+	while (mtx_enter_try(mtx) == 0) {
+#ifdef MP_LOCKDEBUG
+		struct cpu_info *them = mtx->mtx_owner;
+
+		if (owner != them) {
+			owner = them;
+			spins = __mp_lock_spinout;
+		} else if (--spins == 0) {
+			/* check for deadlock */
+			lock_check(owner);
+			spins = __mp_lock_spinout;
+		}
+#endif
+
 		SPINLOCK_SPIN_HOOK;
+	}
 }
 
 int
@@ -60,6 +78,7 @@ mtx_enter_try(struct mutex *mtx)
 	if (mtx->mtx_wantipl != IPL_NONE)
 		s = splraise(mtx->mtx_wantipl);
 
+	lock_enter(mtx, LOCK_TYPE_MUTEX);
 	owner = atomic_cas_ptr(&mtx->mtx_owner, NULL, ci);
 #ifdef DIAGNOSTIC
 	if (__predict_false(owner == ci))
@@ -75,6 +94,7 @@ mtx_enter_try(struct mutex *mtx)
 		return (1);
 	}
 
+	lock_leave(mtx, LOCK_TYPE_MUTEX);
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
 
@@ -122,6 +142,7 @@ mtx_leave(struct mutex *mtx)
 	s = mtx->mtx_oldipl;
 #ifdef MULTIPROCESSOR
 	membar_exit();
+	lock_leave(mtx, LOCK_TYPE_MUTEX);
 #endif
 	mtx->mtx_owner = NULL;
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
Index: arch/alpha/conf/GENERIC.MP
===================================================================
RCS file: /cvs/src/sys/arch/alpha/conf/GENERIC.MP,v
retrieving revision 1.1
diff -u -p -r1.1 GENERIC.MP
--- arch/alpha/conf/GENERIC.MP	26 Jan 2014 17:40:11 -0000	1.1
+++ arch/alpha/conf/GENERIC.MP	24 Feb 2017 03:33:00 -0000
@@ -3,6 +3,6 @@
 include "arch/alpha/conf/GENERIC"
 
 option		MULTIPROCESSOR
-#option		MP_LOCKDEBUG
+option		MP_LOCKDEBUG
 
 cpu*		at mainbus?
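With the option enabled, the current CPU's lock stack can be dumped from ddb
via the new "show locks" command. The transcript below is a mock-up
assembled from _lock_stack_print()'s format strings; the address and the
symbol names are invented and will differ on a real system:

ddb{0}> show locks
lock stack at 0xffffffff81a2b800 on cpu0
0: mplock kernel_lock
1: mutex timeout_mutex

__mp_lock_spinout remains settable from ddb, which tunes how long a CPU
spins on an unchanged owner before lock_check() runs.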