Index: arch/amd64/amd64/cpu.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/cpu.c,v retrieving revision 1.102 diff -u -p -r1.102 cpu.c --- arch/amd64/amd64/cpu.c 28 Jul 2016 21:57:57 -0000 1.102 +++ arch/amd64/amd64/cpu.c 21 Feb 2017 12:40:05 -0000 @@ -112,6 +112,10 @@ #include #endif /* HIBERNATE */ +#ifdef MULTIPROCESSOR +#include +#endif + int cpu_match(struct device *, void *, void *); void cpu_attach(struct device *, struct device *, void *); int cpu_activate(struct device *, int); @@ -170,7 +174,17 @@ struct cfdriver cpu_cd = { * CPU, on uniprocessors). The CPU info list is initialized to * point at it. */ -struct cpu_info cpu_info_primary = { 0, &cpu_info_primary }; +#ifdef MULTIPROCESSOR +struct lock_log cpu_info_primary_lock_log; +#endif + +struct cpu_info cpu_info_primary = { + .ci_dev = NULL, + .ci_self = &cpu_info_primary, +#ifdef MULTIPROCESSOR + .ci_lock_log = &cpu_info_primary_lock_log, +#endif +}; struct cpu_info *cpu_info_list = &cpu_info_primary; @@ -338,6 +352,9 @@ cpu_attach(struct device *parent, struct if (caa->cpu_role == CPU_ROLE_AP) { ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK|M_ZERO); #if defined(MULTIPROCESSOR) + ci->ci_lock_log = malloc(sizeof(*ci->ci_lock_log), M_DEVBUF, + M_WAITOK|M_ZERO); + if (cpu_info[cpunum] != NULL) panic("cpu at apic id %d already attached?", cpunum); cpu_info[cpunum] = ci; @@ -399,6 +416,8 @@ cpu_attach(struct device *parent, struct printf(": "); + printf("lock log %p %zu ", ci->ci_lock_log, sizeof(*ci->ci_lock_log)); + switch (caa->cpu_role) { case CPU_ROLE_SP: printf("(uniprocessor)\n"); @@ -448,8 +467,8 @@ cpu_attach(struct device *parent, struct cpu_start_secondary(ci); ncpus++; if (ci->ci_flags & CPUF_PRESENT) { - ci->ci_next = cpu_info_list->ci_next; - cpu_info_list->ci_next = ci; + ci->ci_next = cpu_info_list; + cpu_info_list = ci; } #else printf("%s: not started\n", sc->sc_dev.dv_xname); @@ -462,6 +481,9 @@ cpu_attach(struct device *parent, struct cpu_vm_init(ci); #if defined(MULTIPROCESSOR) + mtx_init(&ci->ci_xcall_mtx, IPL_HIGH); + TAILQ_INIT(&ci->ci_xcall_list); + if (mp_verbose) { printf("%s: kstack at 0x%lx for %d bytes\n", sc->sc_dev.dv_xname, kstack, USPACE); Index: arch/amd64/amd64/intr.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/intr.c,v retrieving revision 1.48 diff -u -p -r1.48 intr.c --- arch/amd64/amd64/intr.c 22 Jun 2016 01:12:38 -0000 1.48 +++ arch/amd64/amd64/intr.c 21 Feb 2017 12:40:05 -0000 @@ -527,7 +527,7 @@ intr_handler(struct intrframe *frame, st need_lock = frame->if_ppl < IPL_SCHED; if (need_lock) - __mp_lock(&kernel_lock); + KERNEL_LOCK(); #endif floor = ci->ci_handled_intr_level; ci->ci_handled_intr_level = ih->ih_level; @@ -535,7 +535,7 @@ intr_handler(struct intrframe *frame, st ci->ci_handled_intr_level = floor; #ifdef MULTIPROCESSOR if (need_lock) - __mp_unlock(&kernel_lock); + KERNEL_UNLOCK(); #endif return rc; } @@ -550,7 +550,10 @@ struct intrhand fake_softclock_intrhand; struct intrhand fake_softnet_intrhand; struct intrhand fake_softtty_intrhand; struct intrhand fake_timer_intrhand; +#ifdef MULTIPROCESSOR struct intrhand fake_ipi_intrhand; +struct intrhand fake_xcall_intrhand; +#endif #if NXEN > 0 struct intrhand fake_xen_intrhand; #endif @@ -621,6 +624,15 @@ cpu_intr_init(struct cpu_info *ci) isp->is_handlers = &fake_ipi_intrhand; isp->is_pic = &local_pic; ci->ci_isources[LIR_IPI] = isp; + isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO); + if (isp 
== NULL) + panic("can't allocate fixed interrupt source"); + isp->is_recurse = Xxcallintr; + isp->is_resume = Xxcallintr; + fake_xcall_intrhand.ih_level = IPL_SOFTCLOCK; + isp->is_handlers = &fake_xcall_intrhand; + isp->is_pic = &local_pic; + ci->ci_isources[SIR_XCALL] = isp; #endif #if NXEN > 0 isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO); @@ -699,6 +711,10 @@ splraise(int nlevel) olevel = ci->ci_ilevel; ci->ci_ilevel = MAX(ci->ci_ilevel, nlevel); + +// _lock_log(__return_address(), (void *)(unsigned long)ci->ci_ilevel, +// LOCK_F_TYPE_SPL, LOCK_F_OP_ENTER); + return (olevel); } @@ -717,6 +733,9 @@ spllower(int nlevel) imask = IUNMASK(ci, nlevel); olevel = ci->ci_ilevel; +// _lock_log(__return_address(), (void *)(unsigned long)olevel, +// LOCK_F_TYPE_SPL, LOCK_F_OP_LEAVE); + psl = read_psl(); disable_intr(); @@ -741,6 +760,5 @@ softintr(int sir) { struct cpu_info *ci = curcpu(); - __asm volatile("lock; orq %1, %0" : - "=m"(ci->ci_ipending) : "ir" (1UL << sir)); + x86_atomic_setbits_u64(&ci->ci_ipending, 1UL << sir); } Index: arch/amd64/amd64/ipifuncs.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/ipifuncs.c,v retrieving revision 1.28 diff -u -p -r1.28 ipifuncs.c --- arch/amd64/amd64/ipifuncs.c 23 Nov 2015 22:57:12 -0000 1.28 +++ arch/amd64/amd64/ipifuncs.c 21 Feb 2017 12:40:05 -0000 @@ -65,6 +65,8 @@ void x86_64_ipi_halt(struct cpu_info *); void x86_64_ipi_synch_fpu(struct cpu_info *); void x86_64_ipi_flush_fpu(struct cpu_info *); +void x86_64_ipi_xcall(struct cpu_info *); + #if NVMM > 0 void x86_64_ipi_start_vmm(struct cpu_info *); void x86_64_ipi_stop_vmm(struct cpu_info *); @@ -102,6 +104,7 @@ void (*ipifunc[X86_NIPI])(struct cpu_inf NULL, NULL, #endif + x86_64_ipi_xcall, }; void @@ -113,7 +116,7 @@ void x86_64_ipi_halt(struct cpu_info *ci) { SCHED_ASSERT_UNLOCKED(); - KASSERT(!__mp_lock_held(&kernel_lock)); + KERNEL_ASSERT_UNLOCKED(); fpusave_cpu(ci, 1); disable_intr(); @@ -163,3 +166,13 @@ x86_64_ipi_stop_vmm(struct cpu_info *ci) stop_vmm_on_cpu(ci); } #endif /* NVMM > 0 */ + +void +x86_64_ipi_xcall(struct cpu_info *ci) +{ + /* + * this is an inlining of softintr() because we already have + * curcpu() and the SIR_XCALL bit to set. + */ + x86_atomic_setbits_u64(&ci->ci_ipending, 1UL << SIR_XCALL); +}; Index: arch/amd64/amd64/lock_machdep.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/lock_machdep.c,v retrieving revision 1.10 diff -u -p -r1.10 lock_machdep.c --- arch/amd64/amd64/lock_machdep.c 19 Mar 2016 11:34:22 -0000 1.10 +++ arch/amd64/amd64/lock_machdep.c 21 Feb 2017 12:40:05 -0000 @@ -19,47 +19,58 @@ #include #include +#include +#include -#include #include +#include #include +#if defined(MP_LOCKDEBUG) +#ifndef DDB +#error "MP_LOCKDEBUG requires DDB" +#endif + #include +/* CPU-dependent timing, needs this to be settable from ddb. */ +extern int __mp_lock_spinout; +#endif + void __mp_lock_init(struct __mp_lock *mpl) { memset(mpl->mpl_cpus, 0, sizeof(mpl->mpl_cpus)); + mpl->mpl_owner = NULL; mpl->mpl_users = 0; mpl->mpl_ticket = 0; } -#if defined(MP_LOCKDEBUG) -#ifndef DDB -#error "MP_LOCKDEBUG requires DDB" +static inline void +__mp_lock_spin(struct __mp_lock *mpl, u_int me) +{ +#ifdef MP_LOCKDEBUG + struct cpu_info *owner = NULL; + unsigned int spins = __mp_lock_spinout; #endif -/* CPU-dependent timing, needs this to be settable from ddb. 
*/ -extern int __mp_lock_spinout; + while (mpl->mpl_ticket != me) { +#ifdef MP_LOCKDEBUG + struct cpu_info *them = mpl->mpl_owner; + if (owner != them) { + owner = them; + spins = __mp_lock_spinout; + } else if (--spins == 0) { + /* check for deadlock */ + _lock_check(curcpu(), owner); + spins = __mp_lock_spinout; + } #endif -static __inline void -__mp_lock_spin(struct __mp_lock *mpl, u_int me) -{ -#ifndef MP_LOCKDEBUG - while (mpl->mpl_ticket != me) - SPINLOCK_SPIN_HOOK; -#else - int nticks = __mp_lock_spinout; - - while (mpl->mpl_ticket != me && --nticks > 0) SPINLOCK_SPIN_HOOK; - - if (nticks == 0) { - db_printf("__mp_lock(%p): lock spun out", mpl); - Debugger(); } -#endif + + mpl->mpl_owner = curcpu(); } static inline u_int @@ -80,8 +91,10 @@ __mp_lock(struct __mp_lock *mpl) long rf = read_rflags(); disable_intr(); - if (cpu->mplc_depth++ == 0) + if (cpu->mplc_depth++ == 0) { + lock_enter(mpl, LOCK_TYPE_MPLOCK); cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1); + } write_rflags(rf); __mp_lock_spin(mpl, cpu->mplc_ticket); @@ -101,8 +114,11 @@ __mp_unlock(struct __mp_lock *mpl) #endif disable_intr(); - if (--cpu->mplc_depth == 0) + if (--cpu->mplc_depth == 0) { + mpl->mpl_owner = NULL; mpl->mpl_ticket++; + lock_leave(mpl, LOCK_TYPE_MPLOCK); + } write_rflags(rf); } @@ -113,10 +129,19 @@ __mp_release_all(struct __mp_lock *mpl) long rf = read_rflags(); int rv; +#ifdef MP_LOCKDEBUG + if (!__mp_lock_held(mpl)) { + db_printf("__mp_unlock(%p): not held lock\n", mpl); + Debugger(); + } +#endif + disable_intr(); rv = cpu->mplc_depth; cpu->mplc_depth = 0; + mpl->mpl_owner = NULL; mpl->mpl_ticket++; + lock_leave(mpl, LOCK_TYPE_MPLOCK); write_rflags(rf); return (rv); @@ -143,8 +168,18 @@ __mp_release_all_but_one(struct __mp_loc void __mp_acquire_count(struct __mp_lock *mpl, int count) { - while (count--) - __mp_lock(mpl); + struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()]; + long rf = read_rflags(); + + disable_intr(); + if (cpu->mplc_depth == 0) { + lock_enter(mpl, LOCK_TYPE_MPLOCK); + cpu->mplc_ticket = fetch_and_add(&mpl->mpl_users, 1); + } + cpu->mplc_depth += count; + write_rflags(rf); + + __mp_lock_spin(mpl, cpu->mplc_ticket); } int Index: arch/amd64/amd64/mutex.S =================================================================== RCS file: arch/amd64/amd64/mutex.S diff -N arch/amd64/amd64/mutex.S --- arch/amd64/amd64/mutex.S 2 Jun 2013 01:55:52 -0000 1.9 +++ /dev/null 1 Jan 1970 00:00:00 -0000 @@ -1,157 +0,0 @@ -/* $OpenBSD: mutex.S,v 1.9 2013/06/02 01:55:52 kettenis Exp $ */ - -/* - * Copyright (c) 2004 Artur Grabowski - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "assym.h" - -#include -#include -#include -#include -#include -#include - -/* - * Yeah, we don't really need to implement mtx_init here, but let's keep - * all the functions in the same place. - */ -ENTRY(__mtx_init) - movl %esi, MTX_WANTIPL(%rdi) - movl $0, MTX_OLDIPL(%rdi) - movq $0, MTX_OWNER(%rdi) - ret - -ENTRY(mtx_enter) -1: movl MTX_WANTIPL(%rdi), %eax - movq CPUVAR(SELF), %rcx - movl CPU_INFO_ILEVEL(%rcx), %edx # oipl = cpl; - cmpl %eax, %edx # if (cpl < mtx->mtx_wantipl) - cmovge %edx, %eax - movl %eax, CPU_INFO_ILEVEL(%rcx) # cpl = mtx->mtx_wantipl; - /* - * %edx - the old ipl - * %rcx - curcpu() - */ - xorq %rax, %rax -#ifdef MULTIPROCESSOR - lock -#endif - cmpxchgq %rcx, MTX_OWNER(%rdi) # test_and_set(mtx->mtx_owner) - jne 2f - movl %edx, MTX_OLDIPL(%rdi) -#ifdef DIAGNOSTIC - incl CPU_INFO_MUTEX_LEVEL(%rcx) -#endif - ret - - /* We failed to obtain the lock. splx, spin and retry. */ -2: pushq %rdi - movl %edx, %edi - call _C_LABEL(spllower) - popq %rdi -#ifdef DIAGNOSTIC - movq CPUVAR(SELF), %rcx - cmpq MTX_OWNER(%rdi), %rcx - je 4f -#endif -3: - movq MTX_OWNER(%rdi), %rax - testq %rax, %rax - jz 1b - jmp 3b -#ifdef DIAGNOSTIC -4: movq $5f, %rdi - call _C_LABEL(panic) -5: .asciz "mtx_enter: locking against myself" -#endif - -ENTRY(mtx_enter_try) -1: movl MTX_WANTIPL(%rdi), %eax - movq CPUVAR(SELF), %rcx - movl CPU_INFO_ILEVEL(%rcx), %edx # oipl = cpl; - cmpl %eax, %edx # if (cpl < mtx->mtx_wantipl) - cmovge %edx, %eax - movl %eax, CPU_INFO_ILEVEL(%rcx) # cpl = mtx->mtx_wantipl; - /* - * %edx - the old ipl - * %rcx - curcpu() - */ - xorq %rax, %rax -#ifdef MULTIPROCESSOR - lock -#endif - cmpxchgq %rcx, MTX_OWNER(%rdi) # test_and_set(mtx->mtx_owner) - jne 2f - movl %edx, MTX_OLDIPL(%rdi) -#ifdef DIAGNOSTIC - incl CPU_INFO_MUTEX_LEVEL(%rcx) -#endif - movq $1, %rax - ret - - /* We failed to obtain the lock. splx and return 0. 
*/ -2: pushq %rdi - movl %edx, %edi - call _C_LABEL(spllower) - popq %rdi -#ifdef DIAGNOSTIC - movq CPUVAR(SELF), %rcx - cmpq MTX_OWNER(%rdi), %rcx - je 3f -#endif - xorq %rax, %rax - ret - -#ifdef DIAGNOSTIC -3: movq $4f, %rdi - call _C_LABEL(panic) -4: .asciz "mtx_enter_try: locking against myself" -#endif - - -ENTRY(mtx_leave) - movq %rdi, %rax -#ifdef DIAGNOSTIC - movq CPUVAR(SELF), %rcx - cmpq MTX_OWNER(%rax), %rcx - jne 2f - decl CPU_INFO_MUTEX_LEVEL(%rcx) -#endif - xorq %rcx, %rcx - movl MTX_OLDIPL(%rax), %edi - movl %ecx, MTX_OLDIPL(%rax) - movq %rcx, MTX_OWNER(%rax) - cmpl %edi, CPUVAR(ILEVEL) - je 1f - call _C_LABEL(spllower) -1: - ret - -#ifdef DIAGNOSTIC -2: movq $3f, %rdi - call _C_LABEL(panic) -3: .asciz "mtx_leave: lock not held" -#endif Index: arch/amd64/amd64/softintr.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/softintr.c,v retrieving revision 1.9 diff -u -p -r1.9 softintr.c --- arch/amd64/amd64/softintr.c 28 Aug 2015 00:03:53 -0000 1.9 +++ arch/amd64/amd64/softintr.c 21 Feb 2017 12:40:05 -0000 @@ -168,3 +168,43 @@ softintr_disestablish(void *arg) free(sih, M_DEVBUF, sizeof(*sih)); } + +#ifdef MULTIPROCESSOR +#include + +void +cpu_xcall(struct cpu_info *ci, struct task *t) +{ + if (ci == curcpu()) { + /* execute the task immediately on the local cpu */ + int s = splsoftclock(); + (*t->t_func)(t->t_arg); + splx(s); + } else { + mtx_enter(&ci->ci_xcall_mtx); + TAILQ_INSERT_TAIL(&ci->ci_xcall_list, t, t_entry); + mtx_leave(&ci->ci_xcall_mtx); + + x86_send_ipi(ci, X86_IPI_XCALL); + } +} + +void +cpu_xcall_dispatch(void) +{ + struct cpu_info *ci = curcpu(); + struct task *list; + + mtx_enter(&ci->ci_xcall_mtx); + list = TAILQ_FIRST(&ci->ci_xcall_list); + TAILQ_INIT(&ci->ci_xcall_list); + mtx_leave(&ci->ci_xcall_mtx); + + while (list != NULL) { + struct task *t = list; + list = TAILQ_NEXT(t, t_entry); + + (*t->t_func)(t->t_arg); + } +} +#endif Index: arch/amd64/amd64/vector.S =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/vector.S,v retrieving revision 1.47 diff -u -p -r1.47 vector.S --- arch/amd64/amd64/vector.S 4 Sep 2016 09:22:28 -0000 1.47 +++ arch/amd64/amd64/vector.S 21 Feb 2017 12:40:05 -0000 @@ -1108,3 +1108,11 @@ IDTVEC(softclock) call _C_LABEL(softintr_dispatch) decl CPUVAR(IDEPTH) jmp *%r13 + +IDTVEC(xcallintr) + movl $IPL_SOFTCLOCK, CPUVAR(ILEVEL) + sti + incl CPUVAR(IDEPTH) + call _C_LABEL(cpu_xcall_dispatch) + decl CPUVAR(IDEPTH) + jmp *%r13 Index: arch/amd64/conf/GENERIC.MP =================================================================== RCS file: /cvs/src/sys/arch/amd64/conf/GENERIC.MP,v retrieving revision 1.11 diff -u -p -r1.11 GENERIC.MP --- arch/amd64/conf/GENERIC.MP 3 Sep 2014 07:44:33 -0000 1.11 +++ arch/amd64/conf/GENERIC.MP 21 Feb 2017 12:40:05 -0000 @@ -3,6 +3,6 @@ include "arch/amd64/conf/GENERIC" option MULTIPROCESSOR -#option MP_LOCKDEBUG +option MP_LOCKDEBUG cpu* at mainbus? 
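
To make the new cross-call path easier to review, here is a minimal, hypothetical usage sketch. The example_* names are invented; cpu_xcall(), struct task and the SIR_XCALL plumbing are the ones added in the softintr.c and vector.S hunks above. A task handed to a remote cpu is queued on its ci_xcall_list and an X86_IPI_XCALL is sent; the IPI handler sets SIR_XCALL in ci_ipending and Xxcallintr later runs the queued tasks at IPL_SOFTCLOCK, without taking the kernel lock.

/*
 * Hypothetical one-shot cross call: run example_hello() on cpu "ci"
 * the next time it services its SIR_XCALL soft interrupt.  The
 * cpu_xcall() prototype is assumed to be visible here; the diff does
 * not show which header ends up carrying it.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/task.h>

#include <machine/cpu.h>

static unsigned int example_counter;

static void
example_hello(void *arg)
{
	unsigned int *counter = arg;

	/* runs on the target cpu at IPL_SOFTCLOCK, no KERNEL_LOCK() held */
	(*counter)++;
}

static struct task example_task =
    TASK_INITIALIZER(example_hello, &example_counter);

void
example_kick(struct cpu_info *ci)
{
	/*
	 * On the local cpu the task runs immediately at splsoftclock();
	 * otherwise it is appended to ci->ci_xcall_list and an
	 * X86_IPI_XCALL wakes the target.  Nothing here guards against
	 * queueing the same task twice before it has run.
	 */
	cpu_xcall(ci, &example_task);
}
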
Index: arch/amd64/conf/files.amd64 =================================================================== RCS file: /cvs/src/sys/arch/amd64/conf/files.amd64,v retrieving revision 1.87 diff -u -p -r1.87 files.amd64 --- arch/amd64/conf/files.amd64 21 Jan 2017 10:58:15 -0000 1.87 +++ arch/amd64/conf/files.amd64 21 Feb 2017 12:40:05 -0000 @@ -26,7 +26,6 @@ file arch/amd64/amd64/fpu.c file arch/amd64/amd64/softintr.c file arch/amd64/amd64/i8259.c file arch/amd64/amd64/cacheinfo.c -file arch/amd64/amd64/mutex.S file arch/amd64/amd64/vector.S file arch/amd64/amd64/copy.S file arch/amd64/amd64/spl.S Index: arch/amd64/include/cpu.h =================================================================== RCS file: /cvs/src/sys/arch/amd64/include/cpu.h,v retrieving revision 1.107 diff -u -p -r1.107 cpu.h --- arch/amd64/include/cpu.h 14 Dec 2016 10:30:59 -0000 1.107 +++ arch/amd64/include/cpu.h 21 Feb 2017 12:40:05 -0000 @@ -52,6 +52,9 @@ #include #include +/* for xcalls */ +#include +#include #ifdef _KERNEL /* VMXON region (Intel) */ @@ -171,6 +174,9 @@ struct cpu_info { #ifdef MULTIPROCESSOR struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM]; + struct lock_log *ci_lock_log; + struct mutex ci_xcall_mtx; + struct task_list ci_xcall_list; #endif struct ksensordev ci_sensordev; @@ -281,10 +287,6 @@ extern struct cpu_info cpu_info_primary; #include #endif /* _KERNEL */ - -#ifdef MULTIPROCESSOR -#include -#endif #define aston(p) ((p)->p_md.md_astpending = 1) Index: arch/amd64/include/intr.h =================================================================== RCS file: /cvs/src/sys/arch/amd64/include/intr.h,v retrieving revision 1.29 diff -u -p -r1.29 intr.h --- arch/amd64/include/intr.h 13 Sep 2015 11:48:17 -0000 1.29 +++ arch/amd64/include/intr.h 21 Feb 2017 12:40:05 -0000 @@ -219,6 +219,8 @@ void x86_ipi_handler(void); void x86_setperf_ipi(struct cpu_info *); extern void (*ipifunc[X86_NIPI])(struct cpu_info *); + +extern void Xxcallintr(void); #endif #endif /* !_LOCORE */ Index: arch/amd64/include/intrdefs.h =================================================================== RCS file: /cvs/src/sys/arch/amd64/include/intrdefs.h,v retrieving revision 1.16 diff -u -p -r1.16 intrdefs.h --- arch/amd64/include/intrdefs.h 22 Jun 2016 01:12:38 -0000 1.16 +++ arch/amd64/include/intrdefs.h 21 Feb 2017 12:40:05 -0000 @@ -53,9 +53,10 @@ #define SIR_CLOCK 61 #define SIR_NET 60 #define SIR_TTY 59 +#define SIR_XCALL 58 -#define LIR_XEN 58 -#define LIR_HYPERV 57 +#define LIR_XEN 57 +#define LIR_HYPERV 56 /* * Maximum # of interrupt sources per CPU. 64 to fit in one word. 
@@ -83,13 +84,14 @@ #define X86_IPI_DDB 0x00000080 #define X86_IPI_START_VMM 0x00000100 #define X86_IPI_STOP_VMM 0x00000200 +#define X86_IPI_XCALL 0x00000400 -#define X86_NIPI 10 +#define X86_NIPI 11 #define X86_IPI_NAMES { "halt IPI", "nop IPI", "FPU flush IPI", \ "FPU synch IPI", "TLB shootdown IPI", \ "MTRR update IPI", "setperf IPI", "ddb IPI", \ - "VMM start IPI", "VMM stop IPI" } + "VMM start IPI", "VMM stop IPI", "xcall IPI" } #define IREENT_MAGIC 0x18041969 Index: arch/amd64/include/mplock.h =================================================================== RCS file: /cvs/src/sys/arch/amd64/include/mplock.h,v retrieving revision 1.3 diff -u -p -r1.3 mplock.h --- arch/amd64/include/mplock.h 14 Mar 2014 02:08:57 -0000 1.3 +++ arch/amd64/include/mplock.h 21 Feb 2017 12:40:05 -0000 @@ -34,20 +34,9 @@ struct __mp_lock_cpu { struct __mp_lock { struct __mp_lock_cpu mpl_cpus[MAXCPUS]; + struct cpu_info *mpl_owner; volatile u_int mpl_ticket; u_int mpl_users; }; - -#ifndef _LOCORE - -void __mp_lock_init(struct __mp_lock *); -void __mp_lock(struct __mp_lock *); -void __mp_unlock(struct __mp_lock *); -int __mp_release_all(struct __mp_lock *); -int __mp_release_all_but_one(struct __mp_lock *); -void __mp_acquire_count(struct __mp_lock *, int); -int __mp_lock_held(struct __mp_lock *); - -#endif #endif /* !_MACHINE_MPLOCK_H */ Index: arch/amd64/include/mutex.h =================================================================== RCS file: /cvs/src/sys/arch/amd64/include/mutex.h,v retrieving revision 1.7 diff -u -p -r1.7 mutex.h --- arch/amd64/include/mutex.h 29 Mar 2014 18:09:28 -0000 1.7 +++ arch/amd64/include/mutex.h 21 Feb 2017 12:40:05 -0000 @@ -27,12 +27,6 @@ #ifndef _MACHINE_MUTEX_H_ #define _MACHINE_MUTEX_H_ -struct mutex { - int mtx_wantipl; - int mtx_oldipl; - volatile void *mtx_owner; -}; - /* * To prevent lock ordering problems with the kernel lock, we need to * make sure we block all interrupts that can grab the kernel lock. @@ -43,25 +37,6 @@ struct mutex { #ifdef MULTIPROCESSOR #define __MUTEX_IPL(ipl) \ (((ipl) > IPL_NONE && (ipl) < IPL_TTY) ? 
IPL_TTY : (ipl)) -#else -#define __MUTEX_IPL(ipl) (ipl) #endif - -#define MUTEX_INITIALIZER(ipl) { __MUTEX_IPL((ipl)), 0, NULL } - -void __mtx_init(struct mutex *, int); -#define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl))) - -#define MUTEX_ASSERT_LOCKED(mtx) do { \ - if ((mtx)->mtx_owner != curcpu()) \ - panic("mutex %p not held in %s", (mtx), __func__); \ -} while (0) - -#define MUTEX_ASSERT_UNLOCKED(mtx) do { \ - if ((mtx)->mtx_owner == curcpu()) \ - panic("mutex %p held in %s", (mtx), __func__); \ -} while (0) - -#define MUTEX_OLDIPL(mtx) (mtx)->mtx_oldipl #endif Index: ddb/db_command.c =================================================================== RCS file: /cvs/src/sys/ddb/db_command.c,v retrieving revision 1.71 diff -u -p -r1.71 db_command.c --- ddb/db_command.c 19 Apr 2016 12:23:25 -0000 1.71 +++ ddb/db_command.c 21 Feb 2017 12:40:06 -0000 @@ -505,6 +505,17 @@ db_extent_print_cmd(db_expr_t addr, int extent_print_all(); } +#ifdef MP_LOCKDEBUG +void _lock_log_print(int (*)(const char *, ...)); +/*ARGSUSED*/ +void +db_lock_log_print_cmd(db_expr_t addr, int have_addr, db_expr_t count, + char *modif) +{ + _lock_log_print(db_printf); +} +#endif + /*ARGSUSED*/ void db_pool_print_cmd(db_expr_t addr, int have_addr, db_expr_t count, char *modif) @@ -562,6 +573,9 @@ struct db_command db_show_cmds[] = { { "breaks", db_listbreak_cmd, 0, NULL }, { "buf", db_buf_print_cmd, 0, NULL }, { "extents", db_extent_print_cmd, 0, NULL }, +#ifdef MP_LOCKDEBUG + { "locklog", db_lock_log_print_cmd, 0, NULL }, +#endif { "malloc", db_malloc_print_cmd, 0, NULL }, { "map", db_map_print_cmd, 0, NULL }, { "mbuf", db_mbuf_print_cmd, 0, NULL }, Index: kern/kern_fork.c =================================================================== RCS file: /cvs/src/sys/kern/kern_fork.c,v retrieving revision 1.195 diff -u -p -r1.195 kern_fork.c --- kern/kern_fork.c 12 Feb 2017 04:55:08 -0000 1.195 +++ kern/kern_fork.c 21 Feb 2017 12:40:06 -0000 @@ -665,7 +665,7 @@ void proc_trampoline_mp(void) { SCHED_ASSERT_LOCKED(); - __mp_unlock(&sched_lock); + __mp_unlock(sched_lock); spl0(); SCHED_ASSERT_UNLOCKED(); KERNEL_ASSERT_UNLOCKED(); Index: kern/kern_lock.c =================================================================== RCS file: /cvs/src/sys/kern/kern_lock.c,v retrieving revision 1.47 diff -u -p -r1.47 kern_lock.c --- kern/kern_lock.c 19 Jun 2016 11:54:33 -0000 1.47 +++ kern/kern_lock.c 21 Feb 2017 12:40:06 -0000 @@ -40,46 +40,392 @@ #include #include +#include /* for SPINLOCK_SPIN_HOOK */ + +#if defined(MULTIPROCESSOR) +#include + +struct __mp_lock _kernel_lock; +struct __mp_lock * const kernel_lock = &_kernel_lock; + #ifdef MP_LOCKDEBUG /* CPU-dependent timing, this needs to be settable from ddb. */ int __mp_lock_spinout = 200000000; #endif -#if defined(MULTIPROCESSOR) -/* - * Functions for manipulating the kernel_lock. We put them here - * so that they show up in profiles. 
- */ +#endif /* MULTIPROCESSOR */ + +void +spinlock_enter(struct spinlock *spinlock) +{ +#ifdef MP_LOCKDEBUG + int spins = __mp_lock_spinout; +#endif + + while (spinlock_enter_try(spinlock) == 0) { +#ifdef MP_LOCKDEBUG + if (--spins == 0) + panic("%s: lock %p spun out", __func__, spinlock); +#endif -struct __mp_lock kernel_lock; + SPINLOCK_SPIN_HOOK; + } +} void -_kernel_lock_init(void) +mtx_init(struct mutex *mtx, int wantipl) { - __mp_lock_init(&kernel_lock); +#ifdef MULTIPROCESSOR + mtx->mtx_owner = NULL; + spinlock_init(&mtx->mtx_spinlock); +#endif + mtx->mtx_wantipl = __MUTEX_IPL(wantipl); + mtx->mtx_oldipl = 0; } -/* - * Acquire/release the kernel lock. Intended for use in the scheduler - * and the lower half of the kernel. - */ +int +mtx_enter_try(struct mutex *mtx) +{ +#ifdef MULTIPROCESSOR + struct lock_log_entry *lle; +#endif + int s; + + if (mtx->mtx_wantipl != IPL_NONE) + s = splraise(mtx->mtx_wantipl); + +#ifdef MULTIPROCESSOR + KASSERTMSG(mtx->mtx_owner != curcpu(), + "%p: locking against myself", mtx); + + lle = lock_enter(mtx, LOCK_TYPE_MUTEX); + if (__predict_false(spinlock_enter_try(&mtx->mtx_spinlock) == 0)) { + _lock_log_clear(lle); + + if (mtx->mtx_wantipl != IPL_NONE) + splx(s); + + return (0); + } + + mtx->mtx_oldipl = s; + mtx->mtx_owner = curcpu(); + membar_enter(); +#endif /* MULTIPROCESSOR */ + + return (1); +} + +static inline void +mtx_spin(struct mutex *mtx) +{ +#ifdef MP_LOCKDEBUG + struct cpu_info *owner = NULL; + int spins = __mp_lock_spinout; +#endif + + while (spinlock_enter_try(&mtx->mtx_spinlock) == 0) { +#ifdef MP_LOCKDEBUG + struct cpu_info *them = mtx->mtx_owner; + if (owner != them) { + owner = them; + spins = __mp_lock_spinout; + } else if (--spins == 0) { + /* check for deadlock */ + _lock_check(curcpu(), owner); + spins = __mp_lock_spinout; + } +#endif + + SPINLOCK_SPIN_HOOK; + } +} void -_kernel_lock(void) +mtx_enter(struct mutex *mtx) { - SCHED_ASSERT_UNLOCKED(); - __mp_lock(&kernel_lock); + int s; + + if (mtx->mtx_wantipl != IPL_NONE) + s = splraise(mtx->mtx_wantipl); + +#ifdef MULTIPROCESSOR + KASSERTMSG(mtx->mtx_owner != curcpu(), + "%p: locking against myself", mtx); + + lock_enter(mtx, LOCK_TYPE_MUTEX); + + mtx_spin(mtx); + + mtx->mtx_oldipl = s; + mtx->mtx_owner = curcpu(); + membar_enter(); +#endif /* MULTIPROCESSOR */ } void -_kernel_unlock(void) +mtx_leave(struct mutex *mtx) +{ + int s; + + s = mtx->mtx_oldipl; + +#ifdef MULTIPROCESSOR +#ifdef MP_LOCKDEBUG + MUTEX_ASSERT_LOCKED(mtx); +#endif + + membar_exit(); + mtx->mtx_owner = NULL; + spinlock_leave(&mtx->mtx_spinlock); + + lock_leave(mtx, LOCK_TYPE_MUTEX); +#endif /* MULTIPROCESSOR */ + + if (mtx->mtx_wantipl != IPL_NONE) + splx(s); +} + +#ifdef MULTIPROCESSOR + +const char * +_lock_type_name(unsigned int type) { - __mp_unlock(&kernel_lock); + switch (type) { + case LOCK_TYPE_NONE: + return ("(none)"); + case LOCK_TYPE_MPLOCK: + return ("mplock"); + case LOCK_TYPE_MUTEX: + return ("mutex"); + } + + return "(unknown!)"; } -int -_kernel_lock_held(void) +static inline struct lock_log_entry * +_lock_log_entry(struct lock_log *ll, unsigned int index) { - return (__mp_lock_held(&kernel_lock)); + return (&ll->ll_entries[index & LOCK_LOG_MASK]); } -#endif /* MULTIPROCESSOR */ + +static inline unsigned int +_lle_type(const struct lock_log_entry *lle) +{ + return (lle->lle_flags & LOCK_TYPE_MASK); +} + +static inline unsigned int +_lle_op(const struct lock_log_entry *lle) +{ + return (lle->lle_flags & LOCK_OP_MASK); +} + +struct lock_log_entry * +_lock_log(vaddr_t rip, void *lock, unsigned 
int type, unsigned int op) +{ + struct lock_log *ll; + unsigned int index; + struct lock_log_entry *lle; + extern int db_active; + + if (db_active) + return (NULL); + + ll = curcpu()->ci_lock_log; + index = atomic_inc_int_nv(&ll->ll_index); + lle = _lock_log_entry(ll, index); + + lle->lle_rip = rip; + lle->lle_lock = lock; + lle->lle_flags = type | op; + + return (lle); +} + +void +_lock_log_clear(struct lock_log_entry *lle) +{ + if (lle != NULL) + lle->lle_flags = LOCK_TYPE_NONE | LOCK_OP_LEAVE; +} + +void +_lock_idle(void) +{ + +} + +void +_lock_log_check_others(struct cpu_info *src) +{ + CPU_INFO_ITERATOR cii; + struct cpu_info *tgt; + struct lock_log *src_ll, *tgt_ll; + struct lock_log_entry *src_get, *tgt_get; + struct lock_log_entry *src_own, *tgt_own; + unsigned int src_idx, tgt_idx; + + src_ll = src->ci_lock_log; + src_idx = src_ll->ll_index - 1; + src_get = _lock_log_entry(src_ll, src_idx); + + CPU_INFO_FOREACH(cii, tgt) { + if (src == tgt) { + /* can't deadlock against ourselves */ + continue; + } + + tgt_ll = tgt->ci_lock_log; + tgt_idx = tgt_ll->ll_index - 1; + tgt_get = _lock_log_entry(tgt_ll, tgt_idx); + + if (_lle_type(tgt_get) == LOCK_TYPE_NONE || + _lle_op(tgt_get) == LOCK_OP_LEAVE) { + /* this cpu isnt spinning for a lock, so skip it */ + continue; + } + + while ((tgt_own = _lock_log_entry(tgt_ll, --tgt_idx)) != + tgt_get) { + /* does tgt own the lock src is trying to get? */ + if (_lle_op(tgt_own) == LOCK_OP_LEAVE || + src_get->lle_lock != tgt_own->lle_lock) + continue; + + /* check if tgt owns the lock src is trying to get */ + + src_idx = src_ll->ll_index - 1; + while ((src_own = _lock_log_entry(src_ll, --src_idx)) != + src_get) { + if (_lle_op(src_own) == LOCK_OP_LEAVE || + tgt_get->lle_lock != src_own->lle_lock) + continue; + + /* we have a winner */ + printf("deadlock detected!\n"); + printf("cpu%d: owns %s %p via 0x%lx", + src->ci_cpuid, + _lock_type_name(_lle_type(src_own)), + src_own->lle_lock, src_own->lle_rip); + printf("cpu%d: owns %s %p via 0x%lx", + tgt->ci_cpuid, + _lock_type_name(_lle_type(tgt_own)), + src_own->lle_lock, tgt_own->lle_rip); + printf("cpu%d: wants %s %p at 0x%lx", + src->ci_cpuid, + _lock_type_name(_lle_type(src_get)), + src_get->lle_lock, src_get->lle_rip); + printf("cpu%d: wants %s %p at 0x%lx", + tgt->ci_cpuid, + _lock_type_name(_lle_type(tgt_get)), + tgt_get->lle_lock, tgt_get->lle_rip); + } + } + } +} + +void +_lock_log_check(void) +{ + CPU_INFO_ITERATOR cii; + struct cpu_info *ci; + struct lock_log *ll; + unsigned int index; + struct lock_log_entry *lle; + + CPU_INFO_FOREACH(cii, ci) { + ll = ci->ci_lock_log; + index = ll->ll_index - 1; + lle = _lock_log_entry(ll, index); + + if ((lle->lle_flags & LOCK_TYPE_MASK) == LOCK_TYPE_NONE || + (lle->lle_flags & LOCK_OP_MASK) == LOCK_OP_LEAVE) { + /* this cpu isnt getting a lock */ + continue; + } + + _lock_log_check_others(ci); + } +} + +void +_lock_check(struct cpu_info *self, struct cpu_info *them) +{ + struct lock_log *src_ll = self->ci_lock_log; + struct lock_log *tgt_ll = them->ci_lock_log; + struct lock_log_entry *src_get, *tgt_get, *lle; + unsigned int src_idx, tgt_idx; + void *lock; + + KASSERTMSG(self != them, "cannot deadlock against self"); + + src_idx = src_ll->ll_index; + src_get = _lock_log_entry(src_ll, src_idx); + + tgt_idx = tgt_ll->ll_index; + tgt_get = _lock_log_entry(tgt_ll, tgt_idx); + lock = tgt_get->lle_lock; + + /* if the target isnt "spinning" too, we cant be deadlocked */ + if (tgt_ll->ll_index != tgt_idx && _lle_op(tgt_get) != LOCK_OP_ENTER) + return; + + /* check to 
see if we own the lock they want */ + while ((lle = _lock_log_entry(src_ll, --src_idx)) != src_get) { + if (lock != lle->lle_lock) { + /* ignore other locks */ + continue; + } + + if (_lle_op(lle) == LOCK_OP_LEAVE) { + /* self gave up the lock so cant be deadlocked */ + return; + } + + if (tgt_ll->ll_index != tgt_idx) { + /* tgt has made progress */ + return; + } + + /* we may have a winner */ + printf("potential deadlock between cpu%u and cpu%u\n", + self->ci_cpuid, them->ci_cpuid); + Debugger(); + } +} + +#include +#include +#include +#include + +void +_lock_log_print(int (*pr)(const char *, ...)) +{ + struct lock_log *ll; + unsigned int index; + struct lock_log_entry *last, *lle; + + ll = curcpu()->ci_lock_log; + index = ll->ll_index; + last = _lock_log_entry(ll, index); + + printf("lock log on cpu%d\n", CPU_INFO_UNIT(curcpu())); + + do { + lle = _lock_log_entry(ll, ++index); + if (_lle_type(lle) == LOCK_TYPE_NONE) + continue; + + (*pr)("%u %s %s ", index, (_lle_op(lle) == LOCK_OP_ENTER) ? + "enter" : "leave", _lock_type_name(_lle_type(lle))); + db_printsym((db_expr_t)lle->lle_lock, DB_STGY_XTRN, pr); + (*pr)(" at "); + db_printsym((db_expr_t)lle->lle_rip, DB_STGY_PROC, pr); + (*pr)("\n"); + + } while (lle != last); +} + +#endif /* MULTIPROCESOR */ Index: kern/kern_sched.c =================================================================== RCS file: /cvs/src/sys/kern/kern_sched.c,v retrieving revision 1.45 diff -u -p -r1.45 kern_sched.c --- kern/kern_sched.c 12 Feb 2017 04:55:08 -0000 1.45 +++ kern/kern_sched.c 21 Feb 2017 12:40:06 -0000 @@ -320,7 +320,7 @@ again: * This is kind of like a stupid idle loop. */ #ifdef MULTIPROCESSOR - __mp_unlock(&sched_lock); + __mp_unlock(sched_lock); #endif spl0(); delay(10); Index: kern/kern_synch.c =================================================================== RCS file: /cvs/src/sys/kern/kern_synch.c,v retrieving revision 1.138 diff -u -p -r1.138 kern_synch.c --- kern/kern_synch.c 31 Jan 2017 12:16:20 -0000 1.138 +++ kern/kern_synch.c 21 Feb 2017 12:40:06 -0000 @@ -114,7 +114,7 @@ tsleep(const volatile void *ident, int p KASSERT((priority & ~(PRIMASK | PCATCH)) == 0); #ifdef MULTIPROCESSOR - KASSERT(timo || __mp_lock_held(&kernel_lock)); + KASSERT(timo || __mp_lock_held(kernel_lock)); #endif #ifdef DDB @@ -132,9 +132,9 @@ tsleep(const volatile void *ident, int p s = splhigh(); splx(safepri); #ifdef MULTIPROCESSOR - if (__mp_lock_held(&kernel_lock)) { - hold_count = __mp_release_all(&kernel_lock); - __mp_acquire_count(&kernel_lock, hold_count); + if (__mp_lock_held(kernel_lock)) { + hold_count = __mp_release_all(kernel_lock); + __mp_acquire_count(kernel_lock, hold_count); } #endif splx(s); @@ -184,9 +184,9 @@ msleep(const volatile void *ident, struc MUTEX_OLDIPL(mtx) = safepri; mtx_leave(mtx); #ifdef MULTIPROCESSOR - if (__mp_lock_held(&kernel_lock)) { - hold_count = __mp_release_all(&kernel_lock); - __mp_acquire_count(&kernel_lock, hold_count); + if (__mp_lock_held(kernel_lock)) { + hold_count = __mp_release_all(kernel_lock); + __mp_acquire_count(kernel_lock, hold_count); } #endif if ((priority & PNORELOCK) == 0) { Index: kern/sched_bsd.c =================================================================== RCS file: /cvs/src/sys/kern/sched_bsd.c,v retrieving revision 1.46 diff -u -p -r1.46 sched_bsd.c --- kern/sched_bsd.c 14 Feb 2017 10:31:15 -0000 1.46 +++ kern/sched_bsd.c 21 Feb 2017 12:40:06 -0000 @@ -57,7 +57,10 @@ int lbolt; /* once a second sleep addr int rrticks_init; /* # of hardclock ticks per roundrobin() */ #ifdef MULTIPROCESSOR 
-struct __mp_lock sched_lock; +#include + +struct __mp_lock _sched_lock; +struct __mp_lock * const sched_lock = &_sched_lock; #endif void schedcpu(void *); @@ -353,10 +356,10 @@ mi_switch(void) /* * Release the kernel_lock, as we are about to yield the CPU. */ - sched_count = __mp_release_all_but_one(&sched_lock); - if (__mp_lock_held(&kernel_lock)) - hold_count = __mp_release_all(&kernel_lock); - else + sched_count = __mp_release_all_but_one(sched_lock); + if (__mp_lock_held(kernel_lock)) { + hold_count = __mp_release_all(kernel_lock); + } else hold_count = 0; #endif @@ -423,7 +426,7 @@ mi_switch(void) * just release it here. */ #ifdef MULTIPROCESSOR - __mp_unlock(&sched_lock); + __mp_unlock(sched_lock); #endif SCHED_ASSERT_UNLOCKED(); @@ -443,9 +446,10 @@ mi_switch(void) * released the scheduler lock to avoid deadlock, and before * we reacquire the interlock and the scheduler lock. */ - if (hold_count) - __mp_acquire_count(&kernel_lock, hold_count); - __mp_acquire_count(&sched_lock, sched_count + 1); + if (hold_count) { + __mp_acquire_count(kernel_lock, hold_count); + } + __mp_acquire_count(sched_lock, sched_count + 1); #endif } Index: kern/vfs_subr.c =================================================================== RCS file: /cvs/src/sys/kern/vfs_subr.c,v retrieving revision 1.257 diff -u -p -r1.257 vfs_subr.c --- kern/vfs_subr.c 15 Jan 2017 23:18:05 -0000 1.257 +++ kern/vfs_subr.c 21 Feb 2017 12:40:06 -0000 @@ -1676,15 +1676,15 @@ vfs_syncwait(int verbose) if (verbose) printf("%d ", nbusy); #ifdef MULTIPROCESSOR - if (__mp_lock_held(&kernel_lock)) - hold_count = __mp_release_all(&kernel_lock); + if (__mp_lock_held(kernel_lock)) + hold_count = __mp_release_all(kernel_lock); else hold_count = 0; #endif DELAY(40000 * iter); #ifdef MULTIPROCESSOR if (hold_count) - __mp_acquire_count(&kernel_lock, hold_count); + __mp_acquire_count(kernel_lock, hold_count); #endif } Index: sys/lock.h =================================================================== RCS file: /cvs/src/sys/sys/lock.h,v retrieving revision 1.27 diff -u -p -r1.27 lock.h --- sys/lock.h 19 Jun 2016 11:54:33 -0000 1.27 +++ sys/lock.h 21 Feb 2017 12:40:06 -0000 @@ -52,4 +52,50 @@ #define LK_DRAIN 0x1000UL /* wait for all lock activity to end */ #define LK_RETRY 0x2000UL /* vn_lock: retry until locked */ -#endif /* !_LOCK_H_ */ +#ifdef MULTIPROCESSOR + +#define LOCK_LOG_ENTRIES 256 /* must be a power of two */ +#define LOCK_LOG_MASK (LOCK_LOG_ENTRIES - 1) + +struct lock_log_entry { + vaddr_t lle_rip; + void *lle_lock; + unsigned int lle_flags; +#define LOCK_TYPE_MASK (0xf << 0) +#define LOCK_TYPE_NONE (0x0 << 0) +#define LOCK_TYPE_MPLOCK (0x1 << 0) +#define LOCK_TYPE_MUTEX (0x2 << 0) + +#define LOCK_OP_MASK (0x1 << 4) +#define LOCK_OP_ENTER (0x1 << 4) +#define LOCK_OP_LEAVE (0x0 << 4) + unsigned int lle_serial; +}; + +struct lock_log { + struct lock_log_entry ll_entries[LOCK_LOG_ENTRIES]; + unsigned int ll_index; + unsigned int ll_rollbacks; +}; + +#ifdef _KERNEL +struct lock_log_entry * + _lock_log(vaddr_t, void *, unsigned int, unsigned int); +void _lock_log_clear(struct lock_log_entry *); +void _lock_check(struct cpu_info *, struct cpu_info *); +void _lock_idle(void); + +#define _lock_enter(_rip, _lock, _type) \ + _lock_log((_rip), (_lock), (_type), LOCK_OP_ENTER) +#define _lock_leave(_rip, _lock, _type) \ + _lock_log((_rip), (_lock), (_type), LOCK_OP_LEAVE) + +#define lock_enter(_lock, _type) \ + _lock_enter(__return_address(), (_lock), (_type)) +#define lock_leave(_lock, _type) \ + _lock_leave(__return_address(), (_lock), 
(_type)) +#endif + +#endif /* MULTIPROCESSOR */ + +#endif /* _LOCK_H_ */ Index: sys/mplock.h =================================================================== RCS file: /cvs/src/sys/sys/mplock.h,v retrieving revision 1.9 diff -u -p -r1.9 mplock.h --- sys/mplock.h 26 Nov 2007 17:15:29 -0000 1.9 +++ sys/mplock.h 21 Feb 2017 12:40:06 -0000 @@ -27,22 +27,14 @@ #ifndef _MPLOCK_H_ #define _MPLOCK_H_ -#ifdef notyet -/* - * Enable the prototypes once the architectures stop playing around - * with inlines. - */ -void __mp_lock_init(struct __mp_lock *); -void __mp_lock(struct __mp_lock *); -void __mp_unlock(struct __mp_lock *); -int __mp_release_all(struct __mp_lock *); -int __mp_release_all_but_one(struct __mp_lock *); -void __mp_acquire_count(struct __mp_lock *, int); -int __mp_lock_held(struct __mp_lock *); -#endif +struct __mp_lock; -#include +void __mp_lock_init(struct __mp_lock *); +void __mp_lock(struct __mp_lock *); +void __mp_unlock(struct __mp_lock *); +int __mp_release_all(struct __mp_lock *); +int __mp_release_all_but_one(struct __mp_lock *); +void __mp_acquire_count(struct __mp_lock *, int); +int __mp_lock_held(struct __mp_lock *); -extern struct __mp_lock kernel_lock; - -#endif /* !_MPLOCK_H */ +#endif /* _MPLOCK_H */ Index: sys/mutex.h =================================================================== RCS file: /cvs/src/sys/sys/mutex.h,v retrieving revision 1.7 diff -u -p -r1.7 mutex.h --- sys/mutex.h 13 Aug 2009 13:24:55 -0000 1.7 +++ sys/mutex.h 21 Feb 2017 12:40:06 -0000 @@ -28,6 +28,46 @@ #ifndef _SYS_MUTEX_H_ #define _SYS_MUTEX_H_ +#include + +#ifndef MD_SPINLOCK + +#include + +#define SPINLOCK_UNLOCKED 1 +#define SPINLOCK_LOCKED 0 + +struct spinlock { + unsigned int lock; +}; + +#define SPINLOCK_INITIALIZER() { .lock = SPINLOCK_UNLOCKED } + +static inline void +spinlock_init(struct spinlock *spinlock) +{ + spinlock->lock = SPINLOCK_UNLOCKED; +} + +static inline int +spinlock_enter_try(struct spinlock *spinlock) +{ + return (atomic_swap_uint(&spinlock->lock, SPINLOCK_LOCKED)); +} + +static inline void +spinlock_leave(struct spinlock *spinlock) +{ + spinlock->lock = SPINLOCK_UNLOCKED; +} + +#endif /* MD_SPINLOCK */ + +/* + * spinlock_enter is implemented in MI code using spinlock_enter_try + */ +void spinlock_enter(struct spinlock *); + /* * A mutex is: * - owned by a cpu. @@ -42,16 +82,57 @@ * "mtx_enter(foo); mtx_enter(bar); mtx_leave(foo); mtx_leave(bar);" */ -#include - -/* - * Some architectures need to do magic for the ipl, so they need a macro. 
- */ -#ifndef mtx_init -void mtx_init(struct mutex *, int); +struct mutex { +#ifdef MULTIPROCESSOR + struct cpu_info *mtx_owner; + struct spinlock mtx_spinlock; #endif -void mtx_enter(struct mutex *); -void mtx_leave(struct mutex *); -int mtx_enter_try(struct mutex *); + int mtx_wantipl; + int mtx_oldipl; +}; + +#ifdef MULTIPROCESSOR +#ifndef __MUTEX_IPL +#define __MUTEX_IPL(ipl) (ipl) #endif + +#define MUTEX_INITIALIZER(_ipl) { \ + .mtx_owner = NULL, \ + .mtx_spinlock = SPINLOCK_INITIALIZER(), \ + .mtx_wantipl = __MUTEX_IPL(_ipl), \ + .mtx_oldipl = 0, \ +} + +#define MUTEX_ASSERT_LOCKED(_mtx) do { \ + if ((_mtx)->mtx_owner != curcpu()) \ + panic("mutex %p not held in %s", (_mtx), __func__); \ +} while (0) + +#define MUTEX_ASSERT_UNLOCKED(_mtx) do { \ + if ((_mtx)->mtx_owner == curcpu()) \ + panic("mutex %p held in %s", (_mtx), __func__); \ +} while (0) + +#else /* MULTIPROCESSOR */ + +#define __MUTEX_IPL(ipl) (ipl) + +#define MUTEX_INITIALIZER(_ipl) { \ + .mtx_wantipl = __MUTEX_IPL(_ipl), \ + .mtx_oldipl = 0, \ +} + +#define MUTEX_ASSERT_LOCKED(_mtx) splassert((_mtx)->mtx_wantipl) +#define MUTEX_ASSERT_UNLOCKED(_mtx) /* nop */ + +#endif /* MULTIPROCESSOR */ + +void mtx_init(struct mutex *, int); +int mtx_enter_try(struct mutex *); +void mtx_enter(struct mutex *); +void mtx_leave(struct mutex *); + +#define MUTEX_OLDIPL(_mtx) (_mtx)->mtx_oldipl + +#endif /* _SYS_MUTEX_H_ */ Index: sys/sched.h =================================================================== RCS file: /cvs/src/sys/sys/sched.h,v retrieving revision 1.42 diff -u -p -r1.42 sched.h --- sys/sched.h 14 Feb 2017 10:31:15 -0000 1.42 +++ sys/sched.h 21 Feb 2017 12:40:06 -0000 @@ -186,34 +186,29 @@ void remrunqueue(struct proc *); } while (0) #if defined(MULTIPROCESSOR) -#include +#include -/* - * XXX Instead of using struct lock for the kernel lock and thus requiring us - * XXX to implement simplelocks, causing all sorts of fine-grained locks all - * XXX over our tree to be activated, the sched_lock is a different kind of - * XXX lock to avoid introducing locking protocol bugs. 
- */ -extern struct __mp_lock sched_lock; +extern struct __mp_lock * const sched_lock; + +#define SCHED_LOCK_INIT() __mp_lock_init(sched_lock) #define SCHED_ASSERT_LOCKED() \ do { \ splassert(IPL_SCHED); \ - KASSERT(__mp_lock_held(&sched_lock)); \ + KASSERT(__mp_lock_held(sched_lock)); \ } while (0) -#define SCHED_ASSERT_UNLOCKED() KASSERT(__mp_lock_held(&sched_lock) == 0) -#define SCHED_LOCK_INIT() __mp_lock_init(&sched_lock) +#define SCHED_ASSERT_UNLOCKED() KASSERT(__mp_lock_held(sched_lock) == 0) #define SCHED_LOCK(s) \ do { \ s = splsched(); \ - __mp_lock(&sched_lock); \ + __mp_lock(sched_lock); \ } while (/* CONSTCOND */ 0) #define SCHED_UNLOCK(s) \ do { \ - __mp_unlock(&sched_lock); \ + __mp_unlock(sched_lock); \ splx(s); \ } while (/* CONSTCOND */ 0) @@ -229,5 +224,5 @@ do { \ #endif /* MULTIPROCESSOR */ -#endif /* _KERNEL */ -#endif /* _SYS_SCHED_H_ */ +#endif /* _KERNEL */ +#endif /* _SYS_SCHED_H_ */ Index: sys/systm.h =================================================================== RCS file: /cvs/src/sys/sys/systm.h,v retrieving revision 1.124 diff -u -p -r1.124 systm.h --- sys/systm.h 14 Feb 2017 09:46:21 -0000 1.124 +++ sys/systm.h 21 Feb 2017 12:40:06 -0000 @@ -355,17 +355,21 @@ void Debugger(void); /* in DDB only */ void user_config(void); #endif +#define __return_address() \ + ((vaddr_t)__builtin_extract_return_addr(__builtin_return_address(0))) + +vaddr_t __current_address(void); + #if defined(MULTIPROCESSOR) -void _kernel_lock_init(void); -void _kernel_lock(void); -void _kernel_unlock(void); -int _kernel_lock_held(void); +#include + +extern struct __mp_lock * const kernel_lock; -#define KERNEL_LOCK_INIT() _kernel_lock_init() -#define KERNEL_LOCK() _kernel_lock() -#define KERNEL_UNLOCK() _kernel_unlock() -#define KERNEL_ASSERT_LOCKED() KASSERT(_kernel_lock_held()) -#define KERNEL_ASSERT_UNLOCKED() KASSERT(!_kernel_lock_held()) +#define KERNEL_LOCK_INIT() __mp_lock_init(kernel_lock); +#define KERNEL_LOCK() __mp_lock(kernel_lock); +#define KERNEL_UNLOCK() __mp_unlock(kernel_lock); +#define KERNEL_ASSERT_LOCKED() KASSERT(__mp_lock_held(kernel_lock)) +#define KERNEL_ASSERT_UNLOCKED() KASSERT(!__mp_lock_held(kernel_lock)) #else /* ! MULTIPROCESSOR */ Index: uvm/uvm_anon.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_anon.c,v retrieving revision 1.48 diff -u -p -r1.48 uvm_anon.c --- uvm/uvm_anon.c 15 Sep 2016 02:00:18 -0000 1.48 +++ uvm/uvm_anon.c 21 Feb 2017 12:40:06 -0000 @@ -41,6 +41,32 @@ #include struct pool uvm_anon_pool; +void uvm_anon_task(void *); +void uvm_anon_tick(void *); + +#include +#include + +struct task uvm_task = TASK_INITIALIZER(uvm_anon_task, NULL); +struct timeout uvm_tick = TIMEOUT_INITIALIZER(uvm_anon_tick, NULL); + +void +uvm_anon_tick(void *null) +{ + task_add(systqmp, &uvm_task); +} + +void +uvm_anon_task(void *null) +{ + mtx_enter(&uvm_anon_pool.pr_mtx); + KERNEL_LOCK(); + delay(1000); + KERNEL_UNLOCK(); + mtx_leave(&uvm_anon_pool.pr_mtx); + + timeout_add(&uvm_tick, 1); +} /* * allocate anons @@ -51,6 +77,8 @@ uvm_anon_init(void) pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_NONE, PR_WAITOK, "anonpl", NULL); pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16); + +// uvm_anon_tick(NULL); } /*
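
Some notes on the individual pieces follow, with simplified sketches rather than the kernel code itself.

On the __mp_lock side the algorithm is unchanged: it is still a ticket lock, and the new mpl_owner field only exists so that a cpu spinning past __mp_lock_spinout under MP_LOCKDEBUG can hand the recorded owner to _lock_check() and ask whether that owner is, in turn, spinning on something this cpu holds. A simplified, userland-style model of the ticket discipline (recursion via mplc_depth and the owner tracking are left out; fetch_and_add() is written as a GCC builtin here):

/*
 * Simplified model of struct __mp_lock's ticket discipline:
 * "next" (mpl_users) hands out tickets, "serving" (mpl_ticket) is the
 * ticket currently allowed in, and unlock serves the next ticket, so
 * waiters take the lock in FIFO order.
 */
struct ticket_lock {
	volatile unsigned int	serving;	/* mpl_ticket */
	volatile unsigned int	next;		/* mpl_users */
};

static void
ticket_lock(struct ticket_lock *tl)
{
	/* fetch_and_add() in lock_machdep.c */
	unsigned int me = __sync_fetch_and_add(&tl->next, 1);

	while (tl->serving != me)
		;	/* SPINLOCK_SPIN_HOOK; _lock_check() hooks in here */
}

static void
ticket_unlock(struct ticket_lock *tl)
{
	tl->serving++;		/* hand the lock to the next ticket holder */
}
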
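The MI spinlock under the rewritten mutex uses a convention that is easy to misread: the word holds SPINLOCK_UNLOCKED (1) while the lock is free, and spinlock_enter_try() returns the previous value from atomic_swap_uint(), so a non-zero return means the caller just took the lock and zero means someone else already had it. A tiny illustration of just that convention (userland-style, with a GCC builtin standing in for atomic_swap_uint()):

/*
 * The spinlock word convention from sys/mutex.h above: 1 = unlocked,
 * 0 = locked, and "try" swaps in 0 and looks at the old value.
 */
#define UNLOCKED	1
#define LOCKED		0

static volatile unsigned int lock_word = UNLOCKED;

static int
try_lock(void)
{
	/* atomic_swap_uint() in the kernel; both return the old value */
	return (__sync_lock_test_and_set(&lock_word, LOCKED) == UNLOCKED);
}

static void
unlock(void)
{
	/* spinlock_leave(); mtx_leave() issues membar_exit() before this */
	lock_word = UNLOCKED;
}
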
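Callers of the mutex API should not need any change from the assembly to C conversion; a hypothetical driver-style user (example_* names invented) still looks like the following, with __MUTEX_IPL() on amd64 MP kernels promoting anything above IPL_NONE and below IPL_TTY up to IPL_TTY, exactly as the old machine-dependent header did:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>

struct example_softc {
	struct mutex	sc_mtx;
	unsigned int	sc_count;
};

void
example_attach(struct example_softc *sc)
{
	/* IPL_SOFTNET is below IPL_TTY, so this becomes an IPL_TTY mutex on MP */
	mtx_init(&sc->sc_mtx, IPL_SOFTNET);
}

void
example_bump(struct example_softc *sc)
{
	mtx_enter(&sc->sc_mtx);
	MUTEX_ASSERT_LOCKED(&sc->sc_mtx);
	sc->sc_count++;
	mtx_leave(&sc->sc_mtx);
}
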
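The deadlock check itself is just two per-cpu ring buffers being compared: every lock_enter()/lock_leave() appends an entry (return address, lock pointer, type and operation) to the calling cpu's ci_lock_log, and a cpu that spins out walks its own log and the owner's log to see whether each side's most recent "enter" names a lock the other side still appears to hold. _lock_check() is called from the MP_LOCKDEBUG spin loops in __mp_lock_spin() and mtx_spin(), and "show locklog" in ddb dumps the current cpu's ring. A toy model of the ring and of the "do I still hold this lock?" walk (illustration only; no atomics, return-address or type recording):

/*
 * Toy model of the per-cpu lock log: a power-of-two ring indexed by a
 * free-running counter.  The newest entry is at index & MASK and
 * history is read by walking the index backwards.
 */
#define TOY_LOG_ENTRIES	256			/* must be a power of two */
#define TOY_LOG_MASK	(TOY_LOG_ENTRIES - 1)

#define TOY_OP_LEAVE	0
#define TOY_OP_ENTER	1

struct toy_entry {
	void		*lock;
	unsigned int	 op;
};

struct toy_log {
	struct toy_entry entries[TOY_LOG_ENTRIES];
	unsigned int	 index;
};

/* append an event, overwriting the oldest entry once the ring wraps */
static void
toy_log_event(struct toy_log *log, void *lock, unsigned int op)
{
	struct toy_entry *e = &log->entries[++log->index & TOY_LOG_MASK];

	e->lock = lock;
	e->op = op;
}

/* does this cpu's history say it still holds "lock"? */
static int
toy_holds(struct toy_log *log, void *lock)
{
	unsigned int newest = log->index;
	unsigned int n;

	for (n = 0; n < TOY_LOG_ENTRIES; n++) {
		struct toy_entry *e =
		    &log->entries[(newest - n) & TOY_LOG_MASK];

		/* the most recent event for this lock decides ownership */
		if (e->lock == lock)
			return (e->op == TOY_OP_ENTER);
	}

	return (0);
}
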