Index: arch/sparc64/conf/GENERIC.MP =================================================================== RCS file: /cvs/src/sys/arch/sparc64/conf/GENERIC.MP,v retrieving revision 1.4 diff -u -p -r1.4 GENERIC.MP --- arch/sparc64/conf/GENERIC.MP 12 Jul 2008 14:26:07 -0000 1.4 +++ arch/sparc64/conf/GENERIC.MP 22 Jan 2014 02:49:14 -0000 @@ -9,3 +9,5 @@ cpu* at mainbus? cpu* at ssm? cpu* at cmp? cpu* at core? + +makeoptions DEBUG="-g" Index: arch/sparc64/include/cpu.h =================================================================== RCS file: /cvs/src/sys/arch/sparc64/include/cpu.h,v retrieving revision 1.83 diff -u -p -r1.83 cpu.h --- arch/sparc64/include/cpu.h 31 May 2013 17:00:59 -0000 1.83 +++ arch/sparc64/include/cpu.h 22 Jan 2014 02:49:15 -0000 @@ -78,6 +78,7 @@ #include #include +#include /* * The cpu_info structure is part of a 64KB structure mapped both the kernel @@ -155,6 +156,7 @@ struct cpu_info { #ifdef GPROF struct gmonparam *ci_gmon; #endif + struct ktr_ring *ci_ktr; }; #define CPUF_RUNNING 0x0001 /* CPU is running */ Index: arch/sparc64/include/mutex.h =================================================================== RCS file: /cvs/src/sys/arch/sparc64/include/mutex.h,v retrieving revision 1.3 diff -u -p -r1.3 mutex.h --- arch/sparc64/include/mutex.h 14 Jul 2013 21:22:08 -0000 1.3 +++ arch/sparc64/include/mutex.h 22 Jan 2014 02:49:15 -0000 @@ -32,6 +32,7 @@ struct mutex { __volatile void *mtx_owner; /* mutex.S relies upon this being first */ int mtx_wantipl; int mtx_oldipl; + long mtx_tick; }; /* Index: arch/sparc64/sparc64/mutex.S =================================================================== RCS file: /cvs/src/sys/arch/sparc64/sparc64/mutex.S,v retrieving revision 1.8 diff -u -p -r1.8 mutex.S --- arch/sparc64/sparc64/mutex.S 14 Jul 2013 21:22:09 -0000 1.8 +++ arch/sparc64/sparc64/mutex.S 22 Jan 2014 02:49:15 -0000 @@ -44,7 +44,7 @@ ENTRY(__mtx_init) retl stw %g0, [%o0 + MTX_OLDIPL] -ENTRY(mtx_enter) +ENTRY(__mtx_enter) rdpr %pil, %g4 
GET_CURCPU(%g1) 1: @@ -80,7 +80,7 @@ ENTRY(mtx_enter) retl membar #LoadLoad | #LoadStore -ENTRY(mtx_enter_try) +ENTRY(__mtx_enter_try) rdpr %pil, %g4 GET_CURCPU(%g1) 1: @@ -112,7 +112,7 @@ ENTRY(mtx_enter_try) retl mov 1, %o0 -ENTRY(mtx_leave) +ENTRY(__mtx_leave) #ifdef DIAGNOSTIC GET_CURCPU(%g1) ld [%g1 + CI_MUTEX_LEVEL], %g5 Index: conf/files =================================================================== RCS file: /cvs/src/sys/conf/files,v retrieving revision 1.565 diff -u -p -r1.565 files --- conf/files 19 Jan 2014 06:04:03 -0000 1.565 +++ conf/files 22 Jan 2014 02:49:15 -0000 @@ -673,6 +673,7 @@ file kern/kern_rwlock.c file kern/kern_physio.c file kern/kern_proc.c file kern/kern_prot.c +file kern/kern_ktr.c file kern/kern_resource.c file kern/kern_sched.c file kern/kern_sensors.c !small_kernel Index: kern/init_main.c =================================================================== RCS file: /cvs/src/sys/kern/init_main.c,v retrieving revision 1.203 diff -u -p -r1.203 init_main.c --- kern/init_main.c 20 Jan 2014 21:19:27 -0000 1.203 +++ kern/init_main.c 22 Jan 2014 02:49:16 -0000 @@ -76,6 +76,7 @@ #include #include #include +#include #include #include @@ -454,6 +455,8 @@ main(void *framep) (void) tsleep((void *)&config_pending, PWAIT, "cfpend", 0); dostartuphooks(); + + ktr_enable(); #if NVSCSI > 0 config_rootfound("vscsi", NULL); Index: kern/kern_lock.c =================================================================== RCS file: /cvs/src/sys/kern/kern_lock.c,v retrieving revision 1.43 diff -u -p -r1.43 kern_lock.c --- kern/kern_lock.c 21 Jan 2014 01:48:44 -0000 1.43 +++ kern/kern_lock.c 22 Jan 2014 02:49:16 -0000 @@ -120,16 +120,105 @@ _kernel_lock_init(void) * and the lower half of the kernel. 
*/ +long _kernel_tick = 0; + void -_kernel_lock(void) +__kernel_lock(const char *f, u_int l) { + long t = sys_tick(); + SCHED_ASSERT_UNLOCKED(); __mp_lock(&kernel_lock); + + _kernel_tick = sys_tick(); + ktr_log(f, l, "KERNEL_LOCK", _kernel_tick - t); } void -_kernel_unlock(void) +__kernel_unlock(const char *f, u_int l) { + long diff; + + diff = sys_tick() - _kernel_tick; + ktr_log(f, l, "KERNEL_UNLOCK", diff); + if (ktr_enabled && !cold && diff > 500000) { + printf("%s: big diff %ld\n", __func__, diff); + ktr_enabled = 0; + } + __mp_unlock(&kernel_lock); } + +int +_kernel_lock_held(void) +{ + return (__mp_lock_held(&kernel_lock)); +} + +int +__kernel_release_all(const char *f, u_int l) +{ + long diff; + + diff = sys_tick() - _kernel_tick; + ktr_log(f, l, "KERNEL_RELEASE_ALL", diff); + if (ktr_enabled && !cold && diff > 500000) { + printf("%s: big diff %ld\n", __func__, diff); + ktr_enabled = 0; + } + + return (__mp_release_all(&kernel_lock)); +} + +void +__kernel_acquire_count(const char *f, u_int l, int c) +{ + long t = sys_tick(); + + SCHED_ASSERT_UNLOCKED(); + __mp_acquire_count(&kernel_lock, c); + + _kernel_tick = sys_tick(); + ktr_log(f, l, "KERNEL_ACQUIRE", _kernel_tick - t); +} + +inline void +_mtx_enter(struct mutex *mtx, const char *f, u_int l) +{ + long t = sys_tick(); + + __mtx_enter(mtx); + + mtx->mtx_tick = sys_tick(); + + ktr_log(f, l, "mtx_enter", mtx->mtx_tick - t); +} + +inline void +_mtx_leave(struct mutex *mtx, const char *f, u_int l) +{ + long diff = sys_tick() - mtx->mtx_tick; + + ktr_log(f, l, "mtx_leave", diff); + + __mtx_leave(mtx); +} + +inline int +_mtx_enter_try(struct mutex *mtx, const char *f, u_int l) +{ + long t = sys_tick(); + int rv; + + rv = __mtx_enter_try(mtx); + if (rv == 0) + return (rv); + + mtx->mtx_tick = sys_tick(); + + ktr_log(f, l, "mtx_enter", mtx->mtx_tick - t); + + return (rv); +} + #endif /* MULTIPROCESSOR */ Index: kern/kern_synch.c =================================================================== RCS file: 
/cvs/src/sys/kern/kern_synch.c,v retrieving revision 1.112 diff -u -p -r1.112 kern_synch.c --- kern/kern_synch.c 24 Dec 2013 01:11:04 -0000 1.112 +++ kern/kern_synch.c 22 Jan 2014 02:49:16 -0000 @@ -102,7 +102,7 @@ extern int safepri; * call should be interrupted by the signal (return EINTR). */ int -tsleep(const volatile void *ident, int priority, const char *wmesg, int timo) +__tsleep(const volatile void *ident, int priority, const char *wmesg, int timo) { struct sleep_state sls; int error, error1; @@ -143,7 +143,7 @@ tsleep(const volatile void *ident, int p * entered the sleep queue we drop the mutex. After sleeping we re-lock. */ int -msleep(const volatile void *ident, struct mutex *mtx, int priority, +__msleep(const volatile void *ident, struct mutex *mtx, int priority, const char *wmesg, int timo) { struct sleep_state sls; @@ -356,7 +356,7 @@ unsleep(struct proc *p) * Make a number of processes sleeping on the specified identifier runnable. */ void -wakeup_n(const volatile void *ident, int n) +__wakeup_n(const volatile void *ident, int n) { struct slpque *qp; struct proc *p; @@ -386,7 +386,7 @@ wakeup_n(const volatile void *ident, int * Make all processes sleeping on the specified identifier runnable. */ void -wakeup(const volatile void *chan) +__wakeup(const volatile void *chan) { wakeup_n(chan, -1); } Index: kern/sched_bsd.c =================================================================== RCS file: /cvs/src/sys/kern/sched_bsd.c,v retrieving revision 1.33 diff -u -p -r1.33 sched_bsd.c --- kern/sched_bsd.c 3 Jun 2013 16:55:22 -0000 1.33 +++ kern/sched_bsd.c 22 Jan 2014 02:49:16 -0000 @@ -295,7 +295,7 @@ updatepri(struct proc *p) * performs a voluntary context switch. */ void -yield(void) +__yield(void) { struct proc *p = curproc; int s; @@ -362,8 +362,8 @@ mi_switch(void) * Release the kernel_lock, as we are about to yield the CPU. 
*/ sched_count = __mp_release_all_but_one(&sched_lock); - if (__mp_lock_held(&kernel_lock)) - hold_count = __mp_release_all(&kernel_lock); + if (KERNEL_HELD()) + hold_count = KERNEL_RELEASE(); else hold_count = 0; #endif @@ -452,7 +452,7 @@ mi_switch(void) * we reacquire the interlock and the scheduler lock. */ if (hold_count) - __mp_acquire_count(&kernel_lock, hold_count); + KERNEL_ACQUIRE(hold_count); __mp_acquire_count(&sched_lock, sched_count + 1); #endif } Index: kern/subr_prf.c =================================================================== RCS file: /cvs/src/sys/kern/subr_prf.c,v retrieving revision 1.81 diff -u -p -r1.81 subr_prf.c --- kern/subr_prf.c 12 Dec 2013 21:00:09 -0000 1.81 +++ kern/subr_prf.c 22 Jan 2014 02:49:16 -0000 @@ -120,7 +120,7 @@ int db_panic = 1; #ifdef DDB_SAFE_CONSOLE int db_console = 1; #else -int db_console = 0; +int db_console = 1; #endif /* Index: sys/mutex.h =================================================================== RCS file: /cvs/src/sys/sys/mutex.h,v retrieving revision 1.7 diff -u -p -r1.7 mutex.h --- sys/mutex.h 13 Aug 2009 13:24:55 -0000 1.7 +++ sys/mutex.h 22 Jan 2014 02:49:16 -0000 @@ -42,16 +42,30 @@ * "mtx_enter(foo); mtx_enter(bar); mtx_leave(foo); mtx_leave(bar);" */ +#include + #include +#ifdef _KERNEL + /* * Some architectures need to do magic for the ipl, so they need a macro. 
*/ #ifndef mtx_init void mtx_init(struct mutex *, int); #endif -void mtx_enter(struct mutex *); -void mtx_leave(struct mutex *); -int mtx_enter_try(struct mutex *); +void __mtx_enter(struct mutex *); +void __mtx_leave(struct mutex *); +int __mtx_enter_try(struct mutex *); -#endif +void _mtx_enter(struct mutex *, const char *, u_int); +void _mtx_leave(struct mutex *, const char *, u_int); +int _mtx_enter_try(struct mutex *, const char *, u_int); + +#define mtx_enter(_m) _mtx_enter((_m), __FILE__, __LINE__) +#define mtx_leave(_m) _mtx_leave((_m), __FILE__, __LINE__) +#define mtx_enter_try(_m) _mtx_enter_try((_m), __FILE__, __LINE__) + +#endif /* _KERNEL */ + +#endif /* _SYS_MUTEX_H_ */ Index: sys/syscall_mi.h =================================================================== RCS file: /cvs/src/sys/sys/syscall_mi.h,v retrieving revision 1.2 diff -u -p -r1.2 syscall_mi.h --- sys/syscall_mi.h 7 Aug 2012 23:22:38 -0000 1.2 +++ sys/syscall_mi.h 22 Jan 2014 02:49:16 -0000 @@ -55,6 +55,7 @@ mi_syscall(struct proc *p, register_t co int lock = !(callp->sy_flags & SY_NOLOCK); int error; + KTR_LOG("syscall in", callp - sysent); #ifdef SYSCALL_DEBUG KERNEL_LOCK(); scdebug_call(p, code, argp); @@ -82,6 +83,8 @@ mi_syscall(struct proc *p, register_t co if (lock) KERNEL_UNLOCK(); } + + KTR_LOG("syscall err", error); return (error); } Index: sys/systm.h =================================================================== RCS file: /cvs/src/sys/sys/systm.h,v retrieving revision 1.100 diff -u -p -r1.100 systm.h --- sys/systm.h 11 Jun 2013 18:15:54 -0000 1.100 +++ sys/systm.h 22 Jan 2014 02:49:17 -0000 @@ -42,6 +42,7 @@ #include #include +#include /* * The `securelevel' variable controls the security level of the system. 
@@ -249,12 +250,55 @@ int sleep_finish_signal(struct sleep_sta void sleep_queue_init(void); struct mutex; -void wakeup_n(const volatile void *, int); -void wakeup(const volatile void *); +void __wakeup_n(const volatile void *, int); +#define wakeup_n(_p, _n) do { \ + KTR_LOG("wakeup n", (long)(_p)); \ + __wakeup_n((_p), (_n)); \ +} while (0) +void __wakeup(const volatile void *); +#define wakeup(_p) do {\ + KTR_LOG("wakeup", (long)(_p)); \ + __wakeup(_p); \ +} while (0) #define wakeup_one(c) wakeup_n((c), 1) -int tsleep(const volatile void *, int, const char *, int); -int msleep(const volatile void *, struct mutex *, int, const char*, int); -void yield(void); +int __tsleep(const volatile void *, int, const char *, int); +static inline int +_tsleep(const volatile void *a, int b, const char *c, int d, + const char *f, u_int l) +{ + int rv; + + ktr_log(f, l, "tsleep in", (long)a); + rv = __tsleep(a, b, c, d); + ktr_log(f, l, "tsleep out", (long)a); + + return (rv); +} +#define tsleep(_a, _b, _c, _d) \ + _tsleep((_a), (_b), (_c), (_d), __FILE__, __LINE__) + +int __msleep(const volatile void *, struct mutex *, int, const char*, int); +static inline int +_msleep(const volatile void *a, struct mutex *b, int c, const char *d, int e, + const char *f, u_int l) +{ + int rv; + + ktr_log(f, l, "msleep in", (long)a); + rv = __msleep(a, b, c, d, e); + ktr_log(f, l, "msleep out", (long)a); + + return (rv); +} +#define msleep(_a, _b, _c, _d, _e) \ + _msleep((_a), (_b), (_c), (_d), (_e), __FILE__, __LINE__) + +void __yield(void); +#define yield() do { \ + KTR_LOG("yield in", (long)curproc); \ + __yield(); \ + KTR_LOG("yield out", (long)curproc); \ +} while (0) void wdog_register(int (*)(void *, int), void *); void wdog_shutdown(int (*)(void *, int), void *); @@ -338,12 +382,25 @@ void user_config(void); #if defined(MULTIPROCESSOR) void _kernel_lock_init(void); -void _kernel_lock(void); -void _kernel_unlock(void); +int _kernel_lock_held(void); +void __kernel_lock(const char *, 
u_int); +#define _kernel_lock() __kernel_lock(__FILE__, __LINE__) +void __kernel_unlock(const char *, u_int); +#define _kernel_unlock() __kernel_unlock(__FILE__, __LINE__) +int __kernel_release_all(const char *, u_int); +#define _kernel_release_all() __kernel_release_all(__FILE__, __LINE__) +void __kernel_acquire_count(const char *, u_int, int); +#define _kernel_acquire_count(_c) \ + __kernel_acquire_count(__FILE__, __LINE__, (_c)) + +void __kernel_acquire_count(const char *, u_int, int); #define KERNEL_LOCK_INIT() _kernel_lock_init() +#define KERNEL_HELD() _kernel_lock_held() #define KERNEL_LOCK() _kernel_lock() #define KERNEL_UNLOCK() _kernel_unlock() +#define KERNEL_RELEASE() _kernel_release_all() +#define KERNEL_ACQUIRE(_c) _kernel_acquire_count(_c) #else /* ! MULTIPROCESSOR */