Index: kern/exec_elf.c
===================================================================
RCS file: /cvs/src/sys/kern/exec_elf.c,v
retrieving revision 1.160
diff -u -p -r1.160 exec_elf.c
--- kern/exec_elf.c	10 Mar 2021 10:21:47 -0000	1.160
+++ kern/exec_elf.c	11 Nov 2021 12:26:30 -0000
@@ -82,6 +82,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1247,8 +1248,10 @@ coredump_notes_elf(struct proc *p, void
 
 	cpi.cpi_sigpend = p->p_siglist | pr->ps_siglist;
 	cpi.cpi_sigmask = p->p_sigmask;
+	rw_enter_read(&pr->ps_sigacts->ps_rwl);
 	cpi.cpi_sigignore = pr->ps_sigacts->ps_sigignore;
 	cpi.cpi_sigcatch = pr->ps_sigacts->ps_sigcatch;
+	rw_exit_read(&pr->ps_sigacts->ps_rwl);
 
 	cpi.cpi_pid = pr->ps_pid;
 	cpi.cpi_ppid = pr->ps_ppid;
Index: kern/kern_exit.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_exit.c,v
retrieving revision 1.200
diff -u -p -r1.200 kern_exit.c
--- kern/kern_exit.c	24 Oct 2021 00:02:24 -0000	1.200
+++ kern/kern_exit.c	11 Nov 2021 12:26:30 -0000
@@ -222,6 +222,10 @@ exit1(struct proc *p, int xexit, int xsi
 		 * If parent has the SAS_NOCLDWAIT flag set, we're not
 		 * going to become a zombie.
 		 */
+		/*
+		 * XXX ps_rwl isn't needed as no intermediate
+		 * inconsistent state.
+		 */
 		if (pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDWAIT)
 			atomic_setbits_int(&pr->ps_flags, PS_NOZOMBIE);
 	}
Index: kern/kern_sig.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sig.c,v
retrieving revision 1.287
diff -u -p -r1.287 kern_sig.c
--- kern/kern_sig.c	24 Oct 2021 00:02:25 -0000	1.287
+++ kern/kern_sig.c	11 Nov 2021 12:26:30 -0000
@@ -122,7 +122,7 @@ const int sigprop[NSIG + 1] = {
 #define	STOPSIGMASK	(sigmask(SIGSTOP) | sigmask(SIGTSTP) | \
 			    sigmask(SIGTTIN) | sigmask(SIGTTOU))
 
-void setsigvec(struct proc *, int, struct sigaction *);
+void setsigvec(struct proc *, int, const struct sigaction *);
 
 void proc_stop(struct proc *p, int);
 void proc_stop_sweep(void *);
@@ -218,7 +218,11 @@ sigactsinit(struct process *pr)
 	struct sigacts *ps;
 
 	ps = pool_get(&sigacts_pool, PR_WAITOK);
+	rw_enter_read(&pr->ps_sigacts->ps_rwl);
 	memcpy(ps, pr->ps_sigacts, sizeof(struct sigacts));
+	rw_exit_read(&pr->ps_sigacts->ps_rwl);
+	rw_init(&ps->ps_rwl, "sigactlk");
+	rdseq_init(&ps->ps_rdseq);
 	return (ps);
 }
 
@@ -269,9 +273,16 @@ sys_sigaction(struct proc *p, void *v, r
 	nsa = SCARG(uap, nsa);
 	osa = SCARG(uap, osa);
 
+	error = rw_enter(&ps->ps_rwl, RW_INTR | (nsa ? RW_WRITE : RW_READ));
+	if (error != 0)
+		return (error);
+
 	if (signum <= 0 || signum >= NSIG ||
-	    (nsa && (signum == SIGKILL || signum == SIGSTOP)))
-		return (EINVAL);
+	    (nsa && (signum == SIGKILL || signum == SIGSTOP))) {
+		error = EINVAL;
+		goto exit;
+	}
+
 	sa = &vec;
 	if (osa) {
 		sa->sa_handler = ps->ps_sigact[signum];
@@ -297,7 +308,7 @@ sys_sigaction(struct proc *p, void *v, r
 		sa->sa_mask &= ~bit;
 		error = copyout(sa, osa, sizeof (vec));
 		if (error)
-			return (error);
+			goto exit;
 #ifdef KTRACE
 		if (KTRPOINT(p, KTR_STRUCT))
 			ovec = vec;
@@ -306,36 +317,40 @@ sys_sigaction(struct proc *p, void *v, r
 	if (nsa) {
 		error = copyin(nsa, sa, sizeof (vec));
 		if (error)
-			return (error);
+			goto exit;
 #ifdef KTRACE
 		if (KTRPOINT(p, KTR_STRUCT))
 			ktrsigaction(p, sa);
 #endif
+		rdseq_enter_write(&ps->ps_rdseq);
 		setsigvec(p, signum, sa);
+		rdseq_leave_write(&ps->ps_rdseq);
 	}
 #ifdef KTRACE
 	if (osa && KTRPOINT(p, KTR_STRUCT))
 		ktrsigaction(p, &ovec);
 #endif
-	return (0);
+exit:
+	rw_exit(&ps->ps_rwl);
+	return (error);
 }
 
 void
-setsigvec(struct proc *p, int signum, struct sigaction *sa)
+setsigvec(struct proc *p, int signum, const struct sigaction *sa)
 {
 	struct sigacts *ps = p->p_p->ps_sigacts;
+	sigset_t sa_mask = sa->sa_mask;
 	int bit;
-	int s;
 
 	bit = sigmask(signum);
 	/*
 	 * Change setting atomically.
 	 */
-	s = splhigh();
+	rw_assert_wrlock(&ps->ps_rwl);
 	ps->ps_sigact[signum] = sa->sa_handler;
 	if ((sa->sa_flags & SA_NODEFER) == 0)
-		sa->sa_mask |= sigmask(signum);
-	ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask;
+		sa_mask |= sigmask(signum);
+	ps->ps_catchmask[signum] = sa_mask &~ sigcantmask;
 	if (signum == SIGCHLD) {
 		if (sa->sa_flags & SA_NOCLDSTOP)
 			atomic_setbits_int(&ps->ps_sigflags, SAS_NOCLDSTOP);
@@ -392,7 +407,6 @@ setsigvec(struct proc *p, int signum, st
 		else
 			ps->ps_sigcatch |= bit;
 	}
-	splx(s);
 }
 
 /*
@@ -416,16 +430,16 @@ siginit(struct sigacts *ps)
 void
 execsigs(struct proc *p)
 {
-	struct sigacts *ps;
+	struct sigacts *ps = p->p_p->ps_sigacts;
 	int nc, mask;
 
-	ps = p->p_p->ps_sigacts;
-
 	/*
 	 * Reset caught signals. Held signals remain held
 	 * through p_sigmask (unless they were caught,
 	 * and are now ignored by default).
 	 */
+	rw_enter_write(&ps->ps_rwl);
+	rdseq_enter_write(&ps->ps_rdseq);
 	while (ps->ps_sigcatch) {
 		nc = ffs((long)ps->ps_sigcatch);
 		mask = sigmask(nc);
@@ -446,6 +460,8 @@ execsigs(struct proc *p)
 		atomic_clearbits_int(&ps->ps_sigflags, SAS_NOCLDWAIT);
 	if (ps->ps_sigact[SIGCHLD] == SIG_IGN)
 		ps->ps_sigact[SIGCHLD] = SIG_DFL;
+	rdseq_leave_write(&ps->ps_rdseq);
+	rw_exit_write(&ps->ps_rwl);
 }
 
 /*
@@ -489,7 +505,6 @@ sys_sigprocmask(struct proc *p, void *v,
 int
 sys_sigpending(struct proc *p, void *v, register_t *retval)
 {
-
 	*retval = p->p_siglist | p->p_p->ps_siglist;
 	return (0);
 }
@@ -780,7 +795,7 @@ postsig_done(struct proc *p, int signum,
 {
 	int mask = sigmask(signum);
 
-	KERNEL_ASSERT_LOCKED();
+	rw_assert_wrlock(&ps->ps_rwl);
 
 	p->p_ru.ru_nsignals++;
 	atomic_setbits_int(&p->p_sigmask, ps->ps_catchmask[signum]);
@@ -803,6 +818,8 @@ trapsignal(struct proc *p, int signum, u
 {
 	struct process *pr = p->p_p;
 	struct sigacts *ps = pr->ps_sigacts;
+	sigset_t sigcatch, siginfo, sigonstack, sigignore;
+	sig_t sigact;
 	int mask;
 
 	KERNEL_LOCK();
@@ -815,26 +832,39 @@ trapsignal(struct proc *p, int signum, u
 	}
 
 	mask = sigmask(signum);
+
+	rw_enter_read(&ps->ps_rwl);
+	sigcatch = ps->ps_sigcatch;
+	siginfo = ps->ps_siginfo;
+	sigonstack = ps->ps_sigonstack;
+	sigignore = ps->ps_sigignore;
+	sigact = ps->ps_sigact[signum];
+	rw_exit_read(&ps->ps_rwl);
+
 	if ((pr->ps_flags & PS_TRACED) == 0 &&
-	    (ps->ps_sigcatch & mask) != 0 &&
+	    (sigcatch & mask) != 0 &&
 	    (p->p_sigmask & mask) == 0) {
 		siginfo_t si;
-		int info = (ps->ps_siginfo & mask) != 0;
-		int onstack = (ps->ps_sigonstack & mask) != 0;
+		int info = (siginfo & mask) != 0;
+		int onstack = (sigonstack & mask) != 0;
 
 		initsiginfo(&si, signum, trapno, code, sigval);
 #ifdef KTRACE
 		if (KTRPOINT(p, KTR_PSIG)) {
-			ktrpsig(p, signum, ps->ps_sigact[signum],
+			ktrpsig(p, signum, sigact,
 			    p->p_sigmask, code, &si);
 		}
 #endif
-		if (sendsig(ps->ps_sigact[signum], signum, p->p_sigmask, &si,
+		if (sendsig(sigact, signum, p->p_sigmask, &si,
 		    info, onstack)) {
 			sigexit(p, SIGILL);
 			/* NOTREACHED */
 		}
+		rw_enter_write(&ps->ps_rwl);
+		rdseq_enter_write(&ps->ps_rdseq);
 		postsig_done(p, signum, ps);
+		rdseq_leave_write(&ps->ps_rdseq);
+		rw_exit_write(&ps->ps_rwl);
 	} else {
 		p->p_sisig = signum;
 		p->p_sitrapno = trapno;	/* XXX for core dump/debugger */
@@ -854,7 +884,7 @@ trapsignal(struct proc *p, int signum, u
 	 */
 	if ((pr->ps_flags & PS_TRACED) == 0 &&
 	    (sigprop[signum] & SA_KILL) &&
-	    ((p->p_sigmask & mask) || (ps->ps_sigignore & mask)) &&
+	    ((p->p_sigmask & mask) || (sigignore & mask)) &&
 	    pr->ps_pid != 1)
 		sigexit(p, signum);
 	ptsignal(p, signum, STHREAD);
@@ -969,6 +999,15 @@ ptsignal(struct proc *p, int signum, enu
 	if (pr->ps_flags & PS_TRACED) {
 		action = SIG_DFL;
 	} else {
+		struct sigacts *ps = pr->ps_sigacts;
+		sigset_t sigignore, sigcatch;
+		struct rdseq_ref rdr;
+
+		RDSEQ_LOOP(&ps->ps_rdseq, &rdr) {
+			sigignore = ps->ps_sigignore;
+			sigcatch = ps->ps_sigcatch;
+		}
+
 		/*
 		 * If the signal is being ignored,
 		 * then we forget about it immediately.
@@ -976,11 +1015,11 @@ ptsignal(struct proc *p, int signum, enu
 		 * (Note: we don't set SIGCONT in ps_sigignore,
 		 * and if it is set to SIG_IGN,
 		 * action will be SIG_DFL here.)
 		 */
-		if (pr->ps_sigacts->ps_sigignore & mask)
+		if (sigignore & mask)
 			return;
 		if (p->p_sigmask & mask) {
 			action = SIG_HOLD;
-		} else if (pr->ps_sigacts->ps_sigcatch & mask) {
+		} else if (sigcatch & mask) {
 			action = SIG_CATCH;
 		} else {
 			action = SIG_DFL;
 		}
@@ -1188,12 +1227,13 @@ int
 cursig(struct proc *p)
 {
 	struct process *pr = p->p_p;
+	struct sigacts *ps = pr->ps_sigacts;
+	sigset_t sigignore;
+	sig_t sigact;
 	int sigpending, signum, mask, prop;
 	int dolock = (p->p_flag & P_SINTR) == 0;
 	int s;
 
-	KERNEL_ASSERT_LOCKED();
-
 	sigpending = (p->p_siglist | pr->ps_siglist);
 	if (sigpending == 0)
 		return 0;
@@ -1212,11 +1252,16 @@ cursig(struct proc *p)
 		atomic_clearbits_int(&p->p_siglist, mask);
 		atomic_clearbits_int(&pr->ps_siglist, mask);
 
+		rw_enter_read(&ps->ps_rwl);
+		sigignore = ps->ps_sigignore;
+		sigact = ps->ps_sigact[signum];
+		rw_exit_read(&ps->ps_rwl);
+
 		/*
 		 * We should see pending but ignored signals
 		 * only if PS_TRACED was on when they were posted.
 		 */
-		if (mask & pr->ps_sigacts->ps_sigignore &&
+		if (mask & sigignore &&
 		    (pr->ps_flags & PS_TRACED) == 0)
 			continue;
@@ -1268,7 +1313,7 @@ cursig(struct proc *p)
 		 * Return the signal's number, or fall through
 		 * to clear it from the pending mask.
 		 */
-		switch ((long)pr->ps_sigacts->ps_sigact[signum]) {
+		switch ((long)sigact) {
 		case (long)SIG_DFL:
 			/*
 			 * Don't take default actions on system processes.
@@ -1374,13 +1419,24 @@ void
 proc_stop_sweep(void *v)
 {
 	struct process *pr;
+	struct sigacts *ps;
+	int sigflags;
 
 	LIST_FOREACH(pr, &allprocess, ps_list) {
 		if ((pr->ps_flags & PS_STOPPED) == 0)
 			continue;
 		atomic_clearbits_int(&pr->ps_flags, PS_STOPPED);
-		if ((pr->ps_pptr->ps_sigacts->ps_sigflags & SAS_NOCLDSTOP) == 0)
+		ps = pr->ps_pptr->ps_sigacts;
+		/*
+		 * XXX doesn't need lock because no inconsistent
+		 * intermediate state.
+		 */
+		rw_enter_read(&ps->ps_rwl);
+		sigflags = ps->ps_sigflags;
+		rw_exit_read(&ps->ps_rwl);
+
+		if ((sigflags & SAS_NOCLDSTOP) == 0)
 			prsignal(pr->ps_pptr, SIGCHLD);
 		wakeup(pr->ps_pptr);
 	}
@@ -1403,13 +1459,14 @@ postsig(struct proc *p, int signum)
 	int s, code, info, onstack;
 
 	KASSERT(signum != 0);
-	KERNEL_ASSERT_LOCKED();
 
 	mask = sigmask(signum);
 	atomic_clearbits_int(&p->p_siglist, mask);
+	rw_enter_read(&ps->ps_rwl);
 	action = ps->ps_sigact[signum];
 	info = (ps->ps_siginfo & mask) != 0;
 	onstack = (ps->ps_sigonstack & mask) != 0;
+	rw_exit_read(&ps->ps_rwl);
 	sigval.sival_ptr = NULL;
 
 	if (p->p_sisig != signum) {
@@ -1475,7 +1532,11 @@ postsig(struct proc *p, int signum)
 			sigexit(p, SIGILL);
 			/* NOTREACHED */
 		}
+		rw_enter_write(&ps->ps_rwl);
+		rdseq_enter_write(&ps->ps_rdseq);
 		postsig_done(p, signum, ps);
+		rdseq_leave_write(&ps->ps_rdseq);
+		rw_exit_write(&ps->ps_rwl);
 		splx(s);
 	}
 }
@@ -1491,6 +1552,8 @@ postsig(struct proc *p, int signum)
 void
 sigexit(struct proc *p, int signum)
 {
+	rw_assert_unlocked(&p->p_p->ps_sigacts->ps_rwl);
+
 	/* Mark process as going away */
 	atomic_setbits_int(&p->p_flag, P_WEXIT);
 
@@ -1516,10 +1579,15 @@ void
 sigabort(struct proc *p)
 {
 	struct sigaction sa;
+	struct sigacts *ps = p->p_p->ps_sigacts;
 
 	memset(&sa, 0, sizeof sa);
 	sa.sa_handler = SIG_DFL;
+	rw_enter_write(&ps->ps_rwl);
+	rdseq_enter_write(&ps->ps_rdseq);
 	setsigvec(p, SIGABRT, &sa);
+	rdseq_leave_write(&ps->ps_rdseq);
+	rw_exit_write(&ps->ps_rwl);
 	atomic_clearbits_int(&p->p_sigmask, sigmask(SIGABRT));
 	psignal(p, SIGABRT);
 }
@@ -1532,12 +1600,16 @@ int
 sigismasked(struct proc *p, int sig)
 {
 	struct process *pr = p->p_p;
+	struct sigacts *ps = pr->ps_sigacts;
+	int bit = sigmask(sig);
+	int rv;
 
-	if ((pr->ps_sigacts->ps_sigignore & sigmask(sig)) ||
-	    (p->p_sigmask & sigmask(sig)))
-		return 1;
+	/* XXX there's no intermediate state to read, so lock isn't needed */
+	rw_enter_read(&ps->ps_rwl);
+	rv = ISSET(ps->ps_sigignore, bit) || ISSET(p->p_sigmask, bit);
+	rw_exit_read(&ps->ps_rwl);
 
-	return 0;
+	return rv;
 }
 
 int nosuidcoredump = 1;
Index: kern/kern_synch.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_synch.c,v
retrieving revision 1.180
diff -u -p -r1.180 kern_synch.c
--- kern/kern_synch.c	7 Oct 2021 08:51:00 -0000	1.180
+++ kern/kern_synch.c	11 Nov 2021 12:26:30 -0000
@@ -50,6 +50,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -219,9 +220,6 @@ msleep(const volatile void *ident, struc
 	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
 	KASSERT(mtx != NULL);
 
-	if (priority & PCATCH)
-		KERNEL_ASSERT_LOCKED();
-
 	if (cold || panicstr) {
 		/*
 		 * After a panic, or during autoconfiguration,
@@ -360,16 +358,6 @@ sleep_setup(struct sleep_state *sls, con
 	sls->sls_locked = 0;
 	sls->sls_timeout = 0;
 
-	/*
-	 * The kernel has to be locked for signal processing.
-	 * This is done here and not in sleep_finish() because
-	 * KERNEL_LOCK() has to be taken before SCHED_LOCK().
-	 */
-	if (sls->sls_catch != 0) {
-		KERNEL_LOCK();
-		sls->sls_locked = 1;
-	}
-
 	SCHED_LOCK(sls->sls_s);
 
 	TRACEPOINT(sched, sleep, NULL);
@@ -394,9 +382,6 @@ sleep_finish(struct sleep_state *sls, in
 	int error = 0, error1 = 0;
 
 	if (sls->sls_catch != 0) {
-		/* sleep_setup() has locked the kernel. */
-		KERNEL_ASSERT_LOCKED();
-
 		/*
 		 * We put ourselves on the sleep queue and start our
 		 * timeout before calling sleep_signal_check(), as we could
@@ -458,9 +443,6 @@ sleep_finish(struct sleep_state *sls, in
 	if (sls->sls_catch != 0)
 		error = sleep_signal_check();
 
-	if (sls->sls_locked)
-		KERNEL_UNLOCK();
-
 	/* Signal errors are higher priority than timeouts. */
 	if (error == 0 && error1 != 0)
 		error = error1;
@@ -480,6 +462,7 @@ sleep_signal_check(void)
 	if ((err = single_thread_check(p, 1)) != 0)
 		return err;
 	if ((sig = cursig(p)) != 0) {
+		/* XXX no ps_sigacts->ps_rwl: no inconsistent state is read */
 		if (p->p_p->ps_sigacts->ps_sigintr & sigmask(sig))
 			return EINTR;
 		else
@@ -873,4 +856,35 @@ cond_wait(struct cond *c, const char *wm
 		wait = c->c_wait;
 		sleep_finish(&sls, wait);
 	}
+}
+
+unsigned int
+rdseq_read(struct rdseq *rs)
+{
+	unsigned int seq;
+
+	while ((seq = rs->seq) & 1)
+		CPU_BUSY_CYCLE();
+
+	return (seq);
+}
+
+/* XXX this is very quick and dirty */
+
+void
+rdseq_init(struct rdseq *rs)
+{
+	rs->seq = 0;
+}
+
+void
+rdseq_enter_write(struct rdseq *rs)
+{
+	rs->seq++;
+}
+
+void
+rdseq_leave_write(struct rdseq *rs)
+{
+	rs->seq++;
 }
Index: kern/kern_event.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_event.c,v
retrieving revision 1.170
diff -u -p -r1.170 kern_event.c
--- kern/kern_event.c	6 Nov 2021 05:48:47 -0000	1.170
+++ kern/kern_event.c	11 Nov 2021 12:26:30 -0000
@@ -1295,13 +1295,13 @@ retry:
 	error = 0;
 
 	/* msleep() with PCATCH requires kernel lock. */
-	KERNEL_LOCK();
+	//KERNEL_LOCK();
 	mtx_enter(&kq->kq_lock);
 
 	if (kq->kq_state & KQ_DYING) {
 		mtx_leave(&kq->kq_lock);
-		KERNEL_UNLOCK();
+		//KERNEL_UNLOCK();
 		error = EBADF;
 		goto done;
 	}
@@ -1314,14 +1314,14 @@ retry:
 	if ((tsp != NULL && !timespecisset(tsp)) ||
 	    scan->kqs_nevent != 0) {
 		mtx_leave(&kq->kq_lock);
-		KERNEL_UNLOCK();
+		//KERNEL_UNLOCK();
 		error = 0;
 		goto done;
 	}
 	kq->kq_state |= KQ_SLEEP;
 	error = kqueue_sleep(kq, tsp);
 	/* kqueue_sleep() has released kq_lock. */
-	KERNEL_UNLOCK();
+	//KERNEL_UNLOCK();
 	if (error == 0 || error == EWOULDBLOCK)
 		goto retry;
 	/* don't restart after signals... */
@@ -1331,7 +1331,7 @@ retry:
 	}
 
 	/* The actual scan does not sleep on kq, so unlock the kernel. */
-	KERNEL_UNLOCK();
+	//KERNEL_UNLOCK();
 
 	/*
 	 * Put the end marker in the queue to limit the scan to the events
Index: nfs/nfs_socket.c
===================================================================
RCS file: /cvs/src/sys/nfs/nfs_socket.c,v
retrieving revision 1.137
diff -u -p -r1.137 nfs_socket.c
--- nfs/nfs_socket.c	2 Jan 2021 02:41:42 -0000	1.137
+++ nfs/nfs_socket.c	11 Nov 2021 12:26:30 -0000
@@ -1219,14 +1219,20 @@ nfs_timer(void *arg)
 int
 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct proc *p)
 {
+	int rv = 0;
 	if (rep && (rep->r_flags & R_SOFTTERM))
 		return (EINTR);
 	if (!(nmp->nm_flag & NFSMNT_INT))
 		return (0);
-	if (p && (SIGPENDING(p) & ~p->p_p->ps_sigacts->ps_sigignore &
-	    NFSINT_SIGMASK))
-		return (EINTR);
+	if (p == NULL)
+		return (0);
+	rw_enter_read(&p->p_p->ps_sigacts->ps_rwl);
+	if (SIGPENDING(p) & ~p->p_p->ps_sigacts->ps_sigignore &
+	    NFSINT_SIGMASK)
+		rv = EINTR;
+	rw_exit_read(&p->p_p->ps_sigacts->ps_rwl);
-	return (0);
+
+	return (rv);
 }
 
Index: sys/signalvar.h
===================================================================
RCS file: /cvs/src/sys/sys/signalvar.h,v
retrieving revision 1.50
diff -u -p -r1.50 signalvar.h
--- sys/signalvar.h	6 Oct 2021 15:46:03 -0000	1.50
+++ sys/signalvar.h	11 Nov 2021 12:26:30 -0000
@@ -35,6 +35,9 @@
 #ifndef	_SYS_SIGNALVAR_H_		/* tmp for user.h */
 #define	_SYS_SIGNALVAR_H_
 
+/* XXX */
+#include
+
 /*
  * Kernel signal definitions and data structures,
  * not exported to user programs.
@@ -54,6 +57,9 @@ struct sigacts {
 	sigset_t ps_sigignore;		/* signals being ignored */
 	sigset_t ps_sigcatch;		/* signals being caught by user */
 	int	ps_sigflags;		/* signal flags, below */
+
+	struct rwlock ps_rwl;
+	struct rdseq ps_rdseq;
 };
 
 /* signal flags */
Index: sys/percpu.h
===================================================================
RCS file: /cvs/src/sys/sys/percpu.h,v
retrieving revision 1.8
diff -u -p -r1.8 percpu.h
--- sys/percpu.h	28 Aug 2018 15:15:02 -0000	1.8
+++ sys/percpu.h	11 Nov 2021 12:26:30 -0000
@@ -44,6 +44,16 @@ struct counters_ref {
 	uint64_t		*c;
 };
 
+struct rdseq {
+	volatile unsigned int
+				 seq;
+};
+
+struct rdseq_ref {
+	int			 loop;
+	unsigned int		 seq;
+};
+
 #ifdef _KERNEL
 
 #include
@@ -193,6 +203,33 @@ counters_pkt(struct cpumem *cm, unsigned
 #endif
 
 #define COUNTERS_BOOT_INITIALIZER(_name)	CPUMEM_BOOT_INITIALIZER(_name)
+
+void	rdseq_init(struct rdseq *);
+void	rdseq_enter_write(struct rdseq *);
+void	rdseq_leave_write(struct rdseq *);
+
+unsigned int	rdseq_read(struct rdseq *);
+
+static inline void
+rdseq_enter(struct rdseq *rs, struct rdseq_ref *rsr)
+{
+	rsr->seq = rdseq_read(rs);
+	rsr->loop = 1;
+}
+
+static inline void
+rdseq_leave(struct rdseq *rs, struct rdseq_ref *rsr)
+{
+	unsigned int seq;
+
+	seq = rdseq_read(rs);
+	rsr->loop = rsr->seq != seq;
+	if (rsr->loop)
+		rsr->seq = seq;
+}
+
+#define RDSEQ_LOOP(_rs, _rsr) \
+	for (rdseq_enter((_rs), (_rsr)); (_rsr)->loop; rdseq_leave((_rs), (_rsr)))
 
 #endif /* _KERNEL */
 #endif /* _SYS_PERCPU_H_ */
Index: net/bpf.c
===================================================================
RCS file: /cvs/src/sys/net/bpf.c,v
retrieving revision 1.206
diff -u -p -r1.206 bpf.c
--- net/bpf.c	23 Oct 2021 15:00:11 -0000	1.206
+++ net/bpf.c	11 Nov 2021 12:26:30 -0000
@@ -438,6 +438,7 @@ bpfread(dev_t dev, struct uio *uio, int
 		return (ENXIO);
 
 	bpf_get(d);
+	KERNEL_UNLOCK();
 	mtx_enter(&d->bd_mtx);
 
 	/*
@@ -554,6 +555,7 @@ out:
 	mtx_leave(&d->bd_mtx);
 	bpf_put(d);
+	KERNEL_LOCK();
 	return (error);
 }
 
@@ -609,6 +611,7 @@ bpfwrite(dev_t dev, struct uio *uio, int
 		return (ENXIO);
 
 	bpf_get(d);
+	KERNEL_UNLOCK();
 
 	ifp = d->bd_bif->bif_ifp;
 	if (ifp == NULL || (ifp->if_flags & IFF_UP) == 0) {
@@ -643,6 +646,7 @@ bpfwrite(dev_t dev, struct uio *uio, int
 
 out:
 	bpf_put(d);
+	KERNEL_LOCK();
 	return (error);
 }
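
A note on the rdseq primitive added in kern_synch.c and sys/percpu.h: as the "XXX this is very quick and dirty" comment says, the writer only increments rs->seq and the reader only spins on it, with no memory barriers anywhere.  On weakly ordered MP hardware a reader inside RDSEQ_LOOP() can therefore observe the copied sigacts fields reordered relative to the sequence word and still treat the snapshot as consistent.  The sketch below shows one way the write/read sides could be hardened; it assumes the membar_producer()/membar_consumer() interfaces from <sys/atomic.h> and is only an illustration of the usual seqlock ordering, not part of the diff above.

/*
 * Sketch only: barrier-aware variants of the rdseq functions,
 * assuming membar_producer()/membar_consumer() from <sys/atomic.h>.
 */
void
rdseq_enter_write(struct rdseq *rs)
{
	rs->seq++;		/* odd count: a write is in progress */
	membar_producer();	/* order the bump before the protected stores */
}

void
rdseq_leave_write(struct rdseq *rs)
{
	membar_producer();	/* order the protected stores before the bump */
	rs->seq++;		/* even count: readers may complete */
}

unsigned int
rdseq_read(struct rdseq *rs)
{
	unsigned int seq;

	while ((seq = rs->seq) & 1)	/* writer active, wait it out */
		CPU_BUSY_CYCLE();
	membar_consumer();		/* order the seq load before the data loads */

	return (seq);
}

The rdseq_leave() check in percpu.h would likewise want a membar_consumer() between the data loads and the re-read of the sequence number, so that a racing writer is reliably detected and the RDSEQ_LOOP() body retried.  Readers must also avoid acting on the copied values until the loop has terminated, which the ptsignal() user above already respects.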