Index: bpf.c
===================================================================
RCS file: /cvs/src/sys/net/bpf.c,v
retrieving revision 1.199
diff -u -p -r1.199 bpf.c
--- bpf.c	26 Dec 2020 16:30:58 -0000	1.199
+++ bpf.c	27 Dec 2020 00:54:28 -0000
@@ -1,4 +1,4 @@
-/*	$OpenBSD: bpf.c,v 1.199 2020/12/26 16:30:58 cheloha Exp $	*/
+/*	$OpenBSD: bpf.c,v 1.197 2020/12/12 11:49:02 jan Exp $	*/
 /*	$NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $	*/
 
 /*
@@ -380,7 +380,8 @@ bpfopen(dev_t dev, int flag, int mode, s
 	smr_init(&bd->bd_smr);
 	sigio_init(&bd->bd_sigio);
 
-	bd->bd_rtout = 0;	/* no timeout by default */
+	TAILQ_INIT(&bd->bd_readers);
+	bd->bd_rtout = INFSLP;	/* no timeout by default */
 	bd->bd_rnonblock = ISSET(flag, FNONBLOCK);
 
 	bpf_get(bd);
@@ -430,7 +431,7 @@ bpfread(dev_t dev, struct uio *uio, int
 {
 	struct bpf_d *d;
 	caddr_t hbuf;
-	int end, error, hlen, nticks;
+	int error, hlen;
 
 	KERNEL_ASSERT_LOCKED();
 
@@ -451,12 +452,6 @@ bpfread(dev_t dev, struct uio *uio, int
 	}
 
 	/*
-	 * If there's a timeout, mark when the read should end.
-	 */
-	if (d->bd_rtout)
-		end = ticks + (int)d->bd_rtout;
-
-	/*
 	 * If the hold buffer is empty, then do a timed sleep, which
 	 * ends when the timeout expires or when enough packets
 	 * have arrived to fill the store buffer.
@@ -483,21 +478,12 @@ bpfread(dev_t dev, struct uio *uio, int
 		if (d->bd_rnonblock) {
 			/* User requested non-blocking I/O */
 			error = EWOULDBLOCK;
-		} else if (d->bd_rtout == 0) {
-			/* No read timeout set. */
-			d->bd_nreaders++;
-			error = msleep_nsec(d, &d->bd_mtx, PRINET|PCATCH,
-			    "bpf", INFSLP);
-			d->bd_nreaders--;
-		} else if ((nticks = end - ticks) > 0) {
-			/* Read timeout has not expired yet. */
-			d->bd_nreaders++;
-			error = msleep(d, &d->bd_mtx, PRINET|PCATCH, "bpf",
-			    nticks);
-			d->bd_nreaders--;
 		} else {
-			/* Read timeout has expired. */
-			error = EWOULDBLOCK;
+			struct bpf_d_reader bdr;
+			TAILQ_INSERT_TAIL(&d->bd_readers, &bdr, bdr_entry);
+			error = msleep_nsec(&bdr, &d->bd_mtx, PRINET|PCATCH,
+			    "bpf", d->bd_rtout);
+			TAILQ_REMOVE(&d->bd_readers, &bdr, bdr_entry);
 		}
 		if (error == EINTR || error == ERESTART)
 			goto out;
@@ -561,16 +547,27 @@ out:
 void
 bpf_wakeup(struct bpf_d *d)
 {
+	struct bpf_d_reader *bdr;
+
 	MUTEX_ASSERT_LOCKED(&d->bd_mtx);
 
 	/*
+	 * We have a thread sleeping in bpfread(), wake it up.
+	 */
+	bdr = TAILQ_FIRST(&d->bd_readers);
+	if (bdr != NULL)
+		wakeup(bdr);
+
+	/*
 	 * As long as pgsigio() and selwakeup() need to be protected
 	 * by the KERNEL_LOCK() we have to delay the wakeup to
 	 * another context to keep the hot path KERNEL_LOCK()-free.
 	 */
-	bpf_get(d);
-	if (!task_add(systq, &d->bd_wake_task))
-		bpf_put(d);
+	if ((d->bd_async && d->bd_sig) || d->bd_sel.si_seltid) {
+		bpf_get(d);
+		if (!task_add(systq, &d->bd_wake_task))
+			bpf_put(d);
+	}
 }
 
 void
@@ -578,7 +575,6 @@ bpf_wakeup_cb(void *xd)
 {
 	struct bpf_d *d = xd;
 
-	wakeup(d);
 	if (d->bd_async && d->bd_sig)
 		pgsigio(&d->bd_sigio, d->bd_sig, 0);
 
@@ -861,27 +857,27 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t
 	case BIOCSRTIMEOUT:
 		{
 			struct timeval *tv = (struct timeval *)addr;
-			u_long rtout;
+			uint64_t rtout;
 
-			/* Compute number of ticks. */
+			/* Compute the read timeout in nanoseconds. */
 			if (tv->tv_sec < 0 || !timerisvalid(tv)) {
 				error = EINVAL;
 				break;
 			}
-			if (tv->tv_sec > INT_MAX / hz) {
-				error = EOVERFLOW;
-				break;
-			}
-			rtout = tv->tv_sec * hz;
-			if (tv->tv_usec / tick > INT_MAX - rtout) {
-				error = EOVERFLOW;
-				break;
+
+			CTASSERT(INFSLP == UINT64_MAX);
+			if (tv->tv_sec == 0 && tv->tv_usec == 0)
+				rtout = INFSLP;
+			else {
+				rtout = TIMEVAL_TO_NSEC(tv);
+				if (rtout == UINT64_MAX) {
+					error = EOVERFLOW;
+					break;
+				}
 			}
-			rtout += tv->tv_usec / tick;
+
 			mtx_enter(&d->bd_mtx);
 			d->bd_rtout = rtout;
-			if (d->bd_rtout == 0 && tv->tv_usec != 0)
-				d->bd_rtout = 1;
 			mtx_leave(&d->bd_mtx);
 			break;
 		}
@@ -892,11 +888,17 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t
 	case BIOCGRTIMEOUT:
 		{
 			struct timeval *tv = (struct timeval *)addr;
+			uint64_t rtout;
 
 			mtx_enter(&d->bd_mtx);
-			tv->tv_sec = d->bd_rtout / hz;
-			tv->tv_usec = (d->bd_rtout % hz) * tick;
+			rtout = d->bd_rtout;
 			mtx_leave(&d->bd_mtx);
+
+			if (rtout == INFSLP) {
+				tv->tv_sec = 0;
+				tv->tv_usec = 0;
+			} else
+				NSEC_TO_TIMEVAL(rtout, tv);
 			break;
 		}
 
@@ -1542,17 +1544,6 @@ bpf_catchpacket(struct bpf_d *d, u_char
 		 * reads should be woken up.
 		 */
 		do_wakeup = 1;
-	}
-
-	if (d->bd_nreaders > 0) {
-		/*
-		 * We have one or more threads sleeping in bpfread().
-		 * We got a packet, so wake up all readers.
-		 */
-		if (d->bd_fbuf != NULL) {
-			ROTATE_BUFFERS(d);
-			do_wakeup = 1;
-		}
 	}
 
 	if (do_wakeup)
Index: bpfdesc.h
===================================================================
RCS file: /cvs/src/sys/net/bpfdesc.h,v
retrieving revision 1.43
diff -u -p -r1.43 bpfdesc.h
--- bpfdesc.h	26 Dec 2020 16:30:58 -0000	1.43
+++ bpfdesc.h	27 Dec 2020 00:54:28 -0000
@@ -1,4 +1,4 @@
-/*	$OpenBSD: bpfdesc.h,v 1.43 2020/12/26 16:30:58 cheloha Exp $	*/
+/*	$OpenBSD: bpfdesc.h,v 1.42 2020/12/11 05:00:21 cheloha Exp $	*/
 /*	$NetBSD: bpfdesc.h,v 1.11 1995/09/27 18:30:42 thorpej Exp $	*/
 
 /*
@@ -53,6 +53,11 @@ struct bpf_program_smr {
 	struct smr_entry	bps_smr;
 };
 
+struct bpf_d_reader {
+	TAILQ_ENTRY(bpf_d_reader) bdr_entry;
+};
+TAILQ_HEAD(bpf_d_readers, bpf_d_reader);
+
 /*
  * Descriptor associated with each open bpf file.
  */
@@ -78,8 +83,9 @@ struct bpf_d {
 	int		bd_in_uiomove;	/* for debugging purpose */
 
 	struct bpf_if	*bd_bif;	/* interface descriptor */
-	u_long		bd_rtout;	/* [m] Read timeout in 'ticks' */
-	u_long		bd_nreaders;	/* [m] # threads asleep in bpfread() */
+	uint64_t	bd_rtout;	/* [m] Read timeout in nanoseconds */
+	struct bpf_d_readers
+			bd_readers;	/* [m] threads asleep in bpfread() */
 	int		bd_rnonblock;	/* true if nonblocking reads are set */
 	struct bpf_program_smr
 			*bd_rfilter;	/* read filter code */
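
Not part of the diff above: a minimal userland sketch for exercising the BIOCSRTIMEOUT/BIOCGRTIMEOUT path that this change converts from ticks to nanoseconds. With the diff applied the timeout is kept with full microsecond precision, so the set/get round trip below should print the value unchanged, and a 0.0 timeval still selects "no timeout" (INFSLP internally). The device path /dev/bpf0 and the 2.5 second value are arbitrary assumptions, not anything the diff mandates.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <net/bpf.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct timeval set = { 2, 500000 };	/* 2.5 second read timeout */
	struct timeval get;
	int fd;

	/* Assumed device node; any free bpf(4) unit will do. */
	if ((fd = open("/dev/bpf0", O_RDONLY)) == -1)
		err(1, "open");

	if (ioctl(fd, BIOCSRTIMEOUT, &set) == -1)
		err(1, "BIOCSRTIMEOUT");
	if (ioctl(fd, BIOCGRTIMEOUT, &get) == -1)
		err(1, "BIOCGRTIMEOUT");

	/* With the diff applied this should report 2.500000 exactly. */
	printf("read timeout: %lld.%06ld\n",
	    (long long)get.tv_sec, (long)get.tv_usec);

	close(fd);
	return 0;
}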