Index: kern_timeout.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_timeout.c,v
diff -u -p -r1.101 kern_timeout.c
--- kern_timeout.c	13 Jan 2025 03:21:10 -0000	1.101
+++ kern_timeout.c	27 Apr 2025 05:58:06 -0000
@@ -435,6 +435,9 @@ timeout_del(struct timeout *to)
 {
 	int ret = 0;
 
+	if (!ISSET(to->to_flags, TIMEOUT_ONQUEUE))
+		return (0);
+
 	mtx_enter(&timeout_mutex);
 	if (ISSET(to->to_flags, TIMEOUT_ONQUEUE)) {
 		CIRCQ_REMOVE(&to->to_list);
Index: kern_synch.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_synch.c,v
diff -u -p -r1.219 kern_synch.c
--- kern_synch.c	5 Feb 2025 12:21:27 -0000	1.219
+++ kern_synch.c	27 Apr 2025 05:58:06 -0000
@@ -424,14 +424,13 @@ sleep_finish(int timo, int do_sleep)
 	 */
 	atomic_clearbits_int(&p->p_flag, P_SINTR);
 
-	if (timo != 0) {
-		if (p->p_flag & P_TIMEOUT) {
-			error1 = EWOULDBLOCK;
-		} else {
-			/* This can sleep. It must not use timeouts. */
-			timeout_del_barrier(&p->p_sleep_to);
-		}
+	if (timo != 0 && !timeout_del(&p->p_sleep_to)) {
+		/* Wait for endtsleep timeout to finish running */
+		while (!ISSET(atomic_load_int(&p->p_flag), P_TIMEOUT))
+			CPU_BUSY_CYCLE();
 		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
+
+		error1 = EWOULDBLOCK;
 	}
 
 	/*
@@ -531,8 +530,10 @@ endtsleep(void *arg)
 	struct proc *p = arg;
 
 	SCHED_LOCK();
-	wakeup_proc(p, P_TIMEOUT);
+	wakeup_proc(p, 0);
 	SCHED_UNLOCK();
+
+	atomic_setbits_int(&p->p_flag, P_TIMEOUT);
 }
 
 /*
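
For reference, the kern_synch.c hunks form a simple flag handshake: sleep_finish()
only spins when timeout_del() reports the timeout is no longer pending, and
endtsleep() sets P_TIMEOUT as its last step, after the wakeup, so the spinning
sleeper cannot proceed until the handler is done with the proc. Below is a minimal
userland sketch of that handshake, not the kernel code: it assumes C11 atomics and
a pthread in place of atomic_setbits_int()/atomic_load_int(), CPU_BUSY_CYCLE() and
the timeout wheel, and the names done_flag, timeout_handler and
sleeper_wait_for_handler are invented purely for illustration.

/*
 * Userland sketch of the sleep_finish()/endtsleep() handshake in the diff.
 * C11 atomics and a pthread stand in for the kernel primitives; all names
 * here are illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int done_flag;		/* plays the role of P_TIMEOUT */

/* Stands in for endtsleep(): do the wakeup work, then publish the flag last. */
static void *
timeout_handler(void *arg)
{
	(void)arg;
	/* ... the wakeup itself would happen here, under the scheduler lock ... */
	atomic_store(&done_flag, 1);	/* handler is finished with the proc */
	return NULL;
}

/* Stands in for sleep_finish() after timeout_del() reported "not pending". */
static void
sleeper_wait_for_handler(void)
{
	/* Busy-wait until the handler signals completion, like CPU_BUSY_CYCLE(). */
	while (atomic_load(&done_flag) == 0)
		;	/* spin */
	atomic_store(&done_flag, 0);	/* consume the flag, as the diff clears P_TIMEOUT */
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, timeout_handler, NULL);
	sleeper_wait_for_handler();
	pthread_join(t, NULL);
	printf("timeout handler observed as finished\n");
	return 0;
}

The ordering in the sketch mirrors the diff: the handler publishes the flag only
after all of its other work, which is what makes the busy-wait in the sleeper a
safe completion barrier.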