Index: sys/proc.h
===================================================================
RCS file: /cvs/src/sys/sys/proc.h,v
diff -u -p -r1.384 proc.h
--- sys/proc.h	22 Apr 2025 04:53:50 -0000	1.384
+++ sys/proc.h	28 Apr 2025 06:01:19 -0000
@@ -440,6 +449,7 @@ struct proc {
 #define	P_SINTR		0x00000080	/* Sleep is interruptible. */
 #define	P_SYSTEM	0x00000200	/* No sigs, stats or swapping. */
 #define	P_TIMEOUT	0x00000400	/* Timing out during sleep. */
+#define	P_TIMEOUTRAN	0x00000800	/* Timeout handler has finished. */
 #define	P_TRACESINGLE	0x00001000	/* Ptrace: keep single threaded. */
 #define	P_WEXIT		0x00002000	/* Working on exiting. */
 #define	P_OWEUPC	0x00008000	/* Owe proc an addupc() at next ast. */
@@ -451,6 +461,7 @@ struct proc {
 #define	P_BITS \
     ("\20" "\01INKTR" "\02PROFPEND" "\03ALRMPEND" "\04SIGSUSPEND" \
      "\05CANTSLEEP" "\06INSCHED" "\010SINTR" "\012SYSTEM" "\013TIMEOUT" \
+     "\014TIMEOUTRAN" \
      "\015TRACESINGLE" "\016WEXIT" "\020OWEUPC" "\024SUSPSINGLE" \
      "\033THREAD" "\034SUSPSIG" "\037CPUPEG")
 
Index: kern/kern_synch.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_synch.c,v
diff -u -p -r1.220 kern_synch.c
--- kern/kern_synch.c	10 Mar 2025 09:28:56 -0000	1.220
+++ kern/kern_synch.c	28 Apr 2025 06:01:19 -0000
@@ -422,14 +422,39 @@ sleep_finish(int timo, int do_sleep)
 	 */
 	atomic_clearbits_int(&p->p_flag, P_SINTR);
 
-	if (timo != 0) {
-		if (p->p_flag & P_TIMEOUT) {
+	/*
+	 * There are three situations to handle when cancelling the
+	 * p_sleep_to timeout:
+	 *
+	 * 1. The timeout has not fired yet
+	 * 2. The timeout is running
+	 * 3. The timeout has run
+	 *
+	 * If timeout_del succeeds then the timeout won't run and
+	 * situation 1 is dealt with.
+	 *
+	 * If timeout_del does not remove the timeout, then we're
+	 * handling 2 or 3, but it won't tell us which one. Instead,
+	 * the P_TIMEOUTRAN flag is used to figure out when we move
+	 * from 2 to 3. endtsleep() (the p_sleep_to handler) sets the
+	 * flag when it's finished running, so we spin waiting for
+	 * it.
+	 *
+	 * We spin instead of sleeping because endtsleep() takes
+	 * the sched lock to do all its work. If we wanted to go
+	 * to sleep to wait for endtsleep() to run, we'd also have to
+	 * take the sched lock, so we'd be spinning against it anyway.
+	 */
+	if (timo != 0 && !timeout_del(&p->p_sleep_to)) {
+		int flag;
+
+		/* Wait for the endtsleep() timeout to finish running. */
+		while (!ISSET(flag = atomic_load_int(&p->p_flag), P_TIMEOUTRAN))
+			CPU_BUSY_CYCLE();
+		atomic_clearbits_int(&p->p_flag, P_TIMEOUT | P_TIMEOUTRAN);
+
+		if (ISSET(flag, P_TIMEOUT))
 			error1 = EWOULDBLOCK;
-		} else {
-			/* This can sleep. It must not use timeouts. */
-			timeout_del_barrier(&p->p_sleep_to);
-		}
-		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
 	}
 
 	/*
@@ -546,6 +571,9 @@ endtsleep(void *arg)
 	SCHED_LOCK();
 	wakeup_proc(p, P_TIMEOUT);
 	SCHED_UNLOCK();
+
+	atomic_setbits_int(&p->p_flag, P_TIMEOUTRAN);
+	/* p is unsafe to deref after this point */
 }
 
 	/*
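
As a companion to the diff, here is a rough userland sketch of the cancellation
pattern it relies on: try to delete the pending timeout; if that fails, spin
until the handler announces it has finished, then check whether it actually
fired. This is not kernel code and every identifier in it (pending, timed_out,
handler_ran, handler, cancel) is invented for illustration; timeout_del(),
CPU_BUSY_CYCLE() and the P_TIMEOUT/P_TIMEOUTRAN bits are only mirrored by
loose analogues.

	/*
	 * Userland sketch only; names are made up and the primitives are
	 * loose analogues of the kernel ones, not the real thing.
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int pending = 1;		/* the "timeout" is still queued */
	static atomic_int timed_out = 0;	/* rough analogue of P_TIMEOUT */
	static atomic_int handler_ran = 0;	/* rough analogue of P_TIMEOUTRAN */

	/* Plays the role of endtsleep(): do the work, then announce completion. */
	static void *
	handler(void *arg)
	{
		(void)arg;
		if (atomic_exchange(&pending, 0)) {
			atomic_store(&timed_out, 1);	/* "the sleep timed out" */
			atomic_store(&handler_ran, 1);	/* must be the last store */
		}
		return (NULL);
	}

	/* Plays the role of the new sleep_finish() logic. */
	static int
	cancel(void)
	{
		/* Analogue of timeout_del(): try to claim the pending timeout. */
		if (atomic_exchange(&pending, 0))
			return (0);		/* situation 1: it never ran */

		/* Situation 2 or 3: spin until the handler says it is done. */
		while (!atomic_load(&handler_ran))
			;			/* CPU_BUSY_CYCLE() stand-in */

		return (atomic_load(&timed_out));	/* EWOULDBLOCK analogue */
	}

	int
	main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, handler, NULL);
		usleep(1000);
		printf("timed out: %d\n", cancel());
		pthread_join(t, NULL);
		return (0);
	}

The detail the sketch tries to preserve is that the handler publishes its
"ran" flag as the very last thing it does, matching endtsleep() setting
P_TIMEOUTRAN only once it no longer needs to touch the proc.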