Index: kern_timeout.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_timeout.c,v
diff -u -p -r1.103 kern_timeout.c
--- kern_timeout.c	2 May 2025 00:51:09 -0000	1.103
+++ kern_timeout.c	20 May 2025 01:34:14 -0000
@@ -383,19 +383,29 @@ timeout_add_sec(struct timeout *to, int
 {
 	uint64_t to_ticks;
 
-	to_ticks = (uint64_t)hz * secs;
+	KASSERT(secs >= 0);
+	/* secs is a 31bit int, so this can't overflow 64bits */
+	to_ticks = (uint64_t)hz * (uint64_t)secs;
 
 	return timeout_add_ticks(to, to_ticks, 1);
 }
 
+/*
+ * interpret the specified times below as AT LEAST how long the
+ * system should wait before firing the timeouts. this requires
+ * rounding up, which has the potential to overflow. if we detect
+ * overflow, interpret it as "wait for as long as possible". this will
+ * be shorter than the specified time, which violates the "wait at least
+ * this much time" rule, but it's on the other end of the timescale.
+ */
+
 int
 timeout_add_msec(struct timeout *to, uint64_t msecs)
 {
-	uint64_t to_ticks;
-
-	to_ticks = msecs * 1000 / tick;
+	if (msecs >= (UINT64_MAX / 1000))
+		return timeout_add(to, INT_MAX);
 
-	return timeout_add_ticks(to, to_ticks, msecs > 0);
+	return timeout_add_usec(to, msecs * 1000);
 }
 
 int
@@ -403,7 +413,10 @@ timeout_add_usec(struct timeout *to, uin
 {
 	uint64_t to_ticks;
 
-	to_ticks = usecs / tick;
+	if (usecs >= (UINT64_MAX - tick))
+		return timeout_add(to, INT_MAX);
+
+	to_ticks = (usecs + (tick - 1)) / tick;
 
 	return timeout_add_ticks(to, to_ticks, usecs > 0);
 }
@@ -413,7 +426,10 @@ timeout_add_nsec(struct timeout *to, uin
 {
 	uint64_t to_ticks;
 
-	to_ticks = nsecs / (tick * 1000);
+	if (nsecs >= (UINT64_MAX - tick_nsec))
+		return timeout_add(to, INT_MAX);
+
+	to_ticks = (nsecs + (tick_nsec - 1)) / tick_nsec;
 
 	return timeout_add_ticks(to, to_ticks, nsecs > 0);
 }
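
For reference, here is a minimal userland sketch of the round-up and clamp
behaviour the comment above describes. It is not part of the diff:
usec_to_ticks() and FAKE_TICK are made-up stand-ins for the conversion in
timeout_add_usec() and the kernel's tick length, and the INT_MAX return
stands in for the timeout_add(to, INT_MAX) clamp.

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define FAKE_TICK	10000ULL	/* pretend 10000us per tick (hz = 100) */

static uint64_t
usec_to_ticks(uint64_t usecs)
{
	/*
	 * Rounding up can overflow when usecs is close to UINT64_MAX,
	 * so detect that first and clamp to the longest possible wait.
	 */
	if (usecs >= (UINT64_MAX - FAKE_TICK))
		return INT_MAX;

	/* round up: waiting a bit too long is fine, firing early is not */
	return (usecs + (FAKE_TICK - 1)) / FAKE_TICK;
}

int
main(void)
{
	/* 1us still waits a full tick instead of firing after 0 ticks */
	printf("%llu\n", (unsigned long long)usec_to_ticks(1));		/* 1 */
	printf("%llu\n", (unsigned long long)usec_to_ticks(10000));	/* 1 */
	printf("%llu\n", (unsigned long long)usec_to_ticks(10001));	/* 2 */
	/* near-overflow input clamps instead of wrapping around */
	printf("%llu\n", (unsigned long long)usec_to_ticks(UINT64_MAX)); /* INT_MAX */
	return 0;
}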