Index: kern_timeout.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_timeout.c,v
diff -u -p -r1.103 kern_timeout.c
--- kern_timeout.c	2 May 2025 00:51:09 -0000	1.103
+++ kern_timeout.c	19 May 2025 23:54:24 -0000
@@ -383,19 +383,32 @@ timeout_add_sec(struct timeout *to, int
 {
 	uint64_t to_ticks;
 
-	to_ticks = (uint64_t)hz * secs;
+	KASSERT(secs >= 0);
+	/* secs is a 31-bit int, so this can't overflow 64 bits */
+	to_ticks = (uint64_t)hz * (uint64_t)secs;
 
 	return timeout_add_ticks(to, to_ticks, 1);
 }
 
+/*
+ * interpret the specified times below as AT LEAST how long the
+ * system should wait before firing the timeouts. this requires
+ * rounding up, which has the potential to overflow. if we detect
+ * overflow, interpret it as "wait for as long as possible". this will
+ * be shorter than the specified time, which violates the "wait at least
+ * this much time" rule, but it's on the other end of the timescale.
+ */
+
 int
 timeout_add_msec(struct timeout *to, uint64_t msecs)
 {
-	uint64_t to_ticks;
+	uint64_t usecs;
 
-	to_ticks = msecs * 1000 / tick;
+	usecs = msecs * 1000;
+	if (usecs < msecs)
+		return timeout_add(to, INT_MAX);
 
-	return timeout_add_ticks(to, to_ticks, msecs > 0);
+	return timeout_add_usec(to, usecs);
 }
 
 int
@@ -403,7 +416,10 @@ timeout_add_usec(struct timeout *to, uin
 {
 	uint64_t to_ticks;
 
-	to_ticks = usecs / tick;
+	to_ticks = usecs + (tick - 1);
+	if (to_ticks < usecs)
+		return timeout_add(to, INT_MAX);
+	to_ticks /= tick;
 
 	return timeout_add_ticks(to, to_ticks, usecs > 0);
 }
@@ -413,7 +429,10 @@ timeout_add_nsec(struct timeout *to, uin
 {
 	uint64_t to_ticks;
 
-	to_ticks = nsecs / (tick * 1000);
+	to_ticks = nsecs + (tick_nsec - 1);
+	if (to_ticks < nsecs)
+		return timeout_add(to, INT_MAX);
+	to_ticks /= tick_nsec;
 
 	return timeout_add_ticks(to, to_ticks, nsecs > 0);
 }