Index: kern_task.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_task.c,v
retrieving revision 1.31
diff -u -p -r1.31 kern_task.c
--- kern_task.c	1 Aug 2020 08:40:20 -0000	1.31
+++ kern_task.c	16 Feb 2022 12:22:00 -0000
@@ -30,6 +30,11 @@
 #include <sys/kcov.h>
 #endif
 
+#include "kstat.h"
+#if NKSTAT > 0
+#include <sys/kstat.h>
+#endif
+
 #ifdef WITNESS
 
 static struct lock_type taskq_lock_type = {
@@ -41,6 +46,30 @@ static struct lock_type taskq_lock_type
 
 #endif /* WITNESS */
 
+#if NKSTAT > 0
+struct taskq_kstat {
+	struct kstat_kv		 tk_adds;
+	struct kstat_kv		 tk_add_retries;
+	struct kstat_kv		 tk_dels;
+	struct kstat_kv		 tk_del_retries;
+	struct kstat_kv		 tk_nexts;
+	struct kstat_kv		 tk_next_retries;
+	struct kstat_kv		 tk_next_pending;
+	struct kstat_kv		 tk_sleeps;
+};
+
+static const struct taskq_kstat taskq_kstat_tpl = {
+	KSTAT_KV_INITIALIZER("adds", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("add_retries", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("dels", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("del_retries", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("nexts", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("next_retries", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("next_pending", KSTAT_KV_T_COUNTER32),
+	KSTAT_KV_INITIALIZER("sleeps", KSTAT_KV_T_COUNTER32),
+};
+#endif /* NKSTAT > 0 */
+
 struct taskq_thread {
 	SLIST_ENTRY(taskq_thread)
 				 tt_entry;
@@ -67,9 +96,22 @@ struct taskq {
 	unsigned int		 tq_bgen;
 	unsigned int		 tq_bthreads;
 
+	unsigned int		 tq_adds;
+	unsigned int		 tq_add_retries;
+	unsigned int		 tq_dels;
+	unsigned int		 tq_del_retries;
+	unsigned int		 tq_nexts;
+	unsigned int		 tq_next_retries;
+	unsigned int		 tq_next_pending;
+	unsigned int		 tq_sleeps;
+
 #ifdef WITNESS
 	struct lock_object	 tq_lock_object;
 #endif
+
+#if NKSTAT > 0
+	struct kstat		*tq_kstat;
+#endif
 };
 
 static const char taskq_sys_name[] = "systq";
@@ -130,9 +172,12 @@ void	taskq_create_thread(void *);
 void	taskq_barrier_task(void *);
 int	taskq_sleep(const volatile void *, struct mutex *, int,
	    const char *, int);
-int	taskq_next_work(struct taskq *, struct task *);
 void	taskq_thread(void *);
 
+#if NKSTAT > 0
+static int	taskq_kstat_copy(struct kstat *, void *);
+#endif
+
 void
 taskq_init(void)
 {
@@ -148,7 +193,7 @@ taskq_create(const char *name, unsigned
 {
 	struct taskq *tq;
 
-	tq = malloc(sizeof(*tq), M_DEVBUF, M_WAITOK);
+	tq = malloc(sizeof(*tq), M_DEVBUF, M_WAITOK|M_ZERO);
 	if (tq == NULL)
 		return (NULL);
 
@@ -173,6 +218,17 @@ taskq_create(const char *name, unsigned
 	witness_init(&tq->tq_lock_object, &taskq_lock_type);
 #endif
 
+#if NKSTAT > 0
+	tq->tq_kstat = kstat_create(name, 0, "taskq", 0, KSTAT_T_KV, 0);
+	if (tq->tq_kstat != NULL) {
+		kstat_set_mutex(tq->tq_kstat, &tq->tq_mtx);
+		tq->tq_kstat->ks_softc = tq;
+		tq->tq_kstat->ks_datalen = sizeof(taskq_kstat_tpl);
+		tq->tq_kstat->ks_copy = taskq_kstat_copy;
+		kstat_install(tq->tq_kstat);
+	}
+#endif
+
 	/* try to create a thread to guarantee that tasks will be serviced */
 	kthread_create_deferred(taskq_create_thread, tq);
 
@@ -357,7 +413,12 @@ task_add(struct taskq *tq, struct task *
 	if (ISSET(w->t_flags, TASK_ONQUEUE))
 		return (0);
 
-	mtx_enter(&tq->tq_mtx);
+	if (!mtx_enter_try(&tq->tq_mtx)) {
+		mtx_enter(&tq->tq_mtx);
+		tq->tq_add_retries++;
+	}
+	tq->tq_adds++;
+
 	if (!ISSET(w->t_flags, TASK_ONQUEUE)) {
 		rv = 1;
 		SET(w->t_flags, TASK_ONQUEUE);
@@ -382,7 +443,12 @@ task_del(struct taskq *tq, struct task *
 	if (!ISSET(w->t_flags, TASK_ONQUEUE))
 		return (0);
 
-	mtx_enter(&tq->tq_mtx);
+	if (!mtx_enter_try(&tq->tq_mtx)) {
+		mtx_enter(&tq->tq_mtx);
+		tq->tq_del_retries++;
+	}
+	tq->tq_dels++;
+
 	if (ISSET(w->t_flags, TASK_ONQUEUE)) {
 		rv = 1;
 		CLR(w->t_flags, TASK_ONQUEUE);
@@ -393,18 +459,24 @@ task_del(struct taskq *tq, struct task *
 	return (rv);
 }
 
-int
+static inline int
 taskq_next_work(struct taskq *tq, struct task *work)
 {
 	struct task *next;
 
-	mtx_enter(&tq->tq_mtx);
+	if (!mtx_enter_try(&tq->tq_mtx)) {
+		mtx_enter(&tq->tq_mtx);
+		tq->tq_next_retries++;
+	}
+	tq->tq_nexts++;
+
 	while ((next = TAILQ_FIRST(&tq->tq_worklist)) == NULL) {
 		if (tq->tq_state != TQ_S_RUNNING) {
 			mtx_leave(&tq->tq_mtx);
 			return (0);
 		}
 
+		tq->tq_sleeps++;
 		msleep_nsec(tq, &tq->tq_mtx, PWAIT, "bored", INFSLP);
 	}
 
@@ -414,6 +486,8 @@ taskq_next_work(struct taskq *tq, struct
 	*work = *next; /* copy to caller to avoid races */
 
 	next = TAILQ_FIRST(&tq->tq_worklist);
+	if (next != NULL)
+		tq->tq_next_pending++;
 	mtx_leave(&tq->tq_mtx);
 
 	if (next != NULL && tq->tq_nthreads > 1)
@@ -465,3 +539,24 @@ taskq_thread(void *xtq)
 
 	kthread_exit(0);
 }
+
+#if NKSTAT > 0
+static int
+taskq_kstat_copy(struct kstat *ks, void *dst)
+{
+	struct taskq *tq = ks->ks_softc;
+	struct taskq_kstat *tk = dst;
+
+	*tk = taskq_kstat_tpl;
+	kstat_kv_u32(&tk->tk_adds) = tq->tq_adds;
+	kstat_kv_u32(&tk->tk_add_retries) = tq->tq_add_retries;
+	kstat_kv_u32(&tk->tk_dels) = tq->tq_dels;
+	kstat_kv_u32(&tk->tk_del_retries) = tq->tq_del_retries;
+	kstat_kv_u32(&tk->tk_nexts) = tq->tq_nexts;
+	kstat_kv_u32(&tk->tk_next_retries) = tq->tq_next_retries;
+	kstat_kv_u32(&tk->tk_next_pending) = tq->tq_next_pending;
+	kstat_kv_u32(&tk->tk_sleeps) = tq->tq_sleeps;
+
+	return (0);
+}
+#endif
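
The counters above lean on a simple trylock pattern: mtx_enter_try() is attempted first, and only when that fails does the code fall back to a blocking mtx_enter() and bump the matching *_retries counter. Because every counter is written with tq_mtx held, and kstat_set_mutex() makes the kstat code take that same mutex around taskq_kstat_copy(), the snapshot in the copy routine is consistent without any extra synchronisation.

Here is a minimal userland sketch of the same count-the-slow-path idea, using pthreads instead of the kernel mutex API. The ctr_mtx names and the four-thread demo are made up for illustration; only the locking pattern mirrors the diff.

#include <pthread.h>
#include <stdio.h>

/* illustrative stand-in for a taskq and its counters */
struct ctr_mtx {
	pthread_mutex_t mtx;
	unsigned int enters;	/* cf. tq_adds/tq_dels/tq_nexts */
	unsigned int retries;	/* cf. tq_add_retries etc. */
};

/*
 * Same shape as the diff: try the lock first, and only count a
 * retry when the fast path fails and we have to block. The
 * counters are only touched with the mutex held.
 */
static void
ctr_mtx_enter(struct ctr_mtx *m)
{
	if (pthread_mutex_trylock(&m->mtx) != 0) {
		pthread_mutex_lock(&m->mtx);
		m->retries++;
	}
	m->enters++;
}

static void
ctr_mtx_leave(struct ctr_mtx *m)
{
	pthread_mutex_unlock(&m->mtx);
}

static void *
spin(void *arg)
{
	struct ctr_mtx *m = arg;
	int i;

	for (i = 0; i < 100000; i++) {
		ctr_mtx_enter(m);
		ctr_mtx_leave(m);
	}
	return (NULL);
}

int
main(void)
{
	struct ctr_mtx m = { .mtx = PTHREAD_MUTEX_INITIALIZER };
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, spin, &m);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("enters %u retries %u\n", m.enters, m.retries);
	return (0);
}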
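Built with cc -pthread, the ratio retries/enters from a run approximates the fraction of lock acquisitions that hit contention, which is what comparing add_retries against adds (or next_retries against nexts) shows per taskq. On a kernel with kstat(4) configured, each queue made by taskq_create() should then export these counters as a "taskq"-named kstat readable with kstat(1).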