Index: ifq.c
===================================================================
RCS file: /cvs/src/sys/net/ifq.c,v
retrieving revision 1.49
diff -u -p -r1.49 ifq.c
--- ifq.c	9 Jan 2023 03:39:14 -0000	1.49
+++ ifq.c	15 May 2023 02:57:16 -0000
@@ -78,6 +78,22 @@ void	ifq_restart_task(void *);
 void	ifq_barrier_task(void *);
 void	ifq_bundle_task(void *);
 
+static void
+ifq_senter(struct ifqueue *ifq)
+{
+	if (!mtx_enter_try(&ifq->ifq_task_mtx)) {
+		mtx_enter(&ifq->ifq_task_mtx);
+		ifq->ifq_scontended++;
+	}
+	ifq->ifq_slocks++;
+}
+
+static inline void
+ifq_sleave(struct ifqueue *ifq)
+{
+	mtx_leave(&ifq->ifq_task_mtx);
+}
+
 static inline void
 ifq_run_start(struct ifqueue *ifq)
 {
@@ -92,7 +108,7 @@ ifq_serialize(struct ifqueue *ifq, struc
 	if (ISSET(t->t_flags, TASK_ONQUEUE))
 		return;
 
-	mtx_enter(&ifq->ifq_task_mtx);
+	ifq_senter(ifq);
 	if (!ISSET(t->t_flags, TASK_ONQUEUE)) {
 		SET(t->t_flags, TASK_ONQUEUE);
 		TAILQ_INSERT_TAIL(&ifq->ifq_task_list, t, t_entry);
@@ -106,16 +122,16 @@ ifq_serialize(struct ifqueue *ifq, struc
 			CLR(t->t_flags, TASK_ONQUEUE);
 			work = *t; /* copy to caller to avoid races */
 
-			mtx_leave(&ifq->ifq_task_mtx);
+			ifq_sleave(ifq);
 
 			(*work.t_func)(work.t_arg);
 
-			mtx_enter(&ifq->ifq_task_mtx);
+			ifq_senter(ifq);
 		}
 
 		ifq->ifq_serializer = NULL;
 	}
-	mtx_leave(&ifq->ifq_task_mtx);
+	ifq_sleave(ifq);
 }
 
 int
@@ -202,6 +218,11 @@ struct ifq_kstat_data {
 	struct kstat_kv kd_qlen;
 	struct kstat_kv kd_maxqlen;
 	struct kstat_kv kd_oactive;
+
+	struct kstat_kv kd_qlocks;
+	struct kstat_kv kd_qcontended;
+	struct kstat_kv kd_slocks;
+	struct kstat_kv kd_scontended;
 };
 
 static const struct ifq_kstat_data ifq_kstat_tpl = {
@@ -218,6 +239,11 @@ static const struct ifq_kstat_data ifq_k
 	KSTAT_KV_UNIT_INITIALIZER("maxqlen",
 	    KSTAT_KV_T_UINT32, KSTAT_KV_U_PACKETS),
 	KSTAT_KV_INITIALIZER("oactive", KSTAT_KV_T_BOOL),
+
+	KSTAT_KV_INITIALIZER("qlocks", KSTAT_KV_T_COUNTER64),
+	KSTAT_KV_INITIALIZER("qcontended", KSTAT_KV_T_COUNTER64),
+	KSTAT_KV_INITIALIZER("slocks", KSTAT_KV_T_COUNTER64),
+	KSTAT_KV_INITIALIZER("scontended", KSTAT_KV_T_COUNTER64),
 };
 
 int
@@ -235,6 +261,11 @@ ifq_kstat_copy(struct kstat *ks, void *d
 	kstat_kv_u32(&kd->kd_maxqlen) = ifq->ifq_maxlen;
 	kstat_kv_bool(&kd->kd_oactive) = ifq->ifq_oactive;
 
+	kstat_kv_u64(&kd->kd_qlocks) = ifq->ifq_qlocks;
+	kstat_kv_u64(&kd->kd_qcontended) = ifq->ifq_qcontended;
+	kstat_kv_u64(&kd->kd_slocks) = ifq->ifq_slocks;
+	kstat_kv_u64(&kd->kd_scontended) = ifq->ifq_scontended;
+
 	return (0);
 }
 #endif
@@ -287,6 +318,22 @@ ifq_init(struct ifqueue *ifq, struct ifn
 #endif
 }
 
+static void
+ifq_qenter(struct ifqueue *ifq)
+{
+	if (!mtx_enter_try(&ifq->ifq_mtx)) {
+		mtx_enter(&ifq->ifq_mtx);
+		ifq->ifq_qcontended++;
+	}
+	ifq->ifq_qlocks++;
+}
+
+static inline void
+ifq_qleave(struct ifqueue *ifq)
+{
+	mtx_leave(&ifq->ifq_mtx);
+}
+
 void
 ifq_attach(struct ifqueue *ifq, const struct ifq_ops *newops, void *opsarg)
 {
@@ -298,7 +345,7 @@ ifq_attach(struct ifqueue *ifq, const st
 
 	newq = newops->ifqop_alloc(ifq->ifq_idx, opsarg);
 
-	mtx_enter(&ifq->ifq_mtx);
+	ifq_qenter(ifq);
 	ifq->ifq_ops->ifqop_purge(ifq, &ml);
 	ifq->ifq_len = 0;
 
@@ -316,7 +363,7 @@ ifq_attach(struct ifqueue *ifq, const st
 		} else
 			ifq->ifq_len++;
 	}
-	mtx_leave(&ifq->ifq_mtx);
+	ifq_qleave(ifq);
 
 	oldops->ifqop_free(ifq->ifq_idx, oldq);
 
@@ -361,7 +408,7 @@ ifq_enqueue(struct ifqueue *ifq, struct 
 {
 	struct mbuf *dm;
 
-	mtx_enter(&ifq->ifq_mtx);
+	ifq_qenter(ifq);
 	dm = ifq->ifq_ops->ifqop_enq(ifq, m);
 	if (dm != m) {
 		ifq->ifq_packets++;
@@ -374,7 +421,7 @@ ifq_enqueue(struct ifqueue *ifq, struct 
 		ifq->ifq_len++;
 	else
 		ifq->ifq_qdrops++;
-	mtx_leave(&ifq->ifq_mtx);
+	ifq_qleave(ifq);
 
 	if (dm != NULL)
 		m_freem(dm);
@@ -385,7 +432,7 @@ ifq_enqueue(struct ifqueue *ifq, struct 
 static inline void
 ifq_deq_enter(struct ifqueue *ifq)
 {
-	mtx_enter(&ifq->ifq_mtx);
+	ifq_qenter(ifq);
 }
 
 static inline void
@@ -396,7 +443,7 @@ ifq_deq_leave(struct ifqueue *ifq)
 
 	ml = ifq->ifq_free;
 	ml_init(&ifq->ifq_free);
-	mtx_leave(&ifq->ifq_mtx);
+	ifq_qleave(ifq);
 
 	if (!ml_empty(&ml))
 		ml_purge(&ml);
@@ -518,12 +565,12 @@ ifq_purge(struct ifqueue *ifq)
 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
 	unsigned int rv;
 
-	mtx_enter(&ifq->ifq_mtx);
+	ifq_qenter(ifq);
 	ifq->ifq_ops->ifqop_purge(ifq, &ml);
 	rv = ifq->ifq_len;
 	ifq->ifq_len = 0;
 	ifq->ifq_qdrops += rv;
-	mtx_leave(&ifq->ifq_mtx);
+	ifq_qleave(ifq);
 
 	KASSERT(rv == ml_len(&ml));
 
@@ -586,6 +633,9 @@ struct ifiq_kstat_data {
 
 	struct kstat_kv kd_enqueues;
 	struct kstat_kv kd_dequeues;
+
+	struct kstat_kv kd_qlocks;
+	struct kstat_kv kd_qcontended;
 };
 
 static const struct ifiq_kstat_data ifiq_kstat_tpl = {
@@ -606,6 +656,11 @@ static const struct ifiq_kstat_data ifiq
 	    KSTAT_KV_T_COUNTER64),
 	KSTAT_KV_INITIALIZER("dequeues",
 	    KSTAT_KV_T_COUNTER64),
+
+	KSTAT_KV_INITIALIZER("qlocks",
+	    KSTAT_KV_T_COUNTER64),
+	KSTAT_KV_INITIALIZER("qcontended",
+	    KSTAT_KV_T_COUNTER64),
 };
 
 int
@@ -625,6 +680,9 @@ ifiq_kstat_copy(struct kstat *ks, void *
 	kstat_kv_u64(&kd->kd_enqueues) = ifiq->ifiq_enqueues;
 	kstat_kv_u64(&kd->kd_dequeues) = ifiq->ifiq_dequeues;
 
+	kstat_kv_u64(&kd->kd_qlocks) = ifiq->ifiq_qlocks;
+	kstat_kv_u64(&kd->kd_qcontended) = ifiq->ifiq_qcontended;
+
 	return (0);
 }
 #endif
@@ -649,6 +707,12 @@ ifiq_init(struct ifiqueue *ifiq, struct 
 	ifiq->ifiq_qdrops = 0;
 	ifiq->ifiq_errors = 0;
 
+	ifiq->ifiq_enqueues = 0;
+	ifiq->ifiq_dequeues = 0;
+
+	ifiq->ifiq_qlocks = 0;
+	ifiq->ifiq_qcontended = 0;
+
 	ifiq->ifiq_idx = idx;
 
 #if NKSTAT > 0
@@ -679,6 +743,22 @@ ifiq_destroy(struct ifiqueue *ifiq)
 	ml_purge(&ifiq->ifiq_ml);
 }
 
+static void
+ifiq_qenter(struct ifiqueue *ifiq)
+{
+	if (!mtx_enter_try(&ifiq->ifiq_mtx)) {
+		mtx_enter(&ifiq->ifiq_mtx);
+		ifiq->ifiq_qcontended++;
+	}
+	ifiq->ifiq_qlocks++;
+}
+
+static inline void
+ifiq_qleave(struct ifiqueue *ifiq)
+{
+	mtx_leave(&ifiq->ifiq_mtx);
+}
+
 unsigned int ifiq_maxlen_drop = 2048 * 5;
 unsigned int ifiq_maxlen_return = 2048 * 3;
 
@@ -721,18 +801,18 @@ ifiq_input(struct ifiqueue *ifiq, struct
 		}
 
 		if (ml_empty(ml)) {
-			mtx_enter(&ifiq->ifiq_mtx);
+			ifiq_qenter(ifiq);
 			ifiq->ifiq_packets += packets;
 			ifiq->ifiq_bytes += bytes;
 			ifiq->ifiq_fdrops += fdrops;
-			mtx_leave(&ifiq->ifiq_mtx);
+			ifiq_qleave(ifiq);
 
 			return (0);
 		}
 	}
 #endif
 
-	mtx_enter(&ifiq->ifiq_mtx);
+	ifiq_qenter(ifiq);
 	ifiq->ifiq_packets += packets;
 	ifiq->ifiq_bytes += bytes;
 	ifiq->ifiq_fdrops += fdrops;
@@ -746,7 +826,7 @@ ifiq_input(struct ifiqueue *ifiq, struct
 			ml_enlist(&ifiq->ifiq_ml, ml);
 		}
 	}
-	mtx_leave(&ifiq->ifiq_mtx);
+	ifiq_qleave(ifiq);
 
 	if (ml_empty(ml))
 		task_add(ifiq->ifiq_softnet, &ifiq->ifiq_task);
@@ -781,11 +861,11 @@ ifiq_enqueue(struct ifiqueue *ifiq, stru
 	if_bpf = ifp->if_bpf;
 	if (if_bpf) {
 		if ((*ifp->if_bpf_mtap)(if_bpf, m, BPF_DIRECTION_IN)) {
-			mtx_enter(&ifiq->ifiq_mtx);
+			ifiq_qenter(ifiq);
 			ifiq->ifiq_packets++;
 			ifiq->ifiq_bytes += m->m_pkthdr.len;
 			ifiq->ifiq_fdrops++;
-			mtx_leave(&ifiq->ifiq_mtx);
+			ifiq_qleave(ifiq);
 			m_freem(m);
 
 			return (0);
@@ -793,12 +873,12 @@ ifiq_enqueue(struct ifiqueue *ifiq, stru
 	}
 #endif
 
-	mtx_enter(&ifiq->ifiq_mtx);
+	ifiq_qenter(ifiq);
 	ifiq->ifiq_packets++;
 	ifiq->ifiq_bytes += m->m_pkthdr.len;
 	ifiq->ifiq_enqueues++;
 	ml_enqueue(&ifiq->ifiq_ml, m);
-	mtx_leave(&ifiq->ifiq_mtx);
+	ifiq_qleave(ifiq);
 
 	task_add(ifiq->ifiq_softnet, &ifiq->ifiq_task);
 
@@ -814,11 +894,11 @@ ifiq_process(void *arg)
 	if (ifiq_empty(ifiq))
 		return;
 
-	mtx_enter(&ifiq->ifiq_mtx);
+	ifiq_qenter(ifiq);
 	ifiq->ifiq_dequeues++;
 	ml = ifiq->ifiq_ml;
 	ml_init(&ifiq->ifiq_ml);
-	mtx_leave(&ifiq->ifiq_mtx);
+	ifiq_qleave(ifiq);
 
 	if_input_process(ifiq->ifiq_if, &ml);
 }

Index: ifq.h
===================================================================
RCS file: /cvs/src/sys/net/ifq.h,v
retrieving revision 1.37
diff -u -p -r1.37 ifq.h
--- ifq.h	9 Jan 2023 03:37:44 -0000	1.37
+++ ifq.h	15 May 2023 02:57:16 -0000
@@ -55,6 +55,11 @@ struct ifqueue {
 	uint64_t		 ifq_errors;
 	uint64_t		 ifq_mcasts;
 
+	uint64_t		 ifq_qlocks;
+	uint64_t		 ifq_qcontended;
+	uint64_t		 ifq_slocks;
+	uint64_t		 ifq_scontended;
+
 	struct kstat		*ifq_kstat;
 
 	/* work serialisation */
@@ -100,6 +105,9 @@ struct ifiqueue {
 	uint64_t		 ifiq_enqueues;
 	/* number of times a list of packets were pulled off ifiq_ml */
 	uint64_t		 ifiq_dequeues;
+
+	uint64_t		 ifiq_qlocks;
+	uint64_t		 ifiq_qcontended;
 
 	struct kstat		*ifiq_kstat;
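
For anyone who wants to poke at the counting scheme outside the kernel, here is a
small userland analogue of the ifq_senter()/ifq_qenter() pattern above. It uses
pthreads instead of the kernel mutexes, with pthread_mutex_trylock() standing in
for mtx_enter_try(); all the names in it (struct cq, cq_enter(), cq_leave(),
worker()) are made up for illustration and are not part of this diff.

/*
 * Userland sketch of the try-then-block contention counters.
 * Build with: cc -o cq cq.c -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct cq {
	pthread_mutex_t	 q_mtx;
	uint64_t	 q_locks;	/* total lock acquisitions */
	uint64_t	 q_contended;	/* acquisitions that had to wait */
};

static void
cq_enter(struct cq *cq)
{
	if (pthread_mutex_trylock(&cq->q_mtx) != 0) {
		/* someone else holds the lock; fall back to blocking */
		pthread_mutex_lock(&cq->q_mtx);
		cq->q_contended++;
	}
	/* like in the diff, counters are only touched with the lock held */
	cq->q_locks++;
}

static void
cq_leave(struct cq *cq)
{
	pthread_mutex_unlock(&cq->q_mtx);
}

static void *
worker(void *arg)
{
	struct cq *cq = arg;
	int i;

	for (i = 0; i < 100000; i++) {
		cq_enter(cq);
		cq_leave(cq);
	}
	return (NULL);
}

int
main(void)
{
	static struct cq cq = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, &cq);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("locks %llu contended %llu\n",
	    (unsigned long long)cq.q_locks,
	    (unsigned long long)cq.q_contended);
	return (0);
}

The idea in both places is the same: try the lock first, and only if that fails
take the blocking path and bump the contended counter, so qcontended/qlocks (and
scontended/slocks for the serialiser mutex) give the share of acquisitions that
had to wait. Because the counters are only updated with the mutex held, they
don't need to be atomic. With the diff applied, the new key/value pairs should
show up alongside the existing per-queue ifq/ifiq kstats readable with kstat(1).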