Index: if.c
===================================================================
RCS file: /cvs/src/sys/net/if.c,v
retrieving revision 1.548
diff -u -p -r1.548 if.c
--- if.c	2 Mar 2018 15:52:11 -0000	1.548
+++ if.c	28 Mar 2018 01:28:03 -0000
@@ -607,6 +607,7 @@ if_attach_common(struct ifnet *ifp)
 	ifp->if_snd.ifq_ifqs[0] = &ifp->if_snd;
 	ifp->if_ifqs = ifp->if_snd.ifq_ifqs;
 	ifp->if_nifqs = 1;
+	ifp->if_sndmit = IF_SNDMIT_DEFAULT;
 
 	ifiq_init(&ifp->if_rcv, ifp, 0);
Index: if_var.h
===================================================================
RCS file: /cvs/src/sys/net/if_var.h,v
retrieving revision 1.89
diff -u -p -r1.89 if_var.h
--- if_var.h	10 Jan 2018 23:50:39 -0000	1.89
+++ if_var.h	28 Mar 2018 01:28:03 -0000
@@ -170,6 +170,7 @@ struct ifnet {				/* and the entries */
 	struct ifqueue **if_ifqs;	/* [I] pointer to an array of sndqs */
 	void	(*if_qstart)(struct ifqueue *);
 	unsigned int if_nifqs;		/* [I] number of output queues */
+	unsigned int if_sndmit;		/* [c] tx mitigation amount */
 
 	struct ifiqueue if_rcv;		/* rx/input queue */
 	struct ifiqueue **if_iqs;	/* [I] pointer to the array of iqs */
@@ -279,6 +280,9 @@ do {						\
 #define IFQ_LEN(ifq)			ifq_len(ifq)
 #define IFQ_IS_EMPTY(ifq)		ifq_empty(ifq)
 #define IFQ_SET_MAXLEN(ifq, len)	ifq_set_maxlen(ifq, len)
+
+#define IF_SNDMIT_MIN			1
+#define IF_SNDMIT_DEFAULT		16
 
 /* default interface priorities */
 #define IF_WIRED_DEFAULT_PRIORITY	0
Index: ifq.c
===================================================================
RCS file: /cvs/src/sys/net/ifq.c,v
retrieving revision 1.22
diff -u -p -r1.22 ifq.c
--- ifq.c	25 Jan 2018 14:04:36 -0000	1.22
+++ ifq.c	28 Mar 2018 01:28:03 -0000
@@ -70,9 +70,16 @@ struct priq {
 void	ifq_start_task(void *);
 void	ifq_restart_task(void *);
 void	ifq_barrier_task(void *);
+void	ifq_bundle_task(void *);
 
 #define TASK_ONQUEUE 0x1
 
+static inline void
+ifq_run_start(struct ifqueue *ifq)
+{
+	ifq_serialize(ifq, &ifq->ifq_start);
+}
+
 void
 ifq_serialize(struct ifqueue *ifq, struct task *t)
 {
@@ -114,6 +121,16 @@ ifq_is_serialized(struct ifqueue *ifq)
 }
 
 void
+ifq_start(struct ifqueue *ifq)
+{
+	if (ifq_len(ifq) >= min(ifq->ifq_if->if_sndmit, ifq->ifq_maxlen)) {
+		task_del(ifq->ifq_softnet, &ifq->ifq_bundle);
+		ifq_run_start(ifq);
+	} else
+		task_add(ifq->ifq_softnet, &ifq->ifq_bundle);
+}
+
+void
 ifq_start_task(void *p)
 {
 	struct ifqueue *ifq = p;
@@ -137,11 +154,31 @@ ifq_restart_task(void *p)
 }
 
 void
+ifq_bundle_task(void *p)
+{
+	struct ifqueue *ifq = p;
+
+	ifq_run_start(ifq);
+}
+
+void
 ifq_barrier(struct ifqueue *ifq)
 {
 	struct cond c = COND_INITIALIZER();
 	struct task t = TASK_INITIALIZER(ifq_barrier_task, &c);
 
+	if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle)) {
+		int netlocked = (rw_status(&netlock) == RW_WRITE);
+
+		if (netlocked) /* XXXSMP breaks atomicity */
+			NET_UNLOCK();
+
+		taskq_barrier(ifq->ifq_softnet);
+
+		if (netlocked)
+			NET_LOCK();
+	}
+
 	if (ifq->ifq_serializer == NULL)
 		return;
@@ -166,6 +203,7 @@ void
 ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx)
 {
 	ifq->ifq_if = ifp;
+	ifq->ifq_softnet = net_tq(ifp->if_index);
 	ifq->ifq_softc = NULL;
 	mtx_init(&ifq->ifq_mtx, IPL_NET);
@@ -187,6 +225,7 @@ ifq_init(struct ifqueue *ifq, struct ifn
 	mtx_init(&ifq->ifq_task_mtx, IPL_NET);
 	TAILQ_INIT(&ifq->ifq_task_list);
 	ifq->ifq_serializer = NULL;
+	task_set(&ifq->ifq_bundle, ifq_bundle_task, ifq);
 
 	task_set(&ifq->ifq_start, ifq_start_task, ifq);
 	task_set(&ifq->ifq_restart, ifq_restart_task, ifq);
@@ -237,6 +276,8 @@ void
 ifq_destroy(struct ifqueue *ifq)
 {
 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
+
+	ifq_barrier(ifq); /* ensure nothing is running with the ifq */
 
 	/* don't need to lock because this is the last use of the ifq */
Index: ifq.h
===================================================================
RCS file: /cvs/src/sys/net/ifq.h,v
retrieving revision 1.20
diff -u -p -r1.20 ifq.h
--- ifq.h	4 Jan 2018 11:02:57 -0000	1.20
+++ ifq.h	28 Mar 2018 01:28:03 -0000
@@ -25,6 +25,7 @@ struct ifq_ops;
 
 struct ifqueue {
 	struct ifnet		*ifq_if;
+	struct taskq		*ifq_softnet;
 	union {
 		void			*_ifq_softc;
 		/*
@@ -57,6 +58,7 @@ struct ifqueue {
 	struct mutex		 ifq_task_mtx;
 	struct task_list	 ifq_task_list;
 	void			*ifq_serializer;
+	struct task		 ifq_bundle;
 
 	/* work to be serialised */
 	struct task		 ifq_start;
@@ -405,6 +407,7 @@ void		 ifq_attach(struct ifqueue *, cons
 void		 ifq_destroy(struct ifqueue *);
 void		 ifq_add_data(struct ifqueue *, struct if_data *);
 int		 ifq_enqueue(struct ifqueue *, struct mbuf *);
+void		 ifq_start(struct ifqueue *);
 struct mbuf	*ifq_deq_begin(struct ifqueue *);
 void		 ifq_deq_commit(struct ifqueue *, struct mbuf *);
 void		 ifq_deq_rollback(struct ifqueue *, struct mbuf *);
@@ -438,12 +441,6 @@ static inline unsigned int
 ifq_is_oactive(struct ifqueue *ifq)
 {
 	return (ifq->ifq_oactive);
-}
-
-static inline void
-ifq_start(struct ifqueue *ifq)
-{
-	ifq_serialize(ifq, &ifq->ifq_start);
 }
 
 static inline void
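
For context, the new ifq_start() above works like this: if at least min(if_sndmit, ifq_maxlen)
packets are already queued, the start routine is dispatched immediately; otherwise the work is
deferred to the softnet taskq via the bundle task, so several small enqueues can be coalesced
into one call to the driver's start routine. The stand-alone program below is only a sketch of
that threshold decision in userland; all the names in it (mit_queue, mit_start, mit_softnet_tick,
etc.) are made up for illustration and are not part of the kernel API in the diff.

/*
 * Userland model of the tx mitigation decision made by ifq_start() above.
 * Hypothetical names; only the threshold logic mirrors the diff.
 */
#include <stdio.h>

#define SNDMIT_DEFAULT	16	/* mirrors IF_SNDMIT_DEFAULT */

struct mit_queue {
	unsigned int	len;		/* packets currently queued */
	unsigned int	maxlen;		/* queue depth limit */
	unsigned int	sndmit;		/* mitigation threshold */
	unsigned int	deferred;	/* models the pending bundle task */
};

static unsigned int
minu(unsigned int a, unsigned int b)
{
	return (a < b ? a : b);
}

/* models running the driver start routine */
static void
mit_run_start(struct mit_queue *q)
{
	printf("start: transmitting %u packet(s)\n", q->len);
	q->len = 0;
}

/* models ifq_start(): start now if the bundle is big enough, else defer */
static void
mit_start(struct mit_queue *q)
{
	if (q->len >= minu(q->sndmit, q->maxlen)) {
		q->deferred = 0;	/* task_del(softnet, bundle) */
		mit_run_start(q);	/* ifq_run_start() */
	} else
		q->deferred = 1;	/* task_add(softnet, bundle) */
}

/* models the softnet thread running ifq_bundle_task() later */
static void
mit_softnet_tick(struct mit_queue *q)
{
	if (q->deferred) {
		q->deferred = 0;
		mit_run_start(q);
	}
}

int
main(void)
{
	struct mit_queue q = { 0, 256, SNDMIT_DEFAULT, 0 };
	unsigned int i;

	/* a burst of 20 packets: the 16th enqueue triggers an immediate start */
	for (i = 0; i < 20; i++) {
		q.len++;		/* models ifq_enqueue() */
		mit_start(&q);
	}

	/* the 4 leftover packets go out when softnet runs the bundle task */
	mit_softnet_tick(&q);

	return (0);
}

With the threshold at 16, the burst above produces one start of 16 packets followed by one
deferred start of 4, instead of 20 separate calls into the driver's start routine, which is
the point of the mitigation.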