Index: if_pfsync.c =================================================================== RCS file: /cvs/src/sys/net/if_pfsync.c,v retrieving revision 1.295 diff -u -p -r1.295 if_pfsync.c --- if_pfsync.c 23 Jun 2021 06:53:51 -0000 1.295 +++ if_pfsync.c 24 Jun 2021 06:21:24 -0000 @@ -93,7 +93,8 @@ #include "bpfilter.h" #include "pfsync.h" -#define PFSYNC_DEFER_NSEC 20000000ULL +#define PFSYNC_DEFER_NSEC 20000000ULL +#define PFSYNC_DEFER_MAX 128 #define PFSYNC_MINPKT ( \ sizeof(struct ip) + \ @@ -226,6 +227,7 @@ struct pfsync_softc { u_int sc_deferred; struct mutex sc_deferrals_mtx; struct timeout sc_deferrals_tmo; + struct task sc_deferrals_run; void *sc_plus; size_t sc_pluslen; @@ -277,6 +279,9 @@ void pfsync_ifdetach(void *); void pfsync_deferred(struct pf_state *, int); void pfsync_undefer(struct pfsync_deferral *, int); void pfsync_deferrals_tmo(void *); +static inline void + pfsync_deferrals_add(struct pfsync_softc *); +void pfsync_deferrals_run(void *); void pfsync_cancel_full_update(struct pfsync_softc *); void pfsync_request_full_update(struct pfsync_softc *); @@ -349,7 +354,8 @@ pfsync_clone_create(struct if_clone *ifc mtx_init(&sc->sc_upd_req_mtx, IPL_SOFTNET); TAILQ_INIT(&sc->sc_deferrals); mtx_init(&sc->sc_deferrals_mtx, IPL_SOFTNET); - timeout_set_proc(&sc->sc_deferrals_tmo, pfsync_deferrals_tmo, sc); + timeout_set(&sc->sc_deferrals_tmo, pfsync_deferrals_tmo, sc); + task_set(&sc->sc_deferrals_run, pfsync_deferrals_run, sc); task_set(&sc->sc_ltask, pfsync_syncdev_state, sc); task_set(&sc->sc_dtask, pfsync_ifdetach, sc); sc->sc_deferred = 0; @@ -399,8 +405,7 @@ pfsync_clone_destroy(struct ifnet *ifp) { struct pfsync_softc *sc = ifp->if_softc; struct ifnet *ifp0; - struct pfsync_deferral *pd; - struct pfsync_deferrals deferrals; + struct pfsync_deferral *pd, *npd; NET_LOCK(); @@ -416,6 +421,8 @@ pfsync_clone_destroy(struct ifnet *ifp) } if_put(ifp0); + pfsyncif = NULL; + /* XXXSMP breaks atomicity */ NET_UNLOCK(); if_detach(ifp); @@ -423,27 +430,33 @@ 
pfsync_clone_destroy(struct ifnet *ifp) pfsync_drop(sc); - if (sc->sc_deferred > 0) { - TAILQ_INIT(&deferrals); - mtx_enter(&sc->sc_deferrals_mtx); - TAILQ_CONCAT(&deferrals, &sc->sc_deferrals, pd_entry); - sc->sc_deferred = 0; - mtx_leave(&sc->sc_deferrals_mtx); - - while (!TAILQ_EMPTY(&deferrals)) { - pd = TAILQ_FIRST(&deferrals); - TAILQ_REMOVE(&deferrals, pd, pd_entry); - pfsync_undefer(pd, 0); - } - } - - pfsyncif = NULL; timeout_del(&sc->sc_bulkfail_tmo); timeout_del(&sc->sc_bulk_tmo); timeout_del(&sc->sc_tmo); NET_UNLOCK(); + timeout_del_barrier(&sc->sc_deferrals_tmo); + taskq_del_barrier(systqmp, &sc->sc_deferrals_run); + + mtx_enter(&sc->sc_deferrals_mtx); + npd = TAILQ_FIRST(&sc->sc_deferrals); + + TAILQ_INIT(&sc->sc_deferrals); + sc->sc_deferred = 0; + mtx_leave(&sc->sc_deferrals_mtx); + + if (npd != NULL) { + NET_LOCK(); + do { + pd = npd; + npd = TAILQ_NEXT(pd, pd_entry); + + pfsync_undefer(pd, 0); + } while (npd != NULL); + NET_UNLOCK(); + } + pool_destroy(&sc->sc_pool); free(sc->sc_imo.imo_membership, M_IPMOPTS, sc->sc_imo.imo_max_memberships * sizeof(struct in_multi *)); @@ -1935,7 +1948,7 @@ pfsync_defer(struct pf_state *st, struct { struct pfsync_softc *sc = pfsyncif; struct pfsync_deferral *pd; - unsigned int sched; + unsigned int count; NET_ASSERT_LOCKED(); @@ -1944,18 +1957,6 @@ pfsync_defer(struct pf_state *st, struct m->m_flags & (M_BCAST|M_MCAST)) return (0); - if (sc->sc_deferred >= 128) { - mtx_enter(&sc->sc_deferrals_mtx); - pd = TAILQ_FIRST(&sc->sc_deferrals); - if (pd != NULL) { - TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry); - sc->sc_deferred--; - } - mtx_leave(&sc->sc_deferrals_mtx); - if (pd != NULL) - pfsync_undefer(pd, 0); - } - pd = pool_get(&sc->sc_pool, M_NOWAIT); if (pd == NULL) return (0); @@ -1969,13 +1970,13 @@ pfsync_defer(struct pf_state *st, struct pd->pd_deadline = getnsecuptime() + PFSYNC_DEFER_NSEC; mtx_enter(&sc->sc_deferrals_mtx); - sched = TAILQ_EMPTY(&sc->sc_deferrals); - TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, 
pd_entry); - sc->sc_deferred++; + count = ++sc->sc_deferred; mtx_leave(&sc->sc_deferrals_mtx); - if (sched) + if (count >= PFSYNC_DEFER_MAX) + pfsync_deferrals_add(sc); + else if (count == 1) + timeout_add_nsec(&sc->sc_deferrals_tmo, PFSYNC_DEFER_NSEC); schednetisr(NETISR_PFSYNC); @@ -2062,14 +2063,34 @@ pfsync_undefer(struct pfsync_deferral *p pfsync_free_deferral(pd); } +static inline void +pfsync_deferrals_add(struct pfsync_softc *sc) +{ + task_add(systqmp, &sc->sc_deferrals_run); +} + void pfsync_deferrals_tmo(void *arg) { struct pfsync_softc *sc = arg; + + if (pfsyncif != sc) /* XXX !ISSET(sc->sc_if.if_flags, IFF_RUNNING) */ + return; + + pfsync_deferrals_add(sc); +} + +void +pfsync_deferrals_run(void *arg) +{ + struct pfsync_softc *sc = arg; struct pfsync_deferral *pd; uint64_t now, nsec = 0; struct pfsync_deferrals pds = TAILQ_HEAD_INITIALIZER(pds); + if (pfsyncif != sc) /* XXX !ISSET(sc->sc_if.if_flags, IFF_RUNNING) */ + return; + now = getnsecuptime(); mtx_enter(&sc->sc_deferrals_mtx); @@ -2078,7 +2099,8 @@ pfsync_deferrals_tmo(void *arg) if (pd == NULL) break; - if (now < pd->pd_deadline) { + if (sc->sc_deferred < PFSYNC_DEFER_MAX && + now < pd->pd_deadline) { nsec = pd->pd_deadline - now; break; }