Index: share/man/man9/HEAP_INIT.9 =================================================================== RCS file: share/man/man9/HEAP_INIT.9 diff -N share/man/man9/HEAP_INIT.9 --- /dev/null 1 Jan 1970 00:00:00 -0000 +++ share/man/man9/HEAP_INIT.9 4 Oct 2016 00:09:00 -0000 @@ -0,0 +1,207 @@ +.\" $OpenBSD$ +.\" +.\" Copyright (c) 2016 David Gwynne +.\" +.\" Permission to use, copy, modify, and distribute this software for any +.\" purpose with or without fee is hereby granted, provided that the above +.\" copyright notice and this permission notice appear in all copies. +.\" +.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+.\" +.Dd $Mdocdate: September 5 2016 $ +.Dt HEAP_INIT 9 +.Os +.Sh NAME +.Nm HEAP_INIT , +.Nm HEAP_INSERT , +.Nm HEAP_REMOVE , +.Nm HEAP_FIRST , +.Nm HEAP_EXTRACT , +.Nm HEAP_CEXTRACT +.Nd Kernel Heap Data Structure +.Sh SYNOPSIS +.In sys/tree.h +.Fn HEAP_HEAD "NAME" +.Fn HEAP_PROTOTYPE "NAME" "TYPE" +.Fc +.Fo HEAP_GENERATE +.Fa "NAME" +.Fa "TYPE" +.Fa "ENTRY" +.Fa "int (*compare)(const struct TYPE *, const struct TYPE *)" +.Fc +.Ft struct NAME +.Fn HEAP_INITIALIZER +.Ft void +.Fn HEAP_INIT "NAME" "struct NAME *heap" +.Ft struct TYPE * +.Fn HEAP_INSERT "NAME" "struct NAME *heap" "struct TYPE *elm" +.Ft struct TYPE * +.Fn HEAP_REMOVE "NAME" "struct NAME *heap" "struct TYPE *elm" +.Ft struct TYPE * +.Fn HEAP_FIRST "NAME" "struct NAME *heap" +.Ft struct TYPE * +.Fn HEAP_EXTRACT "NAME" "struct NAME *heap" +.Ft struct TYPE * +.Fn HEAP_CEXTRACT "NAME" "struct NAME *heap" "const struct TYPE *key" +.Sh DESCRIPTION +The heap API provides data structures and operations for storing elements +in a heap. The API implements a pairing heap that sorts elements +according to a user defined comparison function. +.Pp +This API is implemented as a set of functions that operate on generic +pointers, but users of the API generate wrappers and macros that provide +type safety when calling the functions. +.Pp +In the macro definitions, +.Fa TYPE +is the name of a structure that will be stored in a heap. +The +.Fa TYPE +structure must contain an +heap_entry +structure that allows the element to be connected to a heap. +The argument +.Fa NAME +is the name of a heap type that can store a particular +.Fa TYPE +element. +.Pp +The +.Fn HEAP_HEAD +macro creates a heap type to store +.Fa TYPE +structures as elements in the heap. +The argument +.Fa NAME +must uniquely identify every type of heap that is defined. +.Pp +.Fn HEAP_PROTOTYPE +produces the wrappers for a heap type identified by +.Fa NAME +to operate on elements of type +.Fa TYPE . 
+.Pp +.Fn HEAP_GENERATE +produces the internal data structures used by the heap +type identified by +.Fa NAME +to operate on elements of type +.Fa TYPE . +.Fa ENTRY +specifies which field in the +.Fa TYPE +structure is used to connect elements to +.Fa NAME +heaps. +Elements in the heap are ordered according to the result of comparing +them with the +.Fa compare +function. +If the first argument to +.Fa compare +is to be ordered lower than the second, +the function returns a value smaller than zero. +If the first argument is to be ordered higher than the second, +the function returns a value greater than zero. +If they are equal, the function returns zero. +.Pp +.Fn HEAP_INIT +initialises +.Fa heap +of type +.Fa NAME +to an empty state. +.Pp +.Fn HEAP_INITIALIZER +can be used to initialise a declaration of a heap +to an empty state. +.Pp +.Fn HEAP_INSERT +inserts the element +.Fa elm +into the +.Fa heap +structure of type +.Fa NAME . +.Pp +.Fn HEAP_REMOVE +removes the element +.Fa elm +from the +.Fa heap +of type +.Fa NAME . +.Fa elm +must exist in the +.Fa heap +before it is removed. +.Pp +.Fn HEAP_FIRST +returns the lowest ordered element in the +.Fa heap +of type +.Fa NAME . +.Pp +.Fn HEAP_EXTRACT +removes the lowest ordered element from the +.Fa heap +of type +.Fa NAME +and returns it. +.Pp +.Fn HEAP_CEXTRACT +conditionally removes the lowest ordered element from the +.Fa heap +of type +.Fa NAME +and returns it if it compares lower than the +.Fa key +element. +.Sh CONTEXT +.Fn HEAP_INIT , +.Fn HEAP_INSERT , +.Fn HEAP_REMOVE , +.Fn HEAP_FIRST , +.Fn HEAP_EXTRACT , +and +.Fn HEAP_CEXTRACT +can be called during autoconf, from process context, or from interrupt +context. +.Pp +It is up to the caller to provide appropriate locking around calls to +these functions to prevent concurrent access to the relevant data structures. +.Sh RETURN VALUES +.Fn HEAP_FIRST +returns a reference to the lowest ordered element in the heap, +or +.Dv NULL +if it is empty. 
+.Pp +.Fn HEAP_EXTRACT +returns a reference to the lowest ordered element in the heap, +or +.Dv NULL +if it is empty. +.Pp +.Fn HEAP_CEXTRACT +returns a reference to the lowest ordered element in the heap if it compares +lower than the +.Fa key +argument, +or +.Dv NULL +if it is empty or the lowest ordered element is higher than +.Fa key . +.Sh SEE ALSO +.Xr RBT_INIT 3 , +.Xr TAILQ_INIT 3 +.Sh HISTORY +The kernel heap API first appeared in +.Ox 6.1 . Index: share/man/man9/task_add.9 =================================================================== RCS file: /cvs/src/share/man/man9/task_add.9,v retrieving revision 1.16 diff -u -p -r1.16 task_add.9 --- share/man/man9/task_add.9 14 Sep 2015 15:14:55 -0000 1.16 +++ share/man/man9/task_add.9 4 Oct 2016 00:09:00 -0000 @@ -22,7 +22,12 @@ .Nm taskq_destroy , .Nm task_set , .Nm task_add , +.Nm task_add_ticks , +.Nm task_add_sec , +.Nm task_add_msec , .Nm task_del , +.Nm task_initialized , +.Nm task_pending , .Nm TASK_INITIALIZER .Nd task queues .Sh SYNOPSIS @@ -42,9 +47,19 @@ .Fn task_add "struct taskq *tq" "struct task *t" .Ft int .Fn task_del "struct taskq *tq" "struct task *t" +.Ft int +.Fn task_add_ticks "struct taskq *tq" "struct task *t" "int ticks" +.Ft int +.Fn task_add_sec "struct taskq *tq" "struct task *t" "int secs" +.Ft int +.Fn task_add_msec "struct taskq *tq" "struct task *t" "int msecs" +.Ft int +.Fn task_initialized "struct task *t" +.Ft int +.Fn task_pending "struct task *t" +.Fn TASK_INITIALIZER "void (*fn)(void *)" "void *arg" .Vt extern struct taskq *const systq; .Vt extern struct taskq *const systqmp; -.Fn TASK_INITIALIZER "void (*fn)(void *)" "void *arg" .Sh DESCRIPTION The taskq @@ -52,7 +67,7 @@ API provides a mechanism to defer work t .Pp .Fn taskq_create allocates a taskq and a set of threads to be used to complete work -that would be inappropriate for the shared system taskq. +that would be inappropriate for the shared system taskqs. 
The .Fa name argument specifies the name of the kernel threads that are created @@ -60,10 +75,7 @@ to service the work on the taskq. .Fa nthreads specifies the number of threads that will be created to handle the work. .Fa ipl -specifies the highest interrupt protection level at which -.Fn task_add -and -.Fn task_del +specifies the highest interrupt protection level at which the task API will be called against the created taskq. See .Xr spl 9 @@ -85,46 +97,72 @@ It will wait till all the tasks in the w returning. Calling .Fn taskq_destroy -against the system taskq is an error and will lead to undefined +against the system taskqs is an error and will lead to undefined behaviour or a system fault. .Pp It is the responsibility of the caller to provide the -.Fn task_set , -.Fn task_add , -and -.Fn task_del -functions with pre-allocated task structures. +pre-allocated task structures for the rest of the API. .Pp .Fn task_set prepares the task structure .Fa t -to be used in future calls to -.Fn task_add -and -.Fn task_del . +to be used with a taskq. .Fa t will be prepared to call the function .Fa fn with the argument specified by .Fa arg . -Once initialised, the -.Fa t -structure can be used repeatedly in calls to -.Fn task_add -and -.Fn task_del -and does not need to be reinitialised unless the function called -and/or its argument must change. .Pp .Fn task_add -schedules the execution of the work specified by the +schedules the immediate execution of the work specified by the task structure .Fa t on the .Fa tq taskq. -The task structure must already be initialised by -.Fn task_set . +Calls to +.Fn task_add +will not reschedule a task if +.Fn task_pending +would return true, even if the task is set to in the future. +.Pp +The following add functions offer similar functionality to the +.Xr timeout_add 9 +family of functions, except the work is scheduled to run in the +specified taskq instead of in the softclock interrupt context. 
+If the task is already scheduled, these functions will reschedule +it with the new execution deadline. +.Pp +.Fn task_add_ticks +schedules the task +.Fa t +to be executed on the +.Fa tq +taskq +in the future after the number of clock ticks specified by the +.Fa ticks +argument. +See +.Xr hz 9 +for a description of clock ticks. +.Pp +.Fn task_add_sec +schedules the task +.Fa t +to be executed on the +.Fa tq +taskq +.Fa secs +seconds in the future. +.Pp +.Fn task_add_msec +schedules the task +.Fa t +to be executed on the +.Fa tq +taskq +.Fa msecs +milliseconds in the future. .Pp .Fn task_del will remove the task structure @@ -133,11 +171,24 @@ from the taskq .Fa tq . If the work was already executed or has not been added to the taskq, the call will have no effect. -Calling -.Fn task_del -against a different taskq than the one given in a previous call to -.Fn task_add -is an error and will lead to undefined behaviour. +.Pp +.Fn task_initialized +returns whether the task +.Fa t +has been initialized with +.Fn TASK_INITIALIZER +or a call to +.Fn task_set . +Use of +.Fn task_initialized +is discouraged as the bit that is set to indicate task initialisation +may be set if the task is allocated from non-zeroed memory. +Tasks should instead be initialised upon allocation. +.Pp +.Fn task_pending +returns whether the task +.Fa t +is currently scheduled for execution on a taskq. .Pp The kernel provides two system taskqs: .Va systq , @@ -165,8 +216,13 @@ and can be called during autoconf, or from process context. .Fn task_set , .Fn task_add , +.Fn task_add_ticks , +.Fn task_add_sec , +.Fn task_add_msec , +.Fn task_del , +.Fn task_initialized , and -.Fn task_del +.Fn task_pending can be called during autoconf, from process context, or from interrupt context. .Sh RETURN VALUES .Fn taskq_create @@ -174,7 +230,11 @@ returns a pointer to a taskq structure o .Dv NULL on failure. 
.Pp -.Fn task_add +.Fn task_add , +.Fn task_add_ticks , +.Fn task_add_sec , +and +.Fn task_add_msec will return 1 if the task .Fa t was added to the taskq @@ -187,8 +247,20 @@ will return 1 if the task was removed from the taskq .Fa tq or 0 if the task was not already on the queue. +.Pp +.Fn task_initialized +will return non-zero if the task was initialized with +.Fn task_set +or +.Fn TASK_INITIALIZER , +.Pp +.Fn task_pending +will return non-zero if the task is scheduled for execution on a taskq, +otherwise 0. .Sh SEE ALSO .Xr autoconf 9 , +.Xr hz 9 +.Xr timeout_add 9 , .Xr spl 9 .Sh HISTORY The task API was originally written by Index: sys/kern/kern_task.c =================================================================== RCS file: /cvs/src/sys/kern/kern_task.c,v retrieving revision 1.18 diff -u -p -r1.18 kern_task.c --- sys/kern/kern_task.c 11 Aug 2016 01:32:31 -0000 1.18 +++ sys/kern/kern_task.c 4 Oct 2016 00:09:00 -0000 @@ -21,9 +21,11 @@ #include #include #include +#include + #include -#define TASK_ONQUEUE 1 +HEAP_HEAD(task_heap); struct taskq { enum { @@ -37,9 +39,12 @@ struct taskq { const char *tq_name; struct mutex tq_mtx; - struct task_list tq_worklist; + struct task_heap tq_worklist; + struct timeout tq_schedule; }; +void taskq_wakeup(void *); + struct taskq taskq_sys = { TQ_S_CREATED, 0, @@ -47,7 +52,8 @@ struct taskq taskq_sys = { 0, "systq", MUTEX_INITIALIZER(IPL_HIGH), - TAILQ_HEAD_INITIALIZER(taskq_sys.tq_worklist) + HEAP_INITIALIZER(), + TIMEOUT_INITIALIZER(taskq_wakeup, &taskq_sys) }; struct taskq taskq_sys_mp = { @@ -57,7 +63,8 @@ struct taskq taskq_sys_mp = { TASKQ_MPSAFE, "systqmp", MUTEX_INITIALIZER(IPL_HIGH), - TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist) + HEAP_INITIALIZER(), + TIMEOUT_INITIALIZER(taskq_wakeup, &taskq_sys_mp) }; typedef int (*sleepfn)(const volatile void *, struct mutex *, int, @@ -73,6 +80,8 @@ int taskq_sleep(const volatile void *, s int taskq_next_work(struct taskq *, struct task *, sleepfn); void taskq_thread(void 
*); +HEAP_PROTOTYPE(task_heap, task); + void taskq_init(void) { @@ -97,7 +106,8 @@ taskq_create(const char *name, unsigned tq->tq_flags = flags; mtx_init(&tq->tq_mtx, ipl); - TAILQ_INIT(&tq->tq_worklist); + HEAP_INIT(task_heap, &tq->tq_worklist); + timeout_set(&tq->tq_schedule, taskq_wakeup, tq); /* try to create a thread to guarantee that tasks will be serviced */ kthread_create_deferred(taskq_create_thread, tq); @@ -183,26 +193,47 @@ task_set(struct task *t, void (*fn)(void { t->t_func = fn; t->t_arg = arg; - t->t_flags = 0; + t->t_flags = TASK_INITIALIZED; } int task_add(struct taskq *tq, struct task *w) { + if (task_pending(w)) + return (0); + + return (task_add_ticks(tq, w, 0)); +} + +int +task_add_ticks(struct taskq *tq, struct task *w, int to) +{ + extern int ticks; int rv = 0; + int diff; - if (ISSET(w->t_flags, TASK_ONQUEUE)) - return (0); + if (to > INT_MAX) + to = INT_MAX; mtx_enter(&tq->tq_mtx); - if (!ISSET(w->t_flags, TASK_ONQUEUE)) { + + if (task_pending(w)) + HEAP_REMOVE(task_heap, &tq->tq_worklist, w); + else rv = 1; - SET(w->t_flags, TASK_ONQUEUE); - TAILQ_INSERT_TAIL(&tq->tq_worklist, w, t_entry); - } + + SET(w->t_flags, TASK_ONQUEUE); + w->t_deadline = ticks + to; + HEAP_INSERT(task_heap, &tq->tq_worklist, w); + + w = HEAP_FIRST(task_heap, &tq->tq_worklist); + diff = w->t_deadline - ticks; + if (diff > 0) + timeout_add(&tq->tq_schedule, diff); + mtx_leave(&tq->tq_mtx); - if (rv) + if (diff <= 0) wakeup_one(tq); return (rv); @@ -211,17 +242,19 @@ task_add(struct taskq *tq, struct task * int task_del(struct taskq *tq, struct task *w) { - int rv = 0; + int rv = 1; - if (!ISSET(w->t_flags, TASK_ONQUEUE)) + if (!task_pending(w)) return (0); mtx_enter(&tq->tq_mtx); - if (ISSET(w->t_flags, TASK_ONQUEUE)) { - rv = 1; + + if (task_pending(w)) { CLR(w->t_flags, TASK_ONQUEUE); - TAILQ_REMOVE(&tq->tq_worklist, w, t_entry); - } + HEAP_REMOVE(task_heap, &tq->tq_worklist, w); + } else + rv = 0; + mtx_leave(&tq->tq_mtx); return (rv); @@ -241,13 +274,26 @@ 
taskq_sleep(const volatile void *ident, return (tmo); } +static inline struct task * +taskq_extract(struct task_heap *heap) +{ + extern int ticks; + struct task key; + + key.t_deadline = ticks; + return (HEAP_CEXTRACT(task_heap, heap, &key)); +} + int taskq_next_work(struct taskq *tq, struct task *work, sleepfn tqsleep) { + extern int ticks; struct task *next; + int diff = -1; mtx_enter(&tq->tq_mtx); - while ((next = TAILQ_FIRST(&tq->tq_worklist)) == NULL) { + + while ((next = taskq_extract(&tq->tq_worklist)) == NULL) { if (tq->tq_state != TQ_S_RUNNING) { mtx_leave(&tq->tq_mtx); return (0); @@ -256,15 +302,21 @@ taskq_next_work(struct taskq *tq, struct tqsleep(tq, &tq->tq_mtx, PWAIT, "bored", 0); } - TAILQ_REMOVE(&tq->tq_worklist, next, t_entry); CLR(next->t_flags, TASK_ONQUEUE); - *work = *next; /* copy to caller to avoid races */ - next = TAILQ_FIRST(&tq->tq_worklist); + next = HEAP_FIRST(task_heap, &tq->tq_worklist); + if (next != NULL) { + diff = next->t_deadline - ticks; + if (diff > 0) + timeout_add(&tq->tq_schedule, diff); + else + diff = 0; + } + mtx_leave(&tq->tq_mtx); - if (next != NULL && tq->tq_nthreads > 1) + if (diff == 0 && tq->tq_nthreads > 1) wakeup_one(tq); return (1); @@ -306,3 +358,43 @@ taskq_thread(void *xtq) kthread_exit(0); } + +int +task_add_sec(struct taskq *tq, struct task *t, int secs) +{ + extern int hz; + uint64_t to_ticks; + + to_ticks = (uint64_t)hz * secs; + if (to_ticks > INT_MAX) + to_ticks = INT_MAX; + + return (task_add_ticks(tq, t, (int)to_ticks)); +} + +int +task_add_msec(struct taskq *tq, struct task *t, int msecs) +{ + extern int tick; + uint64_t to_ticks; + + to_ticks = (uint64_t)msecs * 1000 / tick; + if (to_ticks > INT_MAX) + to_ticks = INT_MAX; + + return (task_add_ticks(tq, t, (int)to_ticks)); +} + +void +taskq_wakeup(void *ident) +{ + wakeup_one(ident); +} + +static inline int +task_cmp(const struct task *a, const struct task *b) +{ + return (a->t_deadline - b->t_deadline); +} + +HEAP_GENERATE(task_heap, task, 
_t_entry._t_heap, task_cmp); Index: sys/kern/subr_tree.c =================================================================== RCS file: /cvs/src/sys/kern/subr_tree.c,v retrieving revision 1.6 diff -u -p -r1.6 subr_tree.c --- sys/kern/subr_tree.c 20 Sep 2016 01:11:27 -0000 1.6 +++ sys/kern/subr_tree.c 27 Sep 2016 04:22:52 -0000 @@ -610,3 +610,181 @@ _rb_check(const struct rb_type *t, void (unsigned long)RBE_LEFT(rbe) == poison && (unsigned long)RBE_RIGHT(rbe) == poison); } + +static inline struct heap_entry * +heap_n2e(const struct heap_type *t, void *node) +{ + caddr_t addr = (caddr_t)node; + + return ((struct heap_entry *)(addr + t->t_offset)); +} + +static inline void * +heap_e2n(const struct heap_type *t, struct heap_entry *rbe) +{ + caddr_t addr = (caddr_t)rbe; + + return ((void *)(addr - t->t_offset)); +} + +static struct heap_entry * +_heap_merge(const struct heap_type *t, + struct heap_entry *he1, struct heap_entry *he2) +{ + struct heap_entry *hi, *lo; + struct heap_entry *child; + + if (he1 == NULL) + return (he2); + if (he2 == NULL) + return (he1); + + if (t->t_compare(he1, he2) >= 0) { + hi = he1; + lo = he2; + } else { + lo = he1; + hi = he2; + } + + child = lo->he_child; + + hi->he_left = lo; + hi->he_nextsibling = child; + if (child != NULL) + child->he_left = hi; + lo->he_child = hi; + lo->he_left = NULL; + lo->he_nextsibling = NULL; + + return (lo); +} + +static inline void +_heap_sibling_remove(struct heap_entry *he) +{ + if (he->he_left == NULL) + return; + + if (he->he_left->he_child == he) { + if ((he->he_left->he_child = he->he_nextsibling) != NULL) + he->he_nextsibling->he_left = he->he_left; + } else { + if ((he->he_left->he_nextsibling = he->he_nextsibling) != NULL) + he->he_nextsibling->he_left = he->he_left; + } + + he->he_left = NULL; + he->he_nextsibling = NULL; +} + +static inline struct heap_entry * +_heap_2pass_merge(const struct heap_type *t, struct heap_entry *root) +{ + struct heap_entry *node, *next = NULL; + struct heap_entry 
*tmp, *list = NULL; + + node = root->he_child; + if (node == NULL) + return (NULL); + + root->he_child = NULL; + + /* first pass */ + for (next = node->he_nextsibling; next != NULL; + next = (node != NULL ? node->he_nextsibling : NULL)) { + tmp = next->he_nextsibling; + node = _heap_merge(t, node, next); + + /* insert head */ + node->he_nextsibling = list; + list = node; + node = tmp; + } + + /* odd child case */ + if (node != NULL) { + node->he_nextsibling = list; + list = node; + } + + /* second pass */ + while (list->he_nextsibling != NULL) { + tmp = list->he_nextsibling->he_nextsibling; + list = _heap_merge(t, list, list->he_nextsibling); + list->he_nextsibling = tmp; + } + + list->he_left = NULL; + list->he_nextsibling = NULL; + + return (list); +} + +void +_heap_insert(const struct heap_type *t, struct heap *h, void *node) +{ + struct heap_entry *he = heap_n2e(t, node); + + he->he_left = NULL; + he->he_child = NULL; + he->he_nextsibling = NULL; + + h->h_root = _heap_merge(t, h->h_root, he); +} + +void +_heap_remove(const struct heap_type *t, struct heap *h, void *node) +{ + struct heap_entry *he = heap_n2e(t, node); + + if (he->he_left == NULL) { + _heap_extract(t, h); + return; + } + + _heap_sibling_remove(he); + h->h_root = _heap_merge(t, h->h_root, _heap_2pass_merge(t, he)); +} + +void * +_heap_first(const struct heap_type *t, struct heap *h) +{ + struct heap_entry *first = h->h_root; + + if (first == NULL) + return (NULL); + + return (heap_e2n(t, first)); +} + +void * +_heap_extract(const struct heap_type *t, struct heap *h) +{ + struct heap_entry *first = h->h_root; + + if (first == NULL) + return (NULL); + + h->h_root = _heap_2pass_merge(t, first); + + return (heap_e2n(t, first)); +} + +void * +_heap_cextract(const struct heap_type *t, struct heap *h, const void *key) +{ + struct heap_entry *first = h->h_root; + void *node; + + if (first == NULL) + return (NULL); + + node = heap_e2n(t, first); + if (t->t_compare(node, key) > 0) + return (NULL); + + 
h->h_root = _heap_2pass_merge(t, first); + + return (node); +} Index: sys/net/if_pflow.c =================================================================== RCS file: /cvs/src/sys/net/if_pflow.c,v retrieving revision 1.61 diff -u -p -r1.61 if_pflow.c --- sys/net/if_pflow.c 29 Apr 2016 08:55:03 -0000 1.61 +++ sys/net/if_pflow.c 4 Oct 2016 00:09:00 -0000 @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -258,12 +259,12 @@ pflow_clone_destroy(struct ifnet *ifp) error = 0; s = splnet(); - if (timeout_initialized(&sc->sc_tmo)) - timeout_del(&sc->sc_tmo); - if (timeout_initialized(&sc->sc_tmo6)) - timeout_del(&sc->sc_tmo6); - if (timeout_initialized(&sc->sc_tmo_tmpl)) - timeout_del(&sc->sc_tmo_tmpl); + if (task_initialized(&sc->sc_tmo)) + task_del(systq, &sc->sc_tmo); + if (task_initialized(&sc->sc_tmo6)) + task_del(systq, &sc->sc_tmo6); + if (task_initialized(&sc->sc_tmo_tmpl)) + task_del(systq, &sc->sc_tmo_tmpl); pflow_flush(sc); m_freem(sc->send_nam); if (sc->so != NULL) { @@ -543,22 +544,22 @@ pflow_init_timeouts(struct pflow_softc * { switch (sc->sc_version) { case PFLOW_PROTO_5: - if (timeout_initialized(&sc->sc_tmo6)) - timeout_del(&sc->sc_tmo6); - if (timeout_initialized(&sc->sc_tmo_tmpl)) - timeout_del(&sc->sc_tmo_tmpl); - if (!timeout_initialized(&sc->sc_tmo)) - timeout_set(&sc->sc_tmo, pflow_timeout, sc); + if (task_initialized(&sc->sc_tmo6)) + task_del(systq, &sc->sc_tmo6); + if (task_initialized(&sc->sc_tmo_tmpl)) + task_del(systq, &sc->sc_tmo_tmpl); + if (!task_initialized(&sc->sc_tmo)) + task_set(&sc->sc_tmo, pflow_timeout, sc); break; case PFLOW_PROTO_10: - if (!timeout_initialized(&sc->sc_tmo_tmpl)) - timeout_set(&sc->sc_tmo_tmpl, pflow_timeout_tmpl, sc); - if (!timeout_initialized(&sc->sc_tmo)) - timeout_set(&sc->sc_tmo, pflow_timeout, sc); - if (!timeout_initialized(&sc->sc_tmo6)) - timeout_set(&sc->sc_tmo6, pflow_timeout6, sc); + if (!task_initialized(&sc->sc_tmo_tmpl)) + task_set(&sc->sc_tmo_tmpl, 
pflow_timeout_tmpl, sc); + if (!task_initialized(&sc->sc_tmo)) + task_set(&sc->sc_tmo, pflow_timeout, sc); + if (!task_initialized(&sc->sc_tmo6)) + task_set(&sc->sc_tmo6, pflow_timeout6, sc); - timeout_add_sec(&sc->sc_tmo_tmpl, PFLOW_TMPL_TIMEOUT); + task_add_sec(systq, &sc->sc_tmo_tmpl, PFLOW_TMPL_TIMEOUT); break; default: /* NOTREACHED */ break; @@ -649,7 +650,7 @@ pflow_get_mbuf(struct pflow_softc *sc, u m_copyback(m, 0, PFLOW_HDRLEN, &h, M_NOWAIT); sc->sc_count = 0; - timeout_add_sec(&sc->sc_tmo, PFLOW_TIMEOUT); + task_add_sec(systq, &sc->sc_tmo, PFLOW_TIMEOUT); } else { /* populate pflow_set_header */ set_hdr.set_length = 0; @@ -891,7 +892,7 @@ copy_flow_ipfix_4_to_m(struct pflow_ipfi return (ENOBUFS); } sc->sc_count4 = 0; - timeout_add_sec(&sc->sc_tmo, PFLOW_TIMEOUT); + task_add_sec(systq, &sc->sc_tmo, PFLOW_TIMEOUT); } m_copyback(sc->sc_mbuf, PFLOW_SET_HDRLEN + (sc->sc_count4 * sizeof(struct pflow_ipfix_flow4)), @@ -921,7 +922,7 @@ copy_flow_ipfix_6_to_m(struct pflow_ipfi return (ENOBUFS); } sc->sc_count6 = 0; - timeout_add_sec(&sc->sc_tmo6, PFLOW_TIMEOUT); + task_add_sec(systq, &sc->sc_tmo6, PFLOW_TIMEOUT); } m_copyback(sc->sc_mbuf6, PFLOW_SET_HDRLEN + (sc->sc_count6 * sizeof(struct pflow_ipfix_flow6)), @@ -1076,7 +1077,7 @@ pflow_sendout_v5(struct pflow_softc *sc) struct ifnet *ifp = &sc->sc_if; struct timespec tv; - timeout_del(&sc->sc_tmo); + task_del(systq, &sc->sc_tmo); if (m == NULL) return (0); @@ -1115,7 +1116,7 @@ pflow_sendout_ipfix(struct pflow_softc * switch (af) { case AF_INET: m = sc->sc_mbuf; - timeout_del(&sc->sc_tmo); + task_del(systq, &sc->sc_tmo); if (m == NULL) return (0); sc->sc_mbuf = NULL; @@ -1125,7 +1126,7 @@ pflow_sendout_ipfix(struct pflow_softc * break; case AF_INET6: m = sc->sc_mbuf6; - timeout_del(&sc->sc_tmo6); + task_del(systq, &sc->sc_tmo6); if (m == NULL) return (0); sc->sc_mbuf6 = NULL; @@ -1170,7 +1171,7 @@ pflow_sendout_ipfix_tmpl(struct pflow_so struct pflow_v10_header *h10; struct ifnet *ifp = &sc->sc_if; - 
timeout_del(&sc->sc_tmo_tmpl); + task_del(systq, &sc->sc_tmo_tmpl); if (!(ifp->if_flags & IFF_RUNNING)) { return (0); @@ -1199,7 +1200,7 @@ pflow_sendout_ipfix_tmpl(struct pflow_so h10->flow_sequence = htonl(sc->sc_sequence); h10->observation_dom = htonl(PFLOW_ENGINE_TYPE); - timeout_add_sec(&sc->sc_tmo_tmpl, PFLOW_TMPL_TIMEOUT); + task_add_sec(systq, &sc->sc_tmo_tmpl, PFLOW_TMPL_TIMEOUT); return (pflow_sendout_mbuf(sc, m)); } Index: sys/net/if_pflow.h =================================================================== RCS file: /cvs/src/sys/net/if_pflow.h,v retrieving revision 1.14 diff -u -p -r1.14 if_pflow.h --- sys/net/if_pflow.h 3 Oct 2015 10:44:23 -0000 1.14 +++ sys/net/if_pflow.h 4 Oct 2016 00:09:00 -0000 @@ -181,9 +181,9 @@ struct pflow_softc { unsigned int sc_maxcount6; u_int64_t sc_gcounter; u_int32_t sc_sequence; - struct timeout sc_tmo; - struct timeout sc_tmo6; - struct timeout sc_tmo_tmpl; + struct task sc_tmo; + struct task sc_tmo_tmpl; + struct task sc_tmo6; struct socket *so; struct mbuf *send_nam; struct sockaddr *sc_flowsrc; Index: sys/net/if_pfsync.c =================================================================== RCS file: /cvs/src/sys/net/if_pfsync.c,v retrieving revision 1.234 diff -u -p -r1.234 if_pfsync.c --- sys/net/if_pfsync.c 27 Sep 2016 04:57:17 -0000 1.234 +++ sys/net/if_pfsync.c 4 Oct 2016 00:09:00 -0000 @@ -49,7 +49,7 @@ #include #include #include -#include +#include #include #include #include @@ -185,7 +185,7 @@ struct pfsync_deferral { TAILQ_ENTRY(pfsync_deferral) pd_entry; struct pf_state *pd_st; struct mbuf *pd_m; - struct timeout pd_tmo; + struct task pd_tmo; }; TAILQ_HEAD(pfsync_deferrals, pfsync_deferral); @@ -224,18 +224,18 @@ struct pfsync_softc { u_int32_t sc_ureq_sent; int sc_bulk_tries; - struct timeout sc_bulkfail_tmo; + struct task sc_bulkfail_tmo; u_int32_t sc_ureq_received; struct pf_state *sc_bulk_next; struct pf_state *sc_bulk_last; - struct timeout sc_bulk_tmo; + struct task sc_bulk_tmo; TAILQ_HEAD(, tdb) 
sc_tdb_q; void *sc_lhcookie; - struct timeout sc_tmo; + struct task sc_tmo; }; struct pfsync_softc *pfsyncif = NULL; @@ -328,9 +328,9 @@ pfsync_clone_create(struct if_clone *ifc IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_hdrlen = sizeof(struct pfsync_header); ifp->if_mtu = ETHERMTU; - timeout_set(&sc->sc_tmo, pfsync_timeout, sc); - timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc); - timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc); + task_set(&sc->sc_tmo, pfsync_timeout, sc); + task_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc); + task_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc); if_attach(ifp); if_alloc_sadl(ifp); @@ -353,10 +353,12 @@ pfsync_clone_destroy(struct ifnet *ifp) { struct pfsync_softc *sc = ifp->if_softc; struct pfsync_deferral *pd; + int s; - timeout_del(&sc->sc_bulkfail_tmo); - timeout_del(&sc->sc_bulk_tmo); - timeout_del(&sc->sc_tmo); + s = splsoftnet(); + task_del(systq, &sc->sc_bulkfail_tmo); + task_del(systq, &sc->sc_bulk_tmo); + task_del(systq, &sc->sc_tmo); #if NCARP > 0 if (!pfsync_sync_ok) carp_group_demote_adj(&sc->sc_if, -1, "pfsync destroy"); @@ -373,7 +375,7 @@ pfsync_clone_destroy(struct ifnet *ifp) while (sc->sc_deferred > 0) { pd = TAILQ_FIRST(&sc->sc_deferrals); - timeout_del(&pd->pd_tmo); + task_del(systq, &pd->pd_tmo); pfsync_undefer(pd, 0); } @@ -414,7 +416,7 @@ pfsync_syncdev_state(void *arg) } /* drop everything */ - timeout_del(&sc->sc_tmo); + task_del(systq, &sc->sc_tmo); pfsync_drop(sc); pfsync_cancel_full_update(sc); @@ -747,8 +749,8 @@ pfsync_in_clr(caddr_t buf, int len, int (kif = pfi_kif_find(clr->ifname)) == NULL) continue; - for (st = RB_MIN(pf_state_tree_id, &tree_id); st; st = nexts) { - nexts = RB_NEXT(pf_state_tree_id, &tree_id, st); + for (st = RBT_MIN(pf_state_tree_id, &tree_id); st; st = nexts) { + nexts = RBT_NEXT(pf_state_tree_id, st); if (st->creatorid == creatorid && ((kif && st->kif == kif) || !kif)) { SET(st->state_flags, PFSTATE_NOSYNC); @@ -1103,7 +1105,7 @@ pfsync_in_bus(caddr_t 
buf, int len, int switch (bus->status) { case PFSYNC_BUS_START: - timeout_add(&sc->sc_bulkfail_tmo, 4 * hz + + task_add_ticks(systq, &sc->sc_bulkfail_tmo, 4 * hz + pf_pool_limits[PF_LIMIT_STATES].limit / ((sc->sc_if.if_mtu - PFSYNC_MINPKT) / sizeof(struct pfsync_state))); @@ -1116,7 +1118,7 @@ pfsync_in_bus(caddr_t buf, int len, int /* that's it, we're happy */ sc->sc_ureq_sent = 0; sc->sc_bulk_tries = 0; - timeout_del(&sc->sc_bulkfail_tmo); + task_del(systq, &sc->sc_bulkfail_tmo); #if NCARP > 0 if (!pfsync_sync_ok) carp_group_demote_adj(&sc->sc_if, -1, @@ -1262,7 +1264,7 @@ pfsyncioctl(struct ifnet *ifp, u_long cm ifp->if_flags &= ~IFF_RUNNING; /* drop everything */ - timeout_del(&sc->sc_tmo); + task_del(systq, &sc->sc_tmo); pfsync_drop(sc); pfsync_cancel_full_update(sc); @@ -1681,7 +1683,7 @@ pfsync_insert_state(struct pf_state *st) #endif if (sc->sc_len == PFSYNC_MINPKT) - timeout_add_sec(&sc->sc_tmo, 1); + task_add_ticks(systq, &sc->sc_tmo, 1); pfsync_q_ins(st, PFSYNC_S_INS); @@ -1703,7 +1705,7 @@ pfsync_defer(struct pf_state *st, struct if (sc->sc_deferred >= 128) { pd = TAILQ_FIRST(&sc->sc_deferrals); - if (timeout_del(&pd->pd_tmo)) + if (task_del(systq, &pd->pd_tmo)) pfsync_undefer(pd, 0); } @@ -1720,8 +1722,8 @@ pfsync_defer(struct pf_state *st, struct sc->sc_deferred++; TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry); - timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd); - timeout_add_msec(&pd->pd_tmo, 20); + task_set(&pd->pd_tmo, pfsync_defer_tmo, pd); + task_add_msec(systq, &pd->pd_tmo, 20); schednetisr(NETISR_PFSYNC); @@ -1796,7 +1798,7 @@ pfsync_deferred(struct pf_state *st, int TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) { if (pd->pd_st == st) { - if (timeout_del(&pd->pd_tmo)) + if (task_del(systq, &pd->pd_tmo)) pfsync_undefer(pd, drop); return; } @@ -1825,7 +1827,7 @@ pfsync_update_state(struct pf_state *st) } if (sc->sc_len == PFSYNC_MINPKT) - timeout_add_sec(&sc->sc_tmo, 1); + task_add_sec(systq, &sc->sc_tmo, 1); switch (st->sync_state) { case 
PFSYNC_S_UPD_C: @@ -1859,8 +1861,8 @@ pfsync_update_state(struct pf_state *st) void pfsync_cancel_full_update(struct pfsync_softc *sc) { - if (timeout_pending(&sc->sc_bulkfail_tmo) || - timeout_pending(&sc->sc_bulk_tmo)) { + if (task_pending(&sc->sc_bulkfail_tmo) || + task_pending(&sc->sc_bulk_tmo)) { #if NCARP > 0 if (!pfsync_sync_ok) carp_group_demote_adj(&sc->sc_if, -1, @@ -1874,8 +1876,8 @@ pfsync_cancel_full_update(struct pfsync_ pfsync_sync_ok = 1; DPFPRINTF(LOG_INFO, "cancelling bulk update"); } - timeout_del(&sc->sc_bulkfail_tmo); - timeout_del(&sc->sc_bulk_tmo); + task_del(systq, &sc->sc_bulkfail_tmo); + task_del(systq, &sc->sc_bulk_tmo); sc->sc_bulk_next = NULL; sc->sc_bulk_last = NULL; sc->sc_ureq_sent = 0; @@ -1895,7 +1897,7 @@ pfsync_request_full_update(struct pfsync #endif pfsync_sync_ok = 0; DPFPRINTF(LOG_INFO, "requesting bulk update"); - timeout_add(&sc->sc_bulkfail_tmo, 4 * hz + + task_add_ticks(systq, &sc->sc_bulkfail_tmo, 4 * hz + pf_pool_limits[PF_LIMIT_STATES].limit / ((sc->sc_if.if_mtu - PFSYNC_MINPKT) / sizeof(struct pfsync_state))); @@ -1994,7 +1996,7 @@ pfsync_delete_state(struct pf_state *st) } if (sc->sc_len == PFSYNC_MINPKT) - timeout_add_sec(&sc->sc_tmo, 1); + task_add_sec(systq, &sc->sc_tmo, 1); switch (st->sync_state) { case PFSYNC_S_INS: @@ -2187,7 +2189,7 @@ pfsync_bulk_start(void) sc->sc_bulk_last = sc->sc_bulk_next; pfsync_bulk_status(PFSYNC_BUS_START); - timeout_add(&sc->sc_bulk_tmo, 0); + task_add(systq, &sc->sc_bulk_tmo); } } @@ -2227,7 +2229,7 @@ pfsync_bulk_update(void *arg) sizeof(struct pfsync_state)) { /* we've filled a packet */ sc->sc_bulk_next = st; - timeout_add(&sc->sc_bulk_tmo, 1); + task_add_ticks(systq, &sc->sc_bulk_tmo, 1); break; } } @@ -2268,7 +2270,7 @@ pfsync_bulk_fail(void *arg) if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { /* Try again */ - timeout_add_sec(&sc->sc_bulkfail_tmo, 5); + task_add_sec(systq, &sc->sc_bulkfail_tmo, 5); pfsync_request_update(0, 0); } else { /* Pretend like the transfer was ok 
*/ Index: sys/net/ifq.c =================================================================== RCS file: /cvs/src/sys/net/ifq.c,v retrieving revision 1.4 diff -u -p -r1.4 ifq.c --- sys/net/ifq.c 29 Dec 2015 12:35:43 -0000 1.4 +++ sys/net/ifq.c 4 Oct 2016 00:09:00 -0000 @@ -67,8 +67,6 @@ void ifq_start_task(void *); void ifq_restart_task(void *); void ifq_barrier_task(void *); -#define TASK_ONQUEUE 0x1 - void ifq_serialize(struct ifqueue *ifq, struct task *t) { Index: sys/netinet/ip_carp.c =================================================================== RCS file: /cvs/src/sys/netinet/ip_carp.c,v retrieving revision 1.293 diff -u -p -r1.293 ip_carp.c --- sys/netinet/ip_carp.c 25 Jul 2016 16:44:04 -0000 1.293 +++ sys/netinet/ip_carp.c 4 Oct 2016 00:09:00 -0000 @@ -41,7 +41,7 @@ #include #include #include -#include +#include #include #include #include @@ -107,9 +107,9 @@ struct carp_vhost_entry { int vhid; int advskew; enum { INIT = 0, BACKUP, MASTER } state; - struct timeout ad_tmo; /* advertisement timeout */ - struct timeout md_tmo; /* master down timeout */ - struct timeout md6_tmo; /* master down timeout */ + struct task ad_tmo; /* advertisement timeout */ + struct task md_tmo; /* master down timeout */ + struct task md6_tmo; /* master down timeout */ u_int64_t vhe_replay_cookie; @@ -675,7 +675,7 @@ carp_proto_input_c(struct ifnet *ifp, st timercmp(&sc_tv, &ch_tv, ==)) && (ch->carp_demote <= carp_group_demote_count(sc))) || ch->carp_demote < carp_group_demote_count(sc)) { - timeout_del(&vhe->ad_tmo); + task_del(systq, &vhe->ad_tmo); carp_set_state(vhe, BACKUP); carp_setrun(vhe, 0); } @@ -831,9 +831,9 @@ carp_new_vhost(struct carp_softc *sc, in vhe->vhid = vhid; vhe->advskew = advskew; vhe->state = INIT; - timeout_set(&vhe->ad_tmo, carp_send_ad, vhe); - timeout_set(&vhe->md_tmo, carp_master_down, vhe); - timeout_set(&vhe->md6_tmo, carp_master_down, vhe); + task_set(&vhe->ad_tmo, carp_send_ad, vhe); + task_set(&vhe->md_tmo, carp_master_down, vhe); + 
task_set(&vhe->md6_tmo, carp_master_down, vhe); KERNEL_ASSERT_LOCKED(); /* touching carp_vhosts */ @@ -879,9 +879,9 @@ carp_del_all_timeouts(struct carp_softc KERNEL_ASSERT_LOCKED(); /* touching carp_vhosts */ SRPL_FOREACH_LOCKED(vhe, &sc->carp_vhosts, vhost_entries) { - timeout_del(&vhe->ad_tmo); - timeout_del(&vhe->md_tmo); - timeout_del(&vhe->md6_tmo); + task_del(systq, &vhe->ad_tmo); + task_del(systq, &vhe->md_tmo); + task_del(systq, &vhe->md6_tmo); } } @@ -1248,7 +1248,7 @@ retry_later: sc->cur_vhe = NULL; splx(s); if (advbase != 255 || advskew != 255) - timeout_add(&vhe->ad_tmo, tvtohz(&tv)); + task_add_ticks(systq, &vhe->ad_tmo, tvtohz(&tv)); } /* @@ -1580,7 +1580,7 @@ carp_setrun(struct carp_vhost_entry *vhe carp_setrun(vhe, 0); break; case BACKUP: - timeout_del(&vhe->ad_tmo); + task_del(systq, &vhe->ad_tmo); tv.tv_sec = 3 * sc->sc_advbase; if (sc->sc_advbase == 0 && vhe->advskew == 0) tv.tv_usec = 3 * 1000000 / 256; @@ -1592,18 +1592,22 @@ carp_setrun(struct carp_vhost_entry *vhe sc->sc_delayed_arp = -1; switch (af) { case AF_INET: - timeout_add(&vhe->md_tmo, tvtohz(&tv)); + task_add_ticks(systq, &vhe->md_tmo, tvtohz(&tv)); break; #ifdef INET6 case AF_INET6: - timeout_add(&vhe->md6_tmo, tvtohz(&tv)); + task_add_ticks(systq, &vhe->md6_tmo, tvtohz(&tv)); break; #endif /* INET6 */ default: - if (sc->sc_naddrs) - timeout_add(&vhe->md_tmo, tvtohz(&tv)); - if (sc->sc_naddrs6) - timeout_add(&vhe->md6_tmo, tvtohz(&tv)); + if (sc->sc_naddrs) { + task_add_ticks(systq, &vhe->md_tmo, + tvtohz(&tv)); + } + if (sc->sc_naddrs6) { + task_add_ticks(systq, &vhe->md6_tmo, + tvtohz(&tv)); + } break; } break; @@ -1613,7 +1617,7 @@ carp_setrun(struct carp_vhost_entry *vhe tv.tv_usec = 1 * 1000000 / 256; else tv.tv_usec = vhe->advskew * 1000000 / 256; - timeout_add(&vhe->ad_tmo, tvtohz(&tv)); + task_add_ticks(systq, &vhe->ad_tmo, tvtohz(&tv)); break; } } @@ -2037,7 +2041,7 @@ carp_ioctl(struct ifnet *ifp, u_long cmd if (vhe->state != INIT && carpr.carpr_state != vhe->state) { 
switch (carpr.carpr_state) { case BACKUP: - timeout_del(&vhe->ad_tmo); + task_del(systq, &vhe->ad_tmo); carp_set_state_all(sc, BACKUP); carp_setrun_all(sc, 0); break; Index: sys/netinet/tcp_subr.c =================================================================== RCS file: /cvs/src/sys/netinet/tcp_subr.c,v retrieving revision 1.156 diff -u -p -r1.156 tcp_subr.c --- sys/netinet/tcp_subr.c 24 Sep 2016 14:51:37 -0000 1.156 +++ sys/netinet/tcp_subr.c 4 Oct 2016 00:09:00 -0000 @@ -73,12 +73,13 @@ #include #include #include -#include +#include #include #include #include #include +#include #include #include Index: sys/netinet/tcp_timer.h =================================================================== RCS file: /cvs/src/sys/netinet/tcp_timer.h,v retrieving revision 1.13 diff -u -p -r1.13 tcp_timer.h --- sys/netinet/tcp_timer.h 6 Jul 2011 23:44:20 -0000 1.13 +++ sys/netinet/tcp_timer.h 4 Oct 2016 00:09:00 -0000 @@ -116,16 +116,17 @@ const char *tcptimers[] = * Init, arm, disarm, and test TCP timers. */ #define TCP_TIMER_INIT(tp, timer) \ - timeout_set(&(tp)->t_timer[(timer)], tcp_timer_funcs[(timer)], tp) + task_set(&(tp)->t_timer[(timer)], tcp_timer_funcs[(timer)], tp) #define TCP_TIMER_ARM(tp, timer, nticks) \ - timeout_add(&(tp)->t_timer[(timer)], (nticks) * (hz / PR_SLOWHZ)) + task_add_ticks(systq, &(tp)->t_timer[(timer)], \ + (nticks) * (hz / PR_SLOWHZ)) #define TCP_TIMER_DISARM(tp, timer) \ - timeout_del(&(tp)->t_timer[(timer)]) + task_del(systq, &(tp)->t_timer[(timer)]) #define TCP_TIMER_ISARMED(tp, timer) \ - timeout_pending(&(tp)->t_timer[(timer)]) + task_pending(&(tp)->t_timer[(timer)]) /* * Force a time value to be in a certain range. 
Index: sys/netinet/tcp_var.h =================================================================== RCS file: /cvs/src/sys/netinet/tcp_var.h,v retrieving revision 1.115 diff -u -p -r1.115 tcp_var.h --- sys/netinet/tcp_var.h 20 Jul 2016 19:57:53 -0000 1.115 +++ sys/netinet/tcp_var.h 4 Oct 2016 00:09:00 -0000 @@ -35,7 +35,7 @@ #ifndef _NETINET_TCP_VAR_H_ #define _NETINET_TCP_VAR_H_ -#include +#include /* * Kernel variables for tcp. @@ -69,7 +69,7 @@ struct tcpqent { */ struct tcpcb { struct tcpqehead t_segq; /* sequencing queue */ - struct timeout t_timer[TCPT_NTIMERS]; /* tcp timers */ + struct task t_timer[TCPT_NTIMERS]; /* tcp timers */ short t_state; /* state of this connection */ short t_rxtshift; /* log(2) of rexmt exp. backoff */ short t_rxtcur; /* current retransmit value */ @@ -103,7 +103,7 @@ struct tcpcb { struct mbuf *t_template; /* skeletal packet for transmit */ struct inpcb *t_inpcb; /* back pointer to internet pcb */ - struct timeout t_delack_to; /* delayed ACK callback */ + struct task t_delack_to; /* delayed ACK callback */ /* * The following fields are used as in the protocol specification. * See RFC793, Dec. 1981, page 21. 
@@ -217,10 +217,10 @@ extern int tcp_delack_ticks; void tcp_delack(void *); #define TCP_INIT_DELACK(tp) \ - timeout_set(&(tp)->t_delack_to, tcp_delack, tp) + task_set(&(tp)->t_delack_to, tcp_delack, tp) #define TCP_RESTART_DELACK(tp) \ - timeout_add(&(tp)->t_delack_to, tcp_delack_ticks) + task_add_ticks(systq, &(tp)->t_delack_to, tcp_delack_ticks) #define TCP_SET_DELACK(tp) \ do { \ @@ -234,7 +234,7 @@ do { \ do { \ if ((tp)->t_flags & TF_DELACK) { \ (tp)->t_flags &= ~TF_DELACK; \ - timeout_del(&(tp)->t_delack_to); \ + task_del(systq, &(tp)->t_delack_to); \ } \ } while (/* CONSTCOND */ 0) Index: sys/sys/task.h =================================================================== RCS file: /cvs/src/sys/sys/task.h,v retrieving revision 1.11 diff -u -p -r1.11 task.h --- sys/sys/task.h 7 Jun 2016 07:53:33 -0000 1.11 +++ sys/sys/task.h 4 Oct 2016 00:09:00 -0000 @@ -20,32 +20,53 @@ #define _SYS_TASK_H_ #include +#include struct taskq; struct task { - TAILQ_ENTRY(task) t_entry; + union { + TAILQ_ENTRY(task) _t_list; + struct heap_entry _t_heap; + } _t_entry; +#define t_entry _t_entry._t_list + void (*t_func)(void *); void *t_arg; + unsigned int t_flags; + int t_deadline; }; - TAILQ_HEAD(task_list, task); +#define TASK_ONQUEUE 2 /* task is on the todo queue */ +#define TASK_INITIALIZED 4 /* task is initialized */ + #define TASKQ_MPSAFE (1 << 0) #define TASKQ_CANTSLEEP (1 << 1) -#define TASK_INITIALIZER(_f, _a) {{ NULL, NULL }, (_f), (_a), 0 } +#define task_pending(_t) ((_t)->t_flags & TASK_ONQUEUE) +#define task_initialized(_t) ((_t)->t_flags & TASK_INITIALIZED) #ifdef _KERNEL + extern struct taskq *const systq; extern struct taskq *const systqmp; +#define TASK_INITIALIZER(_f, _a) { \ + .t_func = (_f), \ + .t_arg = (_a), \ + .t_flags = TASK_INITIALIZED, \ +} + struct taskq *taskq_create(const char *, unsigned int, int, unsigned int); void taskq_destroy(struct taskq *); void task_set(struct task *, void (*)(void *), void *); int task_add(struct taskq *, struct task *); +int 
task_add_ticks(struct taskq *, struct task *, int); +int task_add_sec(struct taskq *, struct task *, int); +int task_add_msec(struct taskq *, struct task *, int); int task_del(struct taskq *, struct task *); #endif /* _KERNEL */ Index: sys/sys/tree.h =================================================================== RCS file: /cvs/src/sys/sys/tree.h,v retrieving revision 1.24 diff -u -p -r1.24 tree.h --- sys/sys/tree.h 15 Sep 2016 06:07:22 -0000 1.24 +++ sys/sys/tree.h 27 Sep 2016 04:22:52 -0000 @@ -984,4 +984,107 @@ RBT_GENERATE_INTERNAL(_name, _type, _fie #endif /* _KERNEL */ +struct heap_type { + int (*t_compare)(const void *, const void *); + unsigned int t_offset; /* offset of heap_entry in type */ +}; + +struct heap_entry { + struct heap_entry *he_left; + struct heap_entry *he_child; + struct heap_entry *he_nextsibling; +}; + +struct heap { + struct heap_entry *h_root; +}; + +#define HEAP_HEAD(_name) \ +struct _name { \ + struct heap heap; \ +} + +#ifdef _KERNEL + +static inline void +_heap_init(struct heap *h) +{ + h->h_root = NULL; +} + +static inline int +_heap_empty(struct heap *h) +{ + return (h->h_root == NULL); +} + +void _heap_insert(const struct heap_type *, struct heap *, void *); +void _heap_remove(const struct heap_type *, struct heap *, void *); +void *_heap_first(const struct heap_type *, struct heap *); +void *_heap_extract(const struct heap_type *, struct heap *); +void *_heap_cextract(const struct heap_type *, struct heap *, const void *); + +#define HEAP_INITIALIZER(_head) { { NULL } } + +#define HEAP_PROTOTYPE(_name, _type) \ +extern const struct heap_type *const _name##_HEAP_TYPE; \ + \ +static inline void \ +_name##_HEAP_INIT(struct _name *head) \ +{ \ + _heap_init(&head->heap); \ +} \ + \ +static inline void \ +_name##_HEAP_INSERT(struct _name *head, struct _type *elm) \ +{ \ + _heap_insert(_name##_HEAP_TYPE, &head->heap, elm); \ +} \ + \ +static inline void \ +_name##_HEAP_REMOVE(struct _name *head, struct _type *elm) \ +{ \ + 
_heap_remove(_name##_HEAP_TYPE, &head->heap, elm); \ +} \ + \ +static inline struct _type * \ +_name##_HEAP_FIRST(struct _name *head) \ +{ \ + return _heap_first(_name##_HEAP_TYPE, &head->heap); \ +} \ + \ +static inline struct _type * \ +_name##_HEAP_EXTRACT(struct _name *head) \ +{ \ + return _heap_extract(_name##_HEAP_TYPE, &head->heap); \ +} \ + \ +static inline struct _type * \ +_name##_HEAP_CEXTRACT(struct _name *head, const struct _type *key) \ +{ \ + return _heap_cextract(_name##_HEAP_TYPE, &head->heap, key); \ +} + +#define HEAP_GENERATE(_name, _type, _field, _cmp) \ +static int \ +_name##_HEAP_COMPARE(const void *lptr, const void *rptr) \ +{ \ + const struct _type *l = lptr, *r = rptr; \ + return _cmp(l, r); \ +} \ +static const struct heap_type _name##_HEAP_INFO = { \ + _name##_HEAP_COMPARE, \ + offsetof(struct _type, _field), \ +}; \ +const struct heap_type *const _name##_HEAP_TYPE = &_name##_HEAP_INFO + +#define HEAP_INIT(_name, _h) _name##_HEAP_INIT((_h)) +#define HEAP_INSERT(_name, _h, _e) _name##_HEAP_INSERT((_h), (_e)) +#define HEAP_REMOVE(_name, _h, _e) _name##_HEAP_REMOVE((_h), (_e)) +#define HEAP_FIRST(_name, _h) _name##_HEAP_FIRST((_h)) +#define HEAP_EXTRACT(_name, _h) _name##_HEAP_EXTRACT((_h)) +#define HEAP_CEXTRACT(_name, _h, _k) _name##_HEAP_CEXTRACT((_h), (_k)) + +#endif /* _KERNEL */ + #endif /* _SYS_TREE_H_ */ Index: usr.sbin/trpt/trpt.c =================================================================== RCS file: /cvs/src/usr.sbin/trpt/trpt.c,v retrieving revision 1.33 diff -u -p -r1.33 trpt.c --- usr.sbin/trpt/trpt.c 27 Aug 2016 01:50:07 -0000 1.33 +++ usr.sbin/trpt/trpt.c 4 Oct 2016 00:09:00 -0000 @@ -64,9 +64,7 @@ #include #define PRUREQUESTS #include -#define _KERNEL -#include /* to get timeout_pending() and such */ -#undef _KERNEL +#include /* to get task_pending() and such */ #include #include @@ -401,10 +399,10 @@ tcp_trace(short act, short ostate, struc int i; for (i = 0; i < TCPT_NTIMERS; i++) { - if 
(timeout_pending(&tp->t_timer[i])) + if (!task_pending(&tp->t_timer[i])) continue; printf("%s%s=%d", cp, tcptimers[i], - tp->t_timer[i].to_time); + tp->t_timer[i].t_deadline); if (i == TCPT_REXMT) printf(" (t_rxtshft=%d)", tp->t_rxtshift); cp = ", ";