Index: sys/task.h =================================================================== RCS file: /cvs/src/sys/sys/task.h,v retrieving revision 1.11 diff -u -p -r1.11 task.h --- sys/task.h 7 Jun 2016 07:53:33 -0000 1.11 +++ sys/task.h 20 Sep 2016 09:48:13 -0000 @@ -20,33 +20,49 @@ #define _SYS_TASK_H_ #include +#include struct taskq; +struct taskh; struct task { - TAILQ_ENTRY(task) t_entry; + union { + TAILQ_ENTRY(task) _t_list; + struct heap_entry _t_heap; + } _t_entry; + void (*t_func)(void *); void *t_arg; + unsigned int t_flags; + int t_deadline; }; +#define t_entry _t_entry._t_list TAILQ_HEAD(task_list, task); #define TASKQ_MPSAFE (1 << 0) #define TASKQ_CANTSLEEP (1 << 1) -#define TASK_INITIALIZER(_f, _a) {{ NULL, NULL }, (_f), (_a), 0 } +#define TASK_INITIALIZER(_f, _a) { .t_func = (_f), .t_arg = (_a), .t_flags = 0 } #ifdef _KERNEL extern struct taskq *const systq; extern struct taskq *const systqmp; +void task_set(struct task *, void (*)(void *), void *); + struct taskq *taskq_create(const char *, unsigned int, int, unsigned int); void taskq_destroy(struct taskq *); -void task_set(struct task *, void (*)(void *), void *); int task_add(struct taskq *, struct task *); int task_del(struct taskq *, struct task *); + +struct taskh *taskh_create(const char *, unsigned int, int, unsigned int); +void taskh_destroy(struct taskh *); + +int taskh_add(struct taskh *, struct task *, int); +int taskh_del(struct taskh *, struct task *); #endif /* _KERNEL */ Index: sys/tree.h =================================================================== RCS file: /cvs/src/sys/sys/tree.h,v retrieving revision 1.24 diff -u -p -r1.24 tree.h --- sys/tree.h 15 Sep 2016 06:07:22 -0000 1.24 +++ sys/tree.h 20 Sep 2016 09:48:13 -0000 @@ -984,4 +984,107 @@ RBT_GENERATE_INTERNAL(_name, _type, _fie #endif /* _KERNEL */ +struct heap_type { + int (*t_compare)(const void *, const void *); + unsigned int t_offset; /* offset of heap_entry in type */ +}; + +struct heap_entry { + struct heap_entry 
*he_left; + struct heap_entry *he_child; + struct heap_entry *he_nextsibling; +}; + +struct heap { + struct heap_entry *h_root; +}; + +#define HEAP_HEAD(_name) \ +struct _name { \ + struct heap heap; \ +} + +#ifdef _KERNEL + +static inline void +_heap_init(struct heap *h) +{ + h->h_root = NULL; +} + +static inline int +_heap_empty(struct heap *h) +{ + return (h->h_root == NULL); +} + +void _heap_insert(const struct heap_type *, struct heap *, void *); +void _heap_remove(const struct heap_type *, struct heap *, void *); +void *_heap_min(const struct heap_type *, struct heap *); +void *_heap_extract(const struct heap_type *, struct heap *); +void *_heap_cextract(const struct heap_type *, struct heap *, const void *); + +#define HEAP_INITIALIZER(_head) { { NULL } } + +#define HEAP_PROTOTYPE(_name, _type) \ +extern const struct heap_type *const _name##_HEAP_TYPE; \ + \ +static inline void \ +_name##_HEAP_INIT(struct _name *head) \ +{ \ + _heap_init(&head->heap); \ +} \ + \ +static inline void \ +_name##_HEAP_INSERT(struct _name *head, struct _type *elm) \ +{ \ + _heap_insert(_name##_HEAP_TYPE, &head->heap, elm); \ +} \ + \ +static inline void \ +_name##_HEAP_REMOVE(struct _name *head, struct _type *elm) \ +{ \ + _heap_remove(_name##_HEAP_TYPE, &head->heap, elm); \ +} \ + \ +static inline struct _type * \ +_name##_HEAP_MIN(struct _name *head) \ +{ \ + return _heap_min(_name##_HEAP_TYPE, &head->heap); \ +} \ + \ +static inline struct _type * \ +_name##_HEAP_EXTRACT(struct _name *head) \ +{ \ + return _heap_extract(_name##_HEAP_TYPE, &head->heap); \ +} \ + \ +static inline struct _type * \ +_name##_HEAP_CEXTRACT(struct _name *head, const struct _type *key) \ +{ \ + return _heap_cextract(_name##_HEAP_TYPE, &head->heap, key); \ +} + +#define HEAP_GENERATE(_name, _type, _field, _cmp) \ +static int \ +_name##_HEAP_COMPARE(const void *lptr, const void *rptr) \ +{ \ + const struct _type *l = lptr, *r = rptr; \ + return _cmp(l, r); \ +} \ +static const struct heap_type 
_name##_HEAP_INFO = { \ + _name##_HEAP_COMPARE, \ + offsetof(struct _type, _field), \ +}; \ +const struct heap_type *const _name##_HEAP_TYPE = &_name##_HEAP_INFO + +#define HEAP_INIT(_name, _h) _name##_HEAP_INIT((_h)) +#define HEAP_INSERT(_name, _h, _e) _name##_HEAP_INSERT((_h), (_e)) +#define HEAP_REMOVE(_name, _h, _e) _name##_HEAP_REMOVE((_h), (_e)) +#define HEAP_MIN(_name, _h) _name##_HEAP_MIN((_h)) +#define HEAP_EXTRACT(_name, _h) _name##_HEAP_EXTRACT((_h)) +#define HEAP_CEXTRACT(_name, _h, _k) _name##_HEAP_CEXTRACT((_h), (_k)) + +#endif /* _KERNEL */ + #endif /* _SYS_TREE_H_ */ Index: kern/kern_task.c =================================================================== RCS file: /cvs/src/sys/kern/kern_task.c,v retrieving revision 1.18 diff -u -p -r1.18 kern_task.c --- kern/kern_task.c 11 Aug 2016 01:32:31 -0000 1.18 +++ kern/kern_task.c 20 Sep 2016 09:48:13 -0000 @@ -21,163 +21,257 @@ #include #include #include +#include + #include #define TASK_ONQUEUE 1 -struct taskq { +/* + * common infrastructure for task queues and heaps + */ + +struct task_threads { enum { - TQ_S_CREATED, - TQ_S_RUNNING, - TQ_S_DESTROYED - } tq_state; - unsigned int tq_running; - unsigned int tq_nthreads; - unsigned int tq_flags; - const char *tq_name; + TT_S_CREATED, + TT_S_RUNNING, + TT_S_KILLED, + TT_S_DESTROYED + } tt_state; + unsigned int tt_running; + unsigned int tt_nthreads; + unsigned int tt_flags; + const char *tt_name; + + struct mutex tt_mtx; +}; + +typedef int (*sleepfn)(const volatile void *, struct mutex *, int, + const char *, int); + +void taskt_init(struct task_threads *, unsigned int, const char *, + unsigned int, int); +void taskt_destroy(struct task_threads *); + +void taskt_create_thread(struct task_threads *, + void (*)(void *)); +int taskt_sleep(const volatile void *, struct mutex *, int, + const char *, int); +void taskt_thread(struct task_threads *, + int (*)(struct task_threads *, struct task *, sleepfn)); + +/* task queues */ + +struct taskq { + struct 
task_threads tq_threads; - struct mutex tq_mtx; struct task_list tq_worklist; }; struct taskq taskq_sys = { - TQ_S_CREATED, - 0, - 1, - 0, - "systq", - MUTEX_INITIALIZER(IPL_HIGH), + { + TT_S_CREATED, + 0, + 1, + 0, + "systq", + MUTEX_INITIALIZER(IPL_HIGH) + }, TAILQ_HEAD_INITIALIZER(taskq_sys.tq_worklist) }; struct taskq taskq_sys_mp = { - TQ_S_CREATED, - 0, - 1, - TASKQ_MPSAFE, - "systqmp", - MUTEX_INITIALIZER(IPL_HIGH), + { + TT_S_CREATED, + 0, + 1, + TASKQ_MPSAFE, + "systqmp", + MUTEX_INITIALIZER(IPL_HIGH), + }, TAILQ_HEAD_INITIALIZER(taskq_sys_mp.tq_worklist) }; -typedef int (*sleepfn)(const volatile void *, struct mutex *, int, - const char *, int); +void taskq_create_thread(void *); +int taskq_next_work(struct task_threads *, struct task *, sleepfn); +void taskq_thread(void *); struct taskq *const systq = &taskq_sys; struct taskq *const systqmp = &taskq_sys_mp; -void taskq_init(void); /* called in init_main.c */ -void taskq_create_thread(void *); -int taskq_sleep(const volatile void *, struct mutex *, int, - const char *, int); -int taskq_next_work(struct taskq *, struct task *, sleepfn); -void taskq_thread(void *); +/* + * task heaps + */ + +HEAP_HEAD(task_heap); + +struct taskh { + struct task_threads th_threads; + struct task_heap th_workheap; + struct timeout th_schedule; +}; + +void taskh_create_thread(void *); +void taskh_thread(void *); +void taskh_wakeup(void *); + +HEAP_PROTOTYPE(task_heap, task); + +/* + * task thread implementation + */ + +void tasks_init(void); void -taskq_init(void) +tasks_init(void) { kthread_create_deferred(taskq_create_thread, systq); kthread_create_deferred(taskq_create_thread, systqmp); } -struct taskq * -taskq_create(const char *name, unsigned int nthreads, int ipl, - unsigned int flags) +void +taskt_init(struct task_threads *tt, unsigned int nthreads, + const char *name, unsigned int flags, int ipl) { - struct taskq *tq; - - tq = malloc(sizeof(*tq), M_DEVBUF, M_WAITOK); - if (tq == NULL) - return (NULL); - - tq->tq_state 
= TQ_S_CREATED; - tq->tq_running = 0; - tq->tq_nthreads = nthreads; - tq->tq_name = name; - tq->tq_flags = flags; + tt->tt_state = TT_S_CREATED; + tt->tt_running = 0; + tt->tt_nthreads = nthreads; + tt->tt_name = name; + tt->tt_flags = flags; - mtx_init(&tq->tq_mtx, ipl); - TAILQ_INIT(&tq->tq_worklist); - - /* try to create a thread to guarantee that tasks will be serviced */ - kthread_create_deferred(taskq_create_thread, tq); - - return (tq); + mtx_init(&tt->tt_mtx, ipl); } void -taskq_destroy(struct taskq *tq) +taskt_destroy(struct task_threads *tt) { - mtx_enter(&tq->tq_mtx); - switch (tq->tq_state) { - case TQ_S_CREATED: - /* tq is still referenced by taskq_create_thread */ - tq->tq_state = TQ_S_DESTROYED; - mtx_leave(&tq->tq_mtx); - return; + mtx_enter(&tt->tt_mtx); - case TQ_S_RUNNING: - tq->tq_state = TQ_S_DESTROYED; + switch (tt->tt_state) { + case TT_S_CREATED: + /* tt is still referenced by taskt_create_thread */ + tt->tt_state = TT_S_KILLED; + + do { + msleep(&tt->tt_state, &tt->tt_mtx, PWAIT, + "taskkill", 0); + } while (tt->tt_state != TT_S_DESTROYED); break; - default: - panic("unexpected %s tq state %u", tq->tq_name, tq->tq_state); - } + case TT_S_RUNNING: + tt->tt_state = TT_S_DESTROYED; - while (tq->tq_running > 0) { - wakeup(tq); - msleep(&tq->tq_running, &tq->tq_mtx, PWAIT, "tqdestroy", 0); - } - mtx_leave(&tq->tq_mtx); + while (tt->tt_running > 0) { + wakeup(tt); + msleep(&tt->tt_running, &tt->tt_mtx, PWAIT, + "taskdtor", 0); + } + break; - free(tq, M_DEVBUF, sizeof(*tq)); + default: + panic("unexpected %s state %u", tt->tt_name, tt->tt_state); + } + mtx_leave(&tt->tt_mtx); } void -taskq_create_thread(void *arg) +taskt_create_thread(struct task_threads *tt, void (*thread)(void *)) { - struct taskq *tq = arg; int rv; - mtx_enter(&tq->tq_mtx); + mtx_enter(&tt->tt_mtx); - switch (tq->tq_state) { - case TQ_S_DESTROYED: - mtx_leave(&tq->tq_mtx); - free(tq, M_DEVBUF, sizeof(*tq)); + switch (tt->tt_state) { + case TT_S_KILLED: + tt->tt_state = 
TT_S_DESTROYED; + mtx_leave(&tt->tt_mtx); + wakeup(&tt->tt_state); return; - case TQ_S_CREATED: - tq->tq_state = TQ_S_RUNNING; + case TT_S_CREATED: + tt->tt_state = TT_S_RUNNING; break; default: - panic("unexpected %s tq state %d", tq->tq_name, tq->tq_state); + panic("unexpected %s state %d", tt->tt_name, tt->tt_state); } do { - tq->tq_running++; - mtx_leave(&tq->tq_mtx); + tt->tt_running++; + mtx_leave(&tt->tt_mtx); - rv = kthread_create(taskq_thread, tq, NULL, tq->tq_name); + rv = kthread_create(thread, tt, NULL, tt->tt_name); - mtx_enter(&tq->tq_mtx); + mtx_enter(&tt->tt_mtx); if (rv != 0) { - printf("unable to create thread for \"%s\" taskq\n", - tq->tq_name); + printf("unable to create task thread for \"%s\"\n", + tt->tt_name); - tq->tq_running--; + tt->tt_running--; /* could have been destroyed during kthread_create */ - if (tq->tq_state == TQ_S_DESTROYED && - tq->tq_running == 0) - wakeup_one(&tq->tq_running); + if (tt->tt_state == TT_S_DESTROYED && + tt->tt_running == 0) + wakeup_one(&tt->tt_running); break; } - } while (tq->tq_running < tq->tq_nthreads); + } while (tt->tt_running < tt->tt_nthreads); - mtx_leave(&tq->tq_mtx); + mtx_leave(&tt->tt_mtx); } +int +taskt_sleep(const volatile void *ident, struct mutex *mtx, int priority, + const char *wmesg, int tmo) +{ + u_int *flags = &curproc->p_flag; + int rv; + + atomic_clearbits_int(flags, P_CANTSLEEP); + rv = msleep(ident, mtx, priority, wmesg, tmo); + atomic_setbits_int(flags, P_CANTSLEEP); + + return (tmo); +} + +void +taskt_thread(struct task_threads *tt, + int (*next_task)(struct task_threads *, struct task *, sleepfn)) +{ + sleepfn ttsleep = msleep; + struct task work; + int last; + + if (ISSET(tt->tt_flags, TASKQ_MPSAFE)) + KERNEL_UNLOCK(); + + if (ISSET(tt->tt_flags, TASKQ_CANTSLEEP)) { + ttsleep = taskt_sleep; + atomic_setbits_int(&curproc->p_flag, P_CANTSLEEP); + } + + while (next_task(tt, &work, ttsleep)) { + (*work.t_func)(work.t_arg); + sched_pause(); + } + + mtx_enter(&tt->tt_mtx); + last = 
(--tt->tt_running == 0); + mtx_leave(&tt->tt_mtx); + + if (ISSET(tt->tt_flags, TASKQ_CANTSLEEP)) + atomic_clearbits_int(&curproc->p_flag, P_CANTSLEEP); + + if (ISSET(tt->tt_flags, TASKQ_MPSAFE)) + KERNEL_LOCK(); + + if (last) + wakeup_one(&tt->tt_running); + + kthread_exit(0); +} + + void task_set(struct task *t, void (*fn)(void *), void *arg) { @@ -186,24 +280,64 @@ task_set(struct task *t, void (*fn)(void t->t_flags = 0; } +/* + * task queue implementation + */ + +struct taskq * +taskq_create(const char *name, unsigned int nthreads, int ipl, + unsigned int flags) +{ + struct taskq *tq; + + tq = malloc(sizeof(*tq), M_DEVBUF, M_WAITOK); + if (tq == NULL) + return (NULL); + + taskt_init(&tq->tq_threads, nthreads, name, flags, ipl); + TAILQ_INIT(&tq->tq_worklist); + + /* try to create a thread to guarantee that tasks will be serviced */ + kthread_create_deferred(taskq_create_thread, tq); + + return (tq); +} + +void +taskq_create_thread(void *arg) +{ + struct taskq *tq = arg; + struct task_threads *tt = &tq->tq_threads; + + taskt_create_thread(tt, taskq_thread); +} + +void +taskq_destroy(struct taskq *tq) +{ + taskt_destroy(&tq->tq_threads); + free(tq, M_DEVBUF, sizeof(*tq)); +} + int task_add(struct taskq *tq, struct task *w) { + struct task_threads *tt = &tq->tq_threads; int rv = 0; if (ISSET(w->t_flags, TASK_ONQUEUE)) return (0); - mtx_enter(&tq->tq_mtx); + mtx_enter(&tt->tt_mtx); if (!ISSET(w->t_flags, TASK_ONQUEUE)) { rv = 1; SET(w->t_flags, TASK_ONQUEUE); TAILQ_INSERT_TAIL(&tq->tq_worklist, w, t_entry); } - mtx_leave(&tq->tq_mtx); + mtx_leave(&tt->tt_mtx); if (rv) - wakeup_one(tq); + wakeup_one(tt); return (rv); } @@ -211,98 +345,222 @@ task_add(struct taskq *tq, struct task * int task_del(struct taskq *tq, struct task *w) { + struct task_threads *tt = &tq->tq_threads; int rv = 0; if (!ISSET(w->t_flags, TASK_ONQUEUE)) return (0); - mtx_enter(&tq->tq_mtx); + mtx_enter(&tt->tt_mtx); if (ISSET(w->t_flags, TASK_ONQUEUE)) { rv = 1; CLR(w->t_flags, TASK_ONQUEUE); 
TAILQ_REMOVE(&tq->tq_worklist, w, t_entry); } - mtx_leave(&tq->tq_mtx); + mtx_leave(&tt->tt_mtx); return (rv); } int -taskq_sleep(const volatile void *ident, struct mutex *mtx, int priority, - const char *wmesg, int tmo) +taskq_next_work(struct task_threads *tt, struct task *work, sleepfn ttsleep) { - u_int *flags = &curproc->p_flag; - int rv; + struct taskq *tq = (struct taskq *)tt; + struct task *next; - atomic_clearbits_int(flags, P_CANTSLEEP); - rv = msleep(ident, mtx, priority, wmesg, tmo); - atomic_setbits_int(flags, P_CANTSLEEP); + mtx_enter(&tt->tt_mtx); + while ((next = TAILQ_FIRST(&tq->tq_worklist)) == NULL) { + if (tt->tt_state != TT_S_RUNNING) { + mtx_leave(&tt->tt_mtx); + return (0); + } - return (tmo); + ttsleep(tt, &tt->tt_mtx, PWAIT, "bored", 0); + } + + TAILQ_REMOVE(&tq->tq_worklist, next, t_entry); + CLR(next->t_flags, TASK_ONQUEUE); + + *work = *next; /* copy to caller to avoid races */ + + next = TAILQ_FIRST(&tq->tq_worklist); + mtx_leave(&tt->tt_mtx); + + if (next != NULL && tt->tt_nthreads > 1) + wakeup_one(tt); + + return (1); +} + +void +taskq_thread(void *arg) +{ + struct task_threads *tt = arg; + + taskt_thread(tt, taskq_next_work); +} + +/* + * task heap implementation + */ + +extern int ticks; + +struct taskh * +taskh_create(const char *name, unsigned int nthreads, int ipl, + unsigned int flags) +{ + struct taskh *th; + + th = malloc(sizeof(*th), M_DEVBUF, M_WAITOK); + if (th == NULL) + return (NULL); + + taskt_init(&th->th_threads, nthreads, name, flags, ipl); + HEAP_INIT(task_heap, &th->th_workheap); + timeout_set(&th->th_schedule, taskh_wakeup, &th->th_threads); + + /* try to create a thread to guarantee that tasks will be serviced */ + kthread_create_deferred(taskh_create_thread, th); + + return (th); +} + +void +taskh_create_thread(void *arg) +{ + struct taskh *th = arg; + struct task_threads *tt = (struct task_threads *)th; + + taskt_create_thread(tt, taskh_thread); +} + +static inline struct task * +taskh_next(struct task_heap
*heap) +{ + struct task key; + + key.t_deadline = ticks; + return (HEAP_CEXTRACT(task_heap, heap, &key)); } int -taskq_next_work(struct taskq *tq, struct task *work, sleepfn tqsleep) +taskh_next_work(struct task_threads *tt, struct task *work, sleepfn ttsleep) { + struct taskh *th = (struct taskh *)tt; struct task *next; + int diff = -1; - mtx_enter(&tq->tq_mtx); - while ((next = TAILQ_FIRST(&tq->tq_worklist)) == NULL) { - if (tq->tq_state != TQ_S_RUNNING) { - mtx_leave(&tq->tq_mtx); + mtx_enter(&tt->tt_mtx); + while ((next = taskh_next(&th->th_workheap)) == NULL) { + if (th->th_threads.tt_state != TT_S_RUNNING) { + mtx_leave(&tt->tt_mtx); return (0); } - tqsleep(tq, &tq->tq_mtx, PWAIT, "bored", 0); + ttsleep(tt, &tt->tt_mtx, PWAIT, "bored", 0); } - TAILQ_REMOVE(&tq->tq_worklist, next, t_entry); CLR(next->t_flags, TASK_ONQUEUE); *work = *next; /* copy to caller to avoid races */ - next = TAILQ_FIRST(&tq->tq_worklist); - mtx_leave(&tq->tq_mtx); + next = HEAP_MIN(task_heap, &th->th_workheap); + if (next != NULL) { + diff = next->t_deadline - ticks; + if (diff > 0) + timeout_add(&th->th_schedule, diff); + else + diff = 0; + } + mtx_leave(&tt->tt_mtx); - if (next != NULL && tq->tq_nthreads > 1) - wakeup_one(tq); + if (diff == 0 && tt->tt_nthreads > 1) + wakeup_one(tt); return (1); } void -taskq_thread(void *xtq) +taskh_thread(void *arg) { - sleepfn tqsleep = msleep; - struct taskq *tq = xtq; - struct task work; - int last; + struct task_threads *tt = arg; - if (ISSET(tq->tq_flags, TASKQ_MPSAFE)) - KERNEL_UNLOCK(); + taskt_thread(tt, taskh_next_work); +} - if (ISSET(tq->tq_flags, TASKQ_CANTSLEEP)) { - tqsleep = taskq_sleep; - atomic_setbits_int(&curproc->p_flag, P_CANTSLEEP); - } +int +taskh_add(struct taskh *th, struct task *w, int to_ticks) +{ + struct task_threads *tt = (struct task_threads *)th; + int rv = 0; + int diff; - while (taskq_next_work(tq, &work, tqsleep)) { - (*work.t_func)(work.t_arg); - sched_pause(); - } + KASSERT(to_ticks >= 0); - 
mtx_enter(&tq->tq_mtx); - last = (--tq->tq_running == 0); - mtx_leave(&tq->tq_mtx); + mtx_enter(&tt->tt_mtx); + if (ISSET(w->t_flags, TASK_ONQUEUE)) + HEAP_REMOVE(task_heap, &th->th_workheap, w); + else + rv = 1; - if (ISSET(tq->tq_flags, TASKQ_CANTSLEEP)) - atomic_clearbits_int(&curproc->p_flag, P_CANTSLEEP); + SET(w->t_flags, TASK_ONQUEUE); + w->t_deadline = ticks + to_ticks; + HEAP_INSERT(task_heap, &th->th_workheap, w); + + w = HEAP_MIN(task_heap, &th->th_workheap); + diff = w->t_deadline - ticks; + if (diff >= 0) + timeout_add(&th->th_schedule, diff); + mtx_leave(&tt->tt_mtx); - if (ISSET(tq->tq_flags, TASKQ_MPSAFE)) - KERNEL_LOCK(); + if (diff < 0) + wakeup_one(tt); - if (last) - wakeup_one(&tq->tq_running); + return (rv); +} - kthread_exit(0); +int +taskh_del(struct taskh *th, struct task *w) +{ + struct task_threads *tt = (struct task_threads *)th; + int rv = 0; + + if (!ISSET(w->t_flags, TASK_ONQUEUE)) + return (0); + + mtx_enter(&tt->tt_mtx); + if (ISSET(w->t_flags, TASK_ONQUEUE)) { + rv = 1; + CLR(w->t_flags, TASK_ONQUEUE); + HEAP_REMOVE(task_heap, &th->th_workheap, w); + + w = HEAP_MIN(task_heap, &th->th_workheap); + if (w == NULL) + timeout_del(&th->th_schedule); + else { + int diff; + + diff = w->t_deadline - ticks; + if (diff >= 0) + timeout_add(&th->th_schedule, diff); + } + } + mtx_leave(&tt->tt_mtx); + + return (rv); } + +static inline int +taskh_cmp(const struct task *a, const struct task *b) +{ + return (a->t_deadline - b->t_deadline); +} + +void +taskh_wakeup(void *ident) +{ + wakeup_one(ident); +} + +HEAP_GENERATE(task_heap, task, _t_entry._t_heap, taskh_cmp); Index: kern/subr_tree.c =================================================================== RCS file: /cvs/src/sys/kern/subr_tree.c,v retrieving revision 1.6 diff -u -p -r1.6 subr_tree.c --- kern/subr_tree.c 20 Sep 2016 01:11:27 -0000 1.6 +++ kern/subr_tree.c 20 Sep 2016 09:48:13 -0000 @@ -610,3 +610,181 @@ _rb_check(const struct rb_type *t, void (unsigned long)RBE_LEFT(rbe) == poison &&
(unsigned long)RBE_RIGHT(rbe) == poison); } + +static inline struct heap_entry * +heap_n2e(const struct heap_type *t, void *node) +{ + caddr_t addr = (caddr_t)node; + + return ((struct heap_entry *)(addr + t->t_offset)); +} + +static inline void * +heap_e2n(const struct heap_type *t, struct heap_entry *rbe) +{ + caddr_t addr = (caddr_t)rbe; + + return ((void *)(addr - t->t_offset)); +} + +static struct heap_entry * +_heap_merge(const struct heap_type *t, + struct heap_entry *he1, struct heap_entry *he2) +{ + struct heap_entry *hi, *lo; + struct heap_entry *child; + + if (he1 == NULL) + return (he2); + if (he2 == NULL) + return (he1); + + if (t->t_compare(he1, he2) >= 0) { + hi = he1; + lo = he2; + } else { + lo = he1; + hi = he2; + } + + child = lo->he_child; + + hi->he_left = lo; + hi->he_nextsibling = child; + if (child != NULL) + child->he_left = hi; + lo->he_child = hi; + lo->he_left = NULL; + lo->he_nextsibling = NULL; + + return (lo); +} + +static inline void +_heap_sibling_remove(struct heap_entry *he) +{ + if (he->he_left == NULL) + return; + + if (he->he_left->he_child == he) { + if ((he->he_left->he_child = he->he_nextsibling) != NULL) + he->he_nextsibling->he_left = he->he_left; + } else { + if ((he->he_left->he_nextsibling = he->he_nextsibling) != NULL) + he->he_nextsibling->he_left = he->he_left; + } + + he->he_left = NULL; + he->he_nextsibling = NULL; +} + +static inline struct heap_entry * +_heap_2pass_merge(const struct heap_type *t, struct heap_entry *root) +{ + struct heap_entry *node, *next = NULL; + struct heap_entry *tmp, *list = NULL; + + node = root->he_child; + if (node == NULL) + return (NULL); + + root->he_child = NULL; + + /* first pass */ + for (next = node->he_nextsibling; next != NULL; + next = (node != NULL ? 
node->he_nextsibling : NULL)) { + tmp = next->he_nextsibling; + node = _heap_merge(t, node, next); + + /* insert head */ + node->he_nextsibling = list; + list = node; + node = tmp; + } + + /* odd child case */ + if (node != NULL) { + node->he_nextsibling = list; + list = node; + } + + /* second pass */ + while (list->he_nextsibling != NULL) { + tmp = list->he_nextsibling->he_nextsibling; + list = _heap_merge(t, list, list->he_nextsibling); + list->he_nextsibling = tmp; + } + + list->he_left = NULL; + list->he_nextsibling = NULL; + + return (list); +} + +void +_heap_insert(const struct heap_type *t, struct heap *h, void *node) +{ + struct heap_entry *he = heap_n2e(t, node); + + he->he_left = NULL; + he->he_child = NULL; + he->he_nextsibling = NULL; + + h->h_root = _heap_merge(t, h->h_root, he); +} + +void +_heap_remove(const struct heap_type *t, struct heap *h, void *node) +{ + struct heap_entry *he = heap_n2e(t, node); + + if (he->he_left == NULL) { + _heap_extract(t, h); + return; + } + + _heap_sibling_remove(he); + h->h_root = _heap_merge(t, h->h_root, _heap_2pass_merge(t, he)); +} + +void * +_heap_min(const struct heap_type *t, struct heap *h) +{ + struct heap_entry *min = h->h_root; + + if (min == NULL) + return (NULL); + + return (heap_e2n(t, min)); +} + +void * +_heap_extract(const struct heap_type *t, struct heap *h) +{ + struct heap_entry *min = h->h_root; + + if (min == NULL) + return (NULL); + + h->h_root = _heap_2pass_merge(t, min); + + return (heap_e2n(t, min)); +} + +void * +_heap_cextract(const struct heap_type *t, struct heap *h, const void *key) +{ + struct heap_entry *min = h->h_root; + void *node; + + if (min == NULL) + return (NULL); + + node = heap_e2n(t, min); + if (t->t_compare(node, key) > 0) + return (NULL); + + h->h_root = _heap_2pass_merge(t, min); + + return (node); +}