Index: sys/sys/rbtree.h
===================================================================
RCS file: sys/sys/rbtree.h
diff -N sys/sys/rbtree.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ sys/sys/rbtree.h	12 Aug 2016 07:06:57 -0000
@@ -0,0 +1,224 @@
+/* $OpenBSD$ */
+
+/*
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SYS_RBTREE_H_
+#define _SYS_RBTREE_H_
+
+#include <sys/types.h>
+
+struct rb_type {
+	int		(*t_compare)(const void *, const void *);
+	void		(*t_augment)(void *);
+	size_t		t_offset;	/* offset of rb_entry in type */
+};
+
+struct rb_entry {
+	struct rb_entry	*rbe_parent;
+	struct rb_entry	*rbe_left;
+	struct rb_entry	*rbe_right;
+	unsigned int	 rbe_color;
+};
+
+struct rb_tree {
+	struct rb_entry	*rbt_root;
+};
+
+static inline void
+_rb_init(struct rb_tree *rbt)
+{
+	rbt->rbt_root = NULL;
+}
+
+static inline int
+_rb_empty(struct rb_tree *rbt)
+{
+	return (rbt->rbt_root == NULL);
+}
+
+void	*_rb_insert(const struct rb_type *, struct rb_tree *, void *);
+void	*_rb_remove(const struct rb_type *, struct rb_tree *, void *);
+void	*_rb_find(const struct rb_type *, struct rb_tree *, const void *);
+void	*_rb_nfind(const struct rb_type *, struct rb_tree *, const void *);
+void	*_rb_root(const struct rb_type *, struct rb_tree *);
+void	*_rb_min(const struct rb_type *, struct rb_tree *);
+void	*_rb_max(const struct rb_type *, struct rb_tree *);
+void	*_rb_next(const struct rb_type *, void *);
+void	*_rb_prev(const struct rb_type *, void *);
+void	*_rb_left(const struct rb_type *, void *);
+void	*_rb_right(const struct rb_type *, void *);
+void	*_rb_parent(const struct rb_type *, void *);
+void	*_rb_color(const struct rb_type *, void *);
+
+#define RBT_HEAD(_name, _type) \
+struct _name { \
+	struct rb_tree rbh_root; \
+}
+
+#define RBT_INITIALIZER(_head)	{ { NULL } }
+
+#define RBT_ENTRY(_type)	struct rb_entry
+
+#define RBT_PROTOTYPE(_name, _type, _field, _cmp) \
+extern const struct rb_type *const _name##_RBT_TYPE; \
+ \
+static inline void \
+_name##_RBT_INIT(struct _name *head) \
+{ \
+	_rb_init(&head->rbh_root); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_INSERT(struct _name *head, struct _type *elm) \
+{ \
+	return _rb_insert(_name##_RBT_TYPE, &head->rbh_root, elm); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_REMOVE(struct _name *head, struct _type *elm) \
+{ \
+	return _rb_remove(_name##_RBT_TYPE, &head->rbh_root, elm); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_FIND(struct _name *head, const struct _type *key) \
+{ \
+	return _rb_find(_name##_RBT_TYPE, &head->rbh_root, key); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_NFIND(struct _name *head, const struct _type *key) \
+{ \
+	return _rb_nfind(_name##_RBT_TYPE, &head->rbh_root, key); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_ROOT(struct _name *head) \
+{ \
+	return _rb_root(_name##_RBT_TYPE, &head->rbh_root); \
+} \
+ \
+static inline int \
+_name##_RBT_EMPTY(struct _name *head) \
+{ \
+	return _rb_empty(&head->rbh_root); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_MIN(struct _name *head) \
+{ \
+	return _rb_min(_name##_RBT_TYPE, &head->rbh_root); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_MAX(struct _name *head) \
+{ \
+	return _rb_max(_name##_RBT_TYPE, &head->rbh_root); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_NEXT(struct _type *elm) \
+{ \
+	return _rb_next(_name##_RBT_TYPE, elm); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_PREV(struct _type *elm) \
+{ \
+	return _rb_prev(_name##_RBT_TYPE, elm); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_LEFT(struct _type *elm) \
+{ \
+	return _rb_left(_name##_RBT_TYPE, elm); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_RIGHT(struct _type *elm) \
+{ \
+	return _rb_right(_name##_RBT_TYPE, elm); \
+} \
+ \
+static inline struct _type * \
+_name##_RBT_PARENT(struct _type *elm) \
+{ \
+	return _rb_parent(_name##_RBT_TYPE, elm); \
+}
+
+#define RBT_GENERATE_INTERNAL(_name, _type, _field, _cmp, _aug) \
+static int \
+_name##_RBT_COMPARE(const void *lptr, const void *rptr) \
+{ \
+	const struct _type *l = lptr, *r = rptr; \
+	return _cmp(l, r); \
+} \
+static const struct rb_type _name##_RBT_INFO = { \
+	_name##_RBT_COMPARE, \
+	_aug, \
+	offsetof(struct _type, _field), \
+}; \
+const struct rb_type *const _name##_RBT_TYPE = &_name##_RBT_INFO
+
+#define RBT_GENERATE_AUGMENT(_name, _type, _field, _cmp, _aug) \
+static void \
+_name##_RBT_AUGMENT(void *ptr) \
+{ \
+	struct _type *p = ptr; \
+	return _aug(p); \
+} \
+RBT_GENERATE_INTERNAL(_name, _type, _field, _cmp, _name##_RBT_AUGMENT)
+
+#define RBT_GENERATE(_name, _type, _field, _cmp) \
+	RBT_GENERATE_INTERNAL(_name, _type, _field, _cmp, NULL)
+
+#define RBT_INIT(_name, _head)		_name##_RBT_INIT(_head)
+#define RBT_INSERT(_name, _head, _elm)	_name##_RBT_INSERT(_head, _elm)
+#define RBT_REMOVE(_name, _head, _elm)	_name##_RBT_REMOVE(_head, _elm)
+#define RBT_FIND(_name, _head, _key)	_name##_RBT_FIND(_head, _key)
+#define RBT_NFIND(_name, _head, _key)	_name##_RBT_NFIND(_head, _key)
+#define RBT_ROOT(_name, _head)		_name##_RBT_ROOT(_head)
+#define RBT_EMPTY(_name, _head)		_name##_RBT_EMPTY(_head)
+#define RBT_MIN(_name, _head)		_name##_RBT_MIN(_head)
+#define RBT_MAX(_name, _head)		_name##_RBT_MAX(_head)
+#define RBT_NEXT(_name, _elm)		_name##_RBT_NEXT(_elm)
+#define RBT_PREV(_name, _elm)		_name##_RBT_PREV(_elm)
+#define RBT_LEFT(_name, _elm)		_name##_RBT_LEFT(_elm)
+#define RBT_RIGHT(_name, _elm)		_name##_RBT_RIGHT(_elm)
+#define RBT_PARENT(_name, _elm)		_name##_RBT_PARENT(_elm)
+
+#define RBT_FOREACH(_e, _name, _head) \
+	for ((_e) = RBT_MIN(_name, (_head)); \
+	    (_e) != NULL; \
+	    (_e) = RBT_NEXT(_name, (_e)))
+
+#define RBT_FOREACH_SAFE(_e, _name, _head, _n) \
+	for ((_e) = RBT_MIN(_name, (_head)); \
+	    (_e) != NULL && ((_n) = RBT_NEXT(_name, (_e)), 1); \
+	    (_e) = (_n))
+
+#define RBT_FOREACH_REVERSE(_e, _name, _head) \
+	for ((_e) = RBT_MAX(_name, (_head)); \
+	    (_e) != NULL; \
+	    (_e) = RBT_PREV(_name, (_e)))
+
+#define RBT_FOREACH_REVERSE_SAFE(_e, _name, _head, _n) \
+	for ((_e) = RBT_MAX(_name, (_head)); \
+	    (_e) != NULL && ((_n) = RBT_PREV(_name, (_e)), 1); \
+	    (_e) = (_n))
+
+#endif /* _SYS_RBTREE_H_ */
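[ Not part of the diff: a minimal usage sketch of the API above, for
reviewers who haven't seen it yet. The foo/foo_tree/foo_cmp names are
made up. Unlike RB_GENERATE, RBT_GENERATE emits no tree code, only the
inline wrappers and one rb_type descriptor pointing at the shared
implementation in subr_rbtree.c. ]

	struct foo {
		int		key;
		RBT_ENTRY(foo)	entry;		/* linkage embedded in the node */
	};

	static inline int
	foo_cmp(const struct foo *a, const struct foo *b)
	{
		/* same contract as RB compares: negative, zero, positive */
		return (a->key < b->key ? -1 : a->key > b->key);
	}

	RBT_HEAD(foo_tree, foo);
	RBT_PROTOTYPE(foo_tree, foo, entry, foo_cmp);
	RBT_GENERATE(foo_tree, foo, entry, foo_cmp);

	struct foo_tree ft = RBT_INITIALIZER(&ft);

	void
	foo_example(struct foo *f)
	{
		struct foo *n;

		/* RBT_INSERT returns NULL on success, the duplicate otherwise */
		if (RBT_INSERT(foo_tree, &ft, f) != NULL)
			panic("duplicate key %d", f->key);

		RBT_FOREACH(n, foo_tree, &ft)
			printf("%d\n", n->key);
	}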
Index: sys/sys/buf.h
===================================================================
RCS file: /cvs/src/sys/sys/buf.h,v
retrieving revision 1.103
diff -u -p -r1.103 buf.h
--- sys/sys/buf.h	23 May 2016 09:31:28 -0000	1.103
+++ sys/sys/buf.h	12 Aug 2016 07:06:57 -0000
@@ -40,7 +40,7 @@
 #ifndef _SYS_BUF_H_
 #define _SYS_BUF_H_
 #include <sys/queue.h>
-#include <sys/tree.h>
+#include <sys/rbtree.h>
 #include

 #define NOLIST ((struct buf *)0x87654321)
@@ -48,8 +48,8 @@
 struct buf;
 struct vnode;

-struct buf_rb_bufs;
-RB_PROTOTYPE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
+RBT_HEAD(buf_rb_bufs, buf);
+RBT_PROTOTYPE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

 LIST_HEAD(bufhead, buf);

@@ -140,7 +140,7 @@ extern struct bio_ops {

 /* The buffer header describes an I/O operation in the kernel. */
 struct buf {
-	RB_ENTRY(buf) b_rbbufs;		/* vnode "hash" tree */
+	RBT_ENTRY(buf) b_rbbufs;	/* vnode "hash" tree */
 	LIST_ENTRY(buf) b_list;		/* All allocated buffers. */
 	LIST_ENTRY(buf) b_vnbufs;	/* Buffer's associated vnode. */
 	TAILQ_ENTRY(buf) b_freelist;	/* Free list position if not active. */
Index: sys/sys/hibernate.h
===================================================================
RCS file: /cvs/src/sys/sys/hibernate.h,v
retrieving revision 1.39
diff -u -p -r1.39 hibernate.h
--- sys/sys/hibernate.h	7 Feb 2015 01:19:40 -0000	1.39
+++ sys/sys/hibernate.h	12 Aug 2016 07:06:57 -0000
@@ -20,7 +20,7 @@
 #define _SYS_HIBERNATE_H_

 #include
-#include <sys/tree.h>
+#include <sys/rbtree.h>
 #include
 #include

@@ -37,7 +37,7 @@ struct hiballoc_entry;
  * Allocator operates from an arena, that is pre-allocated by the caller.
  */
 struct hiballoc_arena {
-	RB_HEAD(hiballoc_addr, hiballoc_entry)	hib_addrs;
+	RBT_HEAD(hiballoc_addr, hiballoc_entry)	hib_addrs;
 };

 /*
Index: sys/sys/namei.h
===================================================================
RCS file: /cvs/src/sys/sys/namei.h,v
retrieving revision 1.32
diff -u -p -r1.32 namei.h
--- sys/sys/namei.h	29 Apr 2016 14:40:36 -0000	1.32
+++ sys/sys/namei.h	12 Aug 2016 07:06:57 -0000
@@ -36,13 +36,9 @@
 #define _SYS_NAMEI_H_

 #include <sys/queue.h>
-#include <sys/tree.h>
+#include <sys/rbtree.h>
 #include

-struct namecache;
-struct namecache_rb_cache;
-RB_PROTOTYPE(namecache_rb_cache, namecache, n_rbcache, namecache_compare);
-
 /*
  * Encapsulation of namei parameters.
  */
@@ -176,7 +172,7 @@ void ndinitat(struct nameidata *ndp, u_l
 struct namecache {
 	TAILQ_ENTRY(namecache) nc_lru;	/* Regular Entry LRU chain */
 	TAILQ_ENTRY(namecache) nc_neg;	/* Negative Entry LRU chain */
-	RB_ENTRY(namecache) n_rbcache;	/* Namecache rb tree from vnode */
+	RBT_ENTRY(namecache) n_rbcache;	/* Namecache rb tree from vnode */
 	TAILQ_ENTRY(namecache) nc_me;	/* ncp's referring to me */
 	struct vnode *nc_dvp;		/* vnode of parent of name */
 	u_long nc_dvpid;		/* capability number of nc_dvp */
@@ -201,6 +197,8 @@ void cache_purgevfs(struct mount *);

 extern struct pool namei_pool;

+struct namecache_rb_cache;
+void namecache_rb_tree_init(struct namecache_rb_cache *);

 #endif /* !_SYS_NAMEI_H_ */
Index: sys/sys/pool.h
===================================================================
RCS file: /cvs/src/sys/sys/pool.h,v
retrieving revision 1.59
diff -u -p -r1.59 pool.h
--- sys/sys/pool.h	21 Apr 2016 04:09:28 -0000	1.59
+++ sys/sys/pool.h	12 Aug 2016 07:06:57 -0000
@@ -69,7 +69,7 @@ struct kinfo_pool {
 #if defined(_KERNEL) || defined(_LIBKVM)

 #include <sys/queue.h>
-#include <sys/tree.h>
+#include <sys/rbtree.h>
 #include

 struct pool;
@@ -121,7 +121,7 @@ struct pool {

 	int		pr_ipl;

-	RB_HEAD(phtree, pool_item_header)
+	RBT_HEAD(phtree, pool_item_header)
 			pr_phtree;

 	u_int		pr_align;
Index: sys/sys/vnode.h
===================================================================
RCS file: /cvs/src/sys/sys/vnode.h,v
retrieving revision 1.135
diff -u -p -r1.135 vnode.h
--- sys/sys/vnode.h	23 May 2016 09:31:28 -0000	1.135
+++ sys/sys/vnode.h	12 Aug 2016 07:06:57 -0000
@@ -39,7 +39,7 @@
 #include
 #include
 #include
-#include <sys/tree.h>
+#include <sys/rbtree.h>

 /*
  * The vnode is the focus of all file activity in UNIX. There is a
@@ -80,8 +80,7 @@ enum vtagtype {
  */
 LIST_HEAD(buflists, buf);

-RB_HEAD(buf_rb_bufs, buf);
-RB_HEAD(namecache_rb_cache, namecache);
+RBT_HEAD(namecache_rb_cache, namecache);

 struct uvm_vnode;
 struct vnode {
Index: sys/kern/subr_rbtree.c
===================================================================
RCS file: sys/kern/subr_rbtree.c
diff -N sys/kern/subr_rbtree.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ sys/kern/subr_rbtree.c	12 Aug 2016 07:06:58 -0000
@@ -0,0 +1,597 @@
+/* $OpenBSD$ */
+
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2016 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/rbtree.h>
+
+#define RB_BLACK	0x0
+#define RB_RED		0x1
+
+static inline void *
+rb_n2e(const struct rb_type *t, void *node)
+{
+	caddr_t addr = (caddr_t)node;
+
+	return ((void *)(addr + t->t_offset));
+}
+
+static inline void *
+rb_e2n(const struct rb_type *t, struct rb_entry *rbe)
+{
+	caddr_t addr = (caddr_t)rbe;
+
+	return ((void *)(addr - t->t_offset));
+}
+
+#define RBE_LEFT(_rbe)		(_rbe)->rbe_left
+#define RBE_RIGHT(_rbe)		(_rbe)->rbe_right
+#define RBE_PARENT(_rbe)	(_rbe)->rbe_parent
+#define RBE_COLOR(_rbe)		(_rbe)->rbe_color
+
+#define RBH_ROOT(_rbt)		(_rbt)->rbt_root
+
+static inline void
+rbe_set(struct rb_entry *rbe, struct rb_entry *parent)
+{
+	RBE_PARENT(rbe) = parent;
+	RBE_LEFT(rbe) = RBE_RIGHT(rbe) = NULL;
+	RBE_COLOR(rbe) = RB_RED;
+}
+
+static inline void
+rbe_set_blackred(struct rb_entry *black, struct rb_entry *red)
+{
+	RBE_COLOR(black) = RB_BLACK;
+	RBE_COLOR(red) = RB_RED;
+}
+
+static inline void
+rbe_augment(const struct rb_type *t, struct rb_entry *rbe)
+{
+	(*t->t_augment)(rb_e2n(t, rbe));
+}
+
+static inline void
+rbe_if_augment(const struct rb_type *t, struct rb_entry *rbe)
+{
+	if (t->t_augment != NULL)
+		rbe_augment(t, rbe);
+}
+
+static inline void
+rbe_rotate_left(const struct rb_type *t, struct rb_tree *rbt,
+    struct rb_entry *rbe)
+{
+	struct rb_entry *parent;
+	struct rb_entry *tmp;
+
+	tmp = RBE_RIGHT(rbe);
+	RBE_RIGHT(rbe) = RBE_LEFT(tmp);
+	if (RBE_RIGHT(rbe) != NULL)
+		RBE_PARENT(RBE_LEFT(tmp)) = rbe;
+
+	parent = RBE_PARENT(rbe);
+	RBE_PARENT(tmp) = parent;
+	if (parent != NULL) {
+		if (rbe == RBE_LEFT(parent))
+			RBE_LEFT(parent) = tmp;
+		else
+			RBE_RIGHT(parent) = tmp;
+	} else
+		RBH_ROOT(rbt) = tmp;
+
+	RBE_LEFT(tmp) = rbe;
+	RBE_PARENT(rbe) = tmp;
+
+	if (t->t_augment != NULL) {
+		rbe_augment(t, rbe);
+		rbe_augment(t, tmp);
+		parent = RBE_PARENT(tmp);
+		if (parent != NULL)
+			rbe_augment(t, parent);
+	}
+}
+
+static inline void
+rbe_rotate_right(const struct rb_type *t, struct rb_tree *rbt,
+    struct rb_entry *rbe)
+{
+	struct rb_entry *parent;
+	struct rb_entry *tmp;
+
+	tmp = RBE_LEFT(rbe);
+	RBE_LEFT(rbe) = RBE_RIGHT(tmp);
+	if (RBE_LEFT(rbe) != NULL)
+		RBE_PARENT(RBE_RIGHT(tmp)) = rbe;
+
+	parent = RBE_PARENT(rbe);
+	RBE_PARENT(tmp) = parent;
+	if (parent != NULL) {
+		if (rbe == RBE_LEFT(parent))
+			RBE_LEFT(parent) = tmp;
+		else
+			RBE_RIGHT(parent) = tmp;
+	} else
+		RBH_ROOT(rbt) = tmp;
+
+	RBE_RIGHT(tmp) = rbe;
+	RBE_PARENT(rbe) = tmp;
+
+	if (t->t_augment != NULL) {
+		rbe_augment(t, rbe);
+		rbe_augment(t, tmp);
+		parent = RBE_PARENT(tmp);
+		if (parent != NULL)
+			rbe_augment(t, parent);
+	}
+}
+
+static inline void
+rbe_insert_color(const struct rb_type *t, struct rb_tree *rbt,
+    struct rb_entry *rbe)
+{
+	struct rb_entry *parent, *gparent, *tmp;
+
+	while ((parent = RBE_PARENT(rbe)) != NULL &&
+	    RBE_COLOR(parent) == RB_RED) {
+		gparent = RBE_PARENT(parent);
+
+		if (parent == RBE_LEFT(gparent)) {
+			tmp = RBE_RIGHT(gparent);
+			if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
+				RBE_COLOR(tmp) = RB_BLACK;
+				rbe_set_blackred(parent, gparent);
+				rbe = gparent;
+				continue;
+			}
+
+			if (RBE_RIGHT(parent) == rbe) {
+				rbe_rotate_left(t, rbt, parent);
+				tmp = parent;
+				parent = rbe;
+				rbe = tmp;
+			}
+
+			rbe_set_blackred(parent, gparent);
+			rbe_rotate_right(t, rbt, gparent);
+		} else {
+			tmp = RBE_LEFT(gparent);
+			if (tmp != NULL && RBE_COLOR(tmp) == RB_RED) {
+				RBE_COLOR(tmp) = RB_BLACK;
+				rbe_set_blackred(parent, gparent);
+				rbe = gparent;
+				continue;
+			}
+
+			if (RBE_LEFT(parent) == rbe) {
+				rbe_rotate_right(t, rbt, parent);
+				tmp = parent;
+				parent = rbe;
+				rbe = tmp;
+			}
+
+			rbe_set_blackred(parent, gparent);
+			rbe_rotate_left(t, rbt, gparent);
+		}
+	}
+
+	RBE_COLOR(RBH_ROOT(rbt)) = RB_BLACK;
+}
+
+static inline void
+rbe_remove_color(const struct rb_type *t, struct rb_tree *rbt,
+    struct rb_entry *parent, struct rb_entry *rbe)
+{
+	struct rb_entry *tmp;
+
+	while ((rbe == NULL || RBE_COLOR(rbe) == RB_BLACK) &&
+	    rbe != RBH_ROOT(rbt)) {
+		if (RBE_LEFT(parent) == rbe) {
+			tmp = RBE_RIGHT(parent);
+			if (RBE_COLOR(tmp) == RB_RED) {
+				rbe_set_blackred(tmp, parent);
+				rbe_rotate_left(t, rbt, parent);
+				tmp = RBE_RIGHT(parent);
+			}
+			if ((RBE_LEFT(tmp) == NULL ||
+			    RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK) &&
+			    (RBE_RIGHT(tmp) == NULL ||
+			    RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
+				RBE_COLOR(tmp) = RB_RED;
+				rbe = parent;
+				parent = RBE_PARENT(rbe);
+			} else {
+				if (RBE_RIGHT(tmp) == NULL ||
+				    RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK) {
+					struct rb_entry *oleft;
+
+					oleft = RBE_LEFT(tmp);
+					if (oleft != NULL)
+						RBE_COLOR(oleft) = RB_BLACK;
+
+					RBE_COLOR(tmp) = RB_RED;
+					rbe_rotate_right(t, rbt, tmp);
+					tmp = RBE_RIGHT(parent);
+				}
+
+				RBE_COLOR(tmp) = RBE_COLOR(parent);
+				RBE_COLOR(parent) = RB_BLACK;
+				if (RBE_RIGHT(tmp))
+					RBE_COLOR(RBE_RIGHT(tmp)) = RB_BLACK;
+
+				rbe_rotate_left(t, rbt, parent);
+				rbe = RBH_ROOT(rbt);
+				break;
+			}
+		} else {
+			tmp = RBE_LEFT(parent);
+			if (RBE_COLOR(tmp) == RB_RED) {
+				rbe_set_blackred(tmp, parent);
+				rbe_rotate_right(t, rbt, parent);
+				tmp = RBE_LEFT(parent);
+			}
+
+			if ((RBE_LEFT(tmp) == NULL ||
+			    RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK) &&
+			    (RBE_RIGHT(tmp) == NULL ||
+			    RBE_COLOR(RBE_RIGHT(tmp)) == RB_BLACK)) {
+				RBE_COLOR(tmp) = RB_RED;
+				rbe = parent;
+				parent = RBE_PARENT(rbe);
+			} else {
+				if (RBE_LEFT(tmp) == NULL ||
+				    RBE_COLOR(RBE_LEFT(tmp)) == RB_BLACK) {
+					struct rb_entry *oright;
+
+					oright = RBE_RIGHT(tmp);
+					if (oright != NULL)
+						RBE_COLOR(oright) = RB_BLACK;
+
+					RBE_COLOR(tmp) = RB_RED;
+					rbe_rotate_left(t, rbt, tmp);
+					tmp = RBE_LEFT(parent);
+				}
+
+				RBE_COLOR(tmp) = RBE_COLOR(parent);
+				RBE_COLOR(parent) = RB_BLACK;
+				if (RBE_LEFT(tmp) != NULL)
+					RBE_COLOR(RBE_LEFT(tmp)) = RB_BLACK;
+
+				rbe_rotate_right(t, rbt, parent);
+				rbe = RBH_ROOT(rbt);
+				break;
+			}
+		}
+	}
+
+	if (rbe != NULL)
+		RBE_COLOR(rbe) = RB_BLACK;
+}
+
+static inline struct rb_entry *
+rbe_remove(const struct rb_type *t, struct rb_tree *rbt, struct rb_entry *rbe)
+{
+	struct rb_entry *child, *parent, *old = rbe;
+	unsigned int color;
+
+	if (RBE_LEFT(rbe) == NULL)
+		child = RBE_RIGHT(rbe);
+	else if (RBE_RIGHT(rbe) == NULL)
+		child = RBE_LEFT(rbe);
+	else {
+		struct rb_entry *tmp;
+
+		rbe = RBE_RIGHT(rbe);
+		while ((tmp = RBE_LEFT(rbe)) != NULL)
+			rbe = tmp;
+
+		child = RBE_RIGHT(rbe);
+		parent = RBE_PARENT(rbe);
+		color = RBE_COLOR(rbe);
+		if (child != NULL)
+			RBE_PARENT(child) = parent;
+		if (parent != NULL) {
+			if (RBE_LEFT(parent) == rbe)
+				RBE_LEFT(parent) = child;
+			else
+				RBE_RIGHT(parent) = child;
+
+			rbe_if_augment(t, parent);
+		} else
+			RBH_ROOT(rbt) = child;
+		if (RBE_PARENT(rbe) == old)
+			parent = rbe;
+		*rbe = *old;
+
+		tmp = RBE_PARENT(old);
+		if (tmp != NULL) {
+			if (RBE_LEFT(tmp) == old)
+				RBE_LEFT(tmp) = rbe;
+			else
+				RBE_RIGHT(tmp) = rbe;
+
+			rbe_if_augment(t, tmp);
+		} else
+			RBH_ROOT(rbt) = rbe;
+
+		RBE_PARENT(RBE_LEFT(old)) = rbe;
+		if (RBE_RIGHT(old))
+			RBE_PARENT(RBE_RIGHT(old)) = rbe;
+
+		if (t->t_augment != NULL && parent != NULL) {
+			tmp = parent;
+			do {
+				rbe_augment(t, tmp);
+				tmp = RBE_PARENT(tmp);
+			} while (tmp != NULL);
+		}
+
+		goto color;
+	}
+
+	parent = RBE_PARENT(rbe);
+	color = RBE_COLOR(rbe);
+
+	if (child != NULL)
+		RBE_PARENT(child) = parent;
+	if (parent != NULL) {
+		if (RBE_LEFT(parent) == rbe)
+			RBE_LEFT(parent) = child;
+		else
+			RBE_RIGHT(parent) = child;
+
+		rbe_if_augment(t, parent);
+	} else
+		RBH_ROOT(rbt) = child;
+color:
+	if (color == RB_BLACK)
+		rbe_remove_color(t, rbt, parent, child);
+
+	return (old);
+}
+
+void *
+_rb_remove(const struct rb_type *t, struct rb_tree *rbt, void *elm)
+{
+	struct rb_entry *rbe = rb_n2e(t, elm);
+	struct rb_entry *old;
+
+	old = rbe_remove(t, rbt, rbe);
+
+	return (old == NULL ? NULL : rb_e2n(t, old));
+}
+
+void *
+_rb_insert(const struct rb_type *t, struct rb_tree *rbt, void *elm)
+{
+	struct rb_entry *rbe = rb_n2e(t, elm);
+	struct rb_entry *tmp;
+	struct rb_entry *parent = NULL;
+	void *node;
+	int comp = 0;
+
+	tmp = RBH_ROOT(rbt);
+	while (tmp != NULL) {
+		parent = tmp;
+
+		node = rb_e2n(t, tmp);
+		comp = (*t->t_compare)(elm, node);
+		if (comp < 0)
+			tmp = RBE_LEFT(tmp);
+		else if (comp > 0)
+			tmp = RBE_RIGHT(tmp);
+		else
+			return (node);
+	}
+
+	rbe_set(rbe, parent);
+
+	if (parent != NULL) {
+		if (comp < 0)
+			RBE_LEFT(parent) = rbe;
+		else
+			RBE_RIGHT(parent) = rbe;
+
+		rbe_if_augment(t, parent);
+	} else
+		RBH_ROOT(rbt) = rbe;
+
+	rbe_insert_color(t, rbt, rbe);
+
+	return (NULL);
+}
+
+/* Finds the node with the same key as elm */
+void *
+_rb_find(const struct rb_type *t, struct rb_tree *rbt, const void *key)
+{
+	struct rb_entry *tmp = RBH_ROOT(rbt);
+	void *node;
+	int comp;
+
+	while (tmp != NULL) {
+		node = rb_e2n(t, tmp);
+		comp = (*t->t_compare)(key, node);
+		if (comp < 0)
+			tmp = RBE_LEFT(tmp);
+		else if (comp > 0)
+			tmp = RBE_RIGHT(tmp);
+		else
+			return (node);
+	}
+
+	return (NULL);
+}
+
+/* Finds the first node greater than or equal to the search key */
+void *
+_rb_nfind(const struct rb_type *t, struct rb_tree *rbt, const void *key)
+{
+	struct rb_entry *tmp = RBH_ROOT(rbt);
+	void *node;
+	void *res = NULL;
+	int comp;
+
+	while (tmp != NULL) {
+		node = rb_e2n(t, tmp);
+		comp = (*t->t_compare)(key, node);
+		if (comp < 0) {
+			res = node;
+			tmp = RBE_LEFT(tmp);
+		} else if (comp > 0)
+			tmp = RBE_RIGHT(tmp);
+		else
+			return (node);
+	}
+
+	return (res);
+}
+
+void *
+_rb_next(const struct rb_type *t, void *elm)
+{
+	struct rb_entry *rbe = rb_n2e(t, elm);
+
+	if (RBE_RIGHT(rbe) != NULL) {
+		rbe = RBE_RIGHT(rbe);
+		while (RBE_LEFT(rbe) != NULL)
+			rbe = RBE_LEFT(rbe);
+	} else {
+		if (RBE_PARENT(rbe) &&
+		    (rbe == RBE_LEFT(RBE_PARENT(rbe))))
+			rbe = RBE_PARENT(rbe);
+		else {
+			while (RBE_PARENT(rbe) &&
+			    (rbe == RBE_RIGHT(RBE_PARENT(rbe))))
+				rbe = RBE_PARENT(rbe);
+			rbe = RBE_PARENT(rbe);
+		}
+	}
+
NULL : rb_e2n(t, rbe)); +} + +void * +_rb_prev(const struct rb_type *t, void *elm) +{ + struct rb_entry *rbe = rb_n2e(t, elm); + + if (RBE_LEFT(rbe)) { + rbe = RBE_LEFT(rbe); + while (RBE_RIGHT(rbe)) + rbe = RBE_RIGHT(rbe); + } else { + if (RBE_PARENT(rbe) && + (rbe == RBE_RIGHT(RBE_PARENT(rbe)))) + rbe = RBE_PARENT(rbe); + else { + while (RBE_PARENT(rbe) && + (rbe == RBE_LEFT(RBE_PARENT(rbe)))) + rbe = RBE_PARENT(rbe); + rbe = RBE_PARENT(rbe); + } + } + + return (rbe == NULL ? NULL : rb_e2n(t, rbe)); +} + +void * +_rb_root(const struct rb_type *t, struct rb_tree *rbt) +{ + struct rb_entry *rbe = RBH_ROOT(rbt); + + return (rbe == NULL ? rbe : rb_e2n(t, rbe)); +} + +void * +_rb_min(const struct rb_type *t, struct rb_tree *rbt) +{ + struct rb_entry *rbe = RBH_ROOT(rbt); + struct rb_entry *parent = NULL; + + while (rbe != NULL) { + parent = rbe; + rbe = RBE_LEFT(rbe); + } + + return (parent == NULL ? NULL : rb_e2n(t, parent)); +} + +void * +_rb_max(const struct rb_type *t, struct rb_tree *rbt) +{ + struct rb_entry *rbe = RBH_ROOT(rbt); + struct rb_entry *parent = NULL; + + while (rbe != NULL) { + parent = rbe; + rbe = RBE_RIGHT(rbe); + } + + return (parent == NULL ? NULL : rb_e2n(t, parent)); +} + +void * +_rb_left(const struct rb_type *t, void *node) +{ + struct rb_entry *rbe = rb_n2e(t, node); + rbe = RBE_LEFT(rbe); + return (rbe == NULL ? NULL : rb_e2n(t, rbe)); +} + +void * +_rb_right(const struct rb_type *t, void *node) +{ + struct rb_entry *rbe = rb_n2e(t, node); + rbe = RBE_RIGHT(rbe); + return (rbe == NULL ? NULL : rb_e2n(t, rbe)); +} + +void * +_rb_parent(const struct rb_type *t, void *node) +{ + struct rb_entry *rbe = rb_n2e(t, node); + rbe = RBE_PARENT(rbe); + return (rbe == NULL ? NULL : rb_e2n(t, rbe)); +} + Index: sys/kern/subr_hibernate.c =================================================================== RCS file: /cvs/src/sys/kern/subr_hibernate.c,v retrieving revision 1.116 diff -u -p -r1.116 subr_hibernate.c --- sys/kern/subr_hibernate.c 4 May 2015 02:18:05 -0000 1.116 +++ sys/kern/subr_hibernate.c 12 Aug 2016 07:06:58 -0000 @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -118,7 +118,7 @@ int hibernate_write_rle(union hibernate_ struct hiballoc_entry { size_t hibe_use; size_t hibe_space; - RB_ENTRY(hiballoc_entry) hibe_entry; + RBT_ENTRY(hiballoc_entry) hibe_entry; }; /* @@ -154,12 +154,12 @@ hibernate_sort_ranges(union hibernate_in * we just compare the hiballoc_entry pointers. */ static __inline int -hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r) +hibe_cmp(const struct hiballoc_entry *l, const struct hiballoc_entry *r) { return l < r ? -1 : (l > r); } -RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp) +RBT_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp); /* * Given a hiballoc entry, return the address it manages. @@ -187,7 +187,7 @@ hib_addr_to_entry(void *addr_param) return (struct hiballoc_entry*)addr; } -RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp) +RBT_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp); /* * Allocate memory from the arena. @@ -217,9 +217,9 @@ hib_alloc(struct hiballoc_arena *arena, * a sufficiently large space. 
Index: sys/kern/subr_hibernate.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_hibernate.c,v
retrieving revision 1.116
diff -u -p -r1.116 subr_hibernate.c
--- sys/kern/subr_hibernate.c	4 May 2015 02:18:05 -0000	1.116
+++ sys/kern/subr_hibernate.c	12 Aug 2016 07:06:58 -0000
@@ -20,7 +20,7 @@
 #include
 #include
 #include
-#include <sys/tree.h>
+#include <sys/rbtree.h>
 #include
 #include
 #include
@@ -118,7 +118,7 @@ int hibernate_write_rle(union hibernate_
 struct hiballoc_entry {
 	size_t			hibe_use;
 	size_t			hibe_space;
-	RB_ENTRY(hiballoc_entry) hibe_entry;
+	RBT_ENTRY(hiballoc_entry) hibe_entry;
 };

 /*
@@ -154,12 +154,12 @@ hibernate_sort_ranges(union hibernate_in
  * we just compare the hiballoc_entry pointers.
  */
 static __inline int
-hibe_cmp(struct hiballoc_entry *l, struct hiballoc_entry *r)
+hibe_cmp(const struct hiballoc_entry *l, const struct hiballoc_entry *r)
 {
 	return l < r ? -1 : (l > r);
 }

-RB_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
+RBT_PROTOTYPE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp);

 /*
  * Given a hiballoc entry, return the address it manages.
@@ -187,7 +187,7 @@ hib_addr_to_entry(void *addr_param)
 	return (struct hiballoc_entry*)addr;
 }

-RB_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp)
+RBT_GENERATE(hiballoc_addr, hiballoc_entry, hibe_entry, hibe_cmp);

 /*
  * Allocate memory from the arena.
@@ -217,9 +217,9 @@ hib_alloc(struct hiballoc_arena *arena,
 	 * a sufficiently large space.
 	 */
 	find_sz = alloc_sz + HIB_SIZEOF(struct hiballoc_entry);
-	entry = RB_ROOT(&arena->hib_addrs);
+	entry = RBT_ROOT(hiballoc_addr, &arena->hib_addrs);
 	if (entry != NULL && entry->hibe_space < find_sz) {
-		RB_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
+		RBT_FOREACH_REVERSE(entry, hiballoc_addr, &arena->hib_addrs) {
 			if (entry->hibe_space >= find_sz)
 				break;
 		}
@@ -242,7 +242,7 @@ hib_alloc(struct hiballoc_arena *arena,
 	/*
 	 * Insert entry.
 	 */
-	if (RB_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
+	if (RBT_INSERT(hiballoc_addr, &arena->hib_addrs, new_entry) != NULL)
 		panic("hib_alloc: insert failure");

 	entry->hibe_space = 0;
@@ -277,7 +277,7 @@ hib_free(struct hiballoc_arena *arena, v
 	 * Derive entry from addr and check it is really in this arena.
 	 */
 	entry = hib_addr_to_entry(addr);
-	if (RB_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
+	if (RBT_FIND(hiballoc_addr, &arena->hib_addrs, entry) != entry)
 		panic("hib_free: freed item %p not in hib arena", addr);

 	/*
@@ -286,12 +286,12 @@ hib_free(struct hiballoc_arena *arena, v
 	 * If entry has no predecessor, change its used space into free space
 	 * instead.
 	 */
-	prev = RB_PREV(hiballoc_addr, &arena->hib_addrs, entry);
+	prev = RBT_PREV(hiballoc_addr, entry);
 	if (prev != NULL &&
 	    (void *)((caddr_t)prev + HIB_SIZEOF(struct hiballoc_entry) +
 	    prev->hibe_use + prev->hibe_space) == entry) {
 		/* Merge entry. */
-		RB_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
+		RBT_REMOVE(hiballoc_addr, &arena->hib_addrs, entry);
 		prev->hibe_space += HIB_SIZEOF(struct hiballoc_entry) +
 		    entry->hibe_use + entry->hibe_space;
 	} else {
@@ -313,7 +313,7 @@ hiballoc_init(struct hiballoc_arena *are
 	caddr_t ptr;
 	size_t len;

-	RB_INIT(&arena->hib_addrs);
+	RBT_INIT(hiballoc_addr, &arena->hib_addrs);

 	/*
 	 * Hib allocator enforces HIB_ALIGN alignment.
@@ -335,7 +335,7 @@ hiballoc_init(struct hiballoc_arena *are
 	entry = (struct hiballoc_entry*)ptr;
 	entry->hibe_use = 0;
 	entry->hibe_space = len - HIB_SIZEOF(struct hiballoc_entry);
-	RB_INSERT(hiballoc_addr, &arena->hib_addrs, entry);
+	RBT_INSERT(hiballoc_addr, &arena->hib_addrs, entry);

 	return 0;
 }
@@ -363,8 +363,8 @@ uvm_pmr_zero_everything(void)
 		}

 		/* Zero multi page ranges. */
-		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_DIRTY]))
-		    != NULL) {
+		while ((pg = RBT_ROOT(uvm_pmr_size,
+		    &pmr->size[UVM_PMR_MEMTYPE_DIRTY])) != NULL) {
 			pg--; /* Size tree always has second page. */
 			uvm_pmr_remove(pmr, pg);
 			for (i = 0; i < pg->fpgsz; i++) {
@@ -402,8 +402,8 @@ uvm_pmr_dirty_everything(void)
 		}

 		/* Dirty multi page ranges. */
-		while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
-		    != NULL) {
+		while ((pg = RBT_ROOT(uvm_pmr_size,
+		    &pmr->size[UVM_PMR_MEMTYPE_ZERO])) != NULL) {
 			pg--; /* Size tree always has second page. */
 			uvm_pmr_remove(pmr, pg);
 			for (i = 0; i < pg->fpgsz; i++)
Index: sys/kern/subr_pool.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_pool.c,v
retrieving revision 1.194
diff -u -p -r1.194 subr_pool.c
--- sys/kern/subr_pool.c	15 Jan 2016 11:21:58 -0000	1.194
+++ sys/kern/subr_pool.c	12 Aug 2016 07:06:58 -0000
@@ -79,7 +79,7 @@ struct pool_item_header {
 	TAILQ_ENTRY(pool_item_header)
 				ph_pagelist;	/* pool page list */
 	XSIMPLEQ_HEAD(,pool_item) ph_itemlist;	/* chunk list for this page */
-	RB_ENTRY(pool_item_header)
+	RBT_ENTRY(pool_item_header)
 				ph_node;	/* Off-page page headers */
 	int			ph_nmissing;	/* # of chunks in use */
 	caddr_t			ph_page;	/* this page's address */
@@ -165,11 +165,13 @@ struct task pool_gc_task = TASK_INITIALI
 int pool_wait_free = 1;
 int pool_wait_gc = 8;

+RBT_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
+
 static inline int
-phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
+phtree_compare(const void *a, const void *b)
 {
-	vaddr_t va = (vaddr_t)a->ph_page;
-	vaddr_t vb = (vaddr_t)b->ph_page;
+	vaddr_t va = (vaddr_t)((struct pool_item_header *)a)->ph_page;
+	vaddr_t vb = (vaddr_t)((struct pool_item_header *)b)->ph_page;

 	/* the compares in this order are important for the NFIND to work */
 	if (vb < va)
@@ -180,8 +182,7 @@ phtree_compare(struct pool_item_header *
 	return (0);
 }

-RB_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
-RB_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
+RBT_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);

 /*
  * Return the pool page header based on page address.
@@ -200,7 +201,7 @@ pr_find_pagehead(struct pool *pp, void *
 	}

 	key.ph_page = v;
-	ph = RB_NFIND(phtree, &pp->pr_phtree, &key);
+	ph = RBT_NFIND(phtree, &pp->pr_phtree, &key);
 	if (ph == NULL)
 		panic("%s: %s: page header missing", __func__, pp->pr_wchan);

@@ -292,7 +293,7 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_hardlimit_ratecap.tv_usec = 0;
 	pp->pr_hardlimit_warning_last.tv_sec = 0;
 	pp->pr_hardlimit_warning_last.tv_usec = 0;
-	RB_INIT(&pp->pr_phtree);
+	RBT_INIT(phtree, &pp->pr_phtree);

 	/*
 	 * Use the space between the chunks and the page header
@@ -847,7 +848,7 @@ pool_p_insert(struct pool *pp, struct po
 	TAILQ_INSERT_TAIL(&pp->pr_emptypages, ph, ph_pagelist);
 	if (!POOL_INPGHDR(pp))
-		RB_INSERT(phtree, &pp->pr_phtree, ph);
+		RBT_INSERT(phtree, &pp->pr_phtree, ph);

 	pp->pr_nitems += pp->pr_itemsperpage;
 	pp->pr_nidle++;
@@ -868,7 +869,7 @@ pool_p_remove(struct pool *pp, struct po
 	pp->pr_nitems -= pp->pr_itemsperpage;

 	if (!POOL_INPGHDR(pp))
-		RB_REMOVE(phtree, &pp->pr_phtree, ph);
+		RBT_REMOVE(phtree, &pp->pr_phtree, ph);
 	TAILQ_REMOVE(&pp->pr_emptypages, ph, ph_pagelist);

 	pool_update_curpage(pp);
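[ Not part of the diff: a worked example of why phtree_compare() is
deliberately inverted. It orders page headers by descending ph_page, so
RBT_NFIND ("first node greater than or equal to the key") returns the
header with the highest start address at or below an item's address,
i.e. the page the item lives in. Standalone sketch, made-up numbers: ]

	#include <assert.h>

	/* mirrors phtree_compare()'s ordering, on bare addresses */
	static int
	demo_cmp(unsigned long va, unsigned long vb)
	{
		if (vb < va)
			return (-1);
		if (vb > va)
			return (1);
		return (0);
	}

	int
	main(void)
	{
		/*
		 * Page headers at 0x1000, 0x2000, 0x3000; item at 0x2468.
		 * In the inverted order the key sorts after 0x3000 and
		 * before 0x2000, so nfind lands on 0x2000: the page that
		 * actually contains the item.
		 */
		assert(demo_cmp(0x2468, 0x3000) > 0);	/* skip headers above */
		assert(demo_cmp(0x2468, 0x2000) < 0);	/* first candidate */
		assert(demo_cmp(0x2468, 0x1000) < 0);	/* nfind keeps nearest */
		return (0);
	}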
Index: sys/kern/vfs_bio.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_bio.c,v
retrieving revision 1.175
diff -u -p -r1.175 vfs_bio.c
--- sys/kern/vfs_bio.c	7 Jun 2016 01:31:54 -0000	1.175
+++ sys/kern/vfs_bio.c	12 Aug 2016 07:06:58 -0000
@@ -248,7 +248,7 @@ bufadjust(int newbufpages)
 	    (bcstats.numbufpages > targetpages)) {
 		bufcache_take(bp);
 		if (bp->b_vp) {
-			RB_REMOVE(buf_rb_bufs,
+			RBT_REMOVE(buf_rb_bufs,
 			    &bp->b_vp->v_bufs_tree, bp);
 			brelvp(bp);
 		}
@@ -766,8 +766,7 @@ brelse(struct buf *bp)
 		}

 		if (bp->b_vp) {
-			RB_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree,
-			    bp);
+			RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
 			brelvp(bp);
 		}
 		bp->b_vp = NULL;
@@ -834,7 +833,7 @@ incore(struct vnode *vp, daddr_t blkno)

 	/* Search buf lookup tree */
 	b.b_lblkno = blkno;
-	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
+	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
 	if (bp != NULL && ISSET(bp->b_flags, B_INVAL))
 		bp = NULL;

@@ -870,7 +869,7 @@ getblk(struct vnode *vp, daddr_t blkno,
start:
 	s = splbio();
 	b.b_lblkno = blkno;
-	bp = RB_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
+	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
 	if (bp != NULL) {
 		if (ISSET(bp->b_flags, B_BUSY)) {
 			SET(bp->b_flags, B_WANTED);
@@ -953,7 +952,7 @@ buf_get(struct vnode *vp, daddr_t blkno,
 		    (bp = bufcache_getanycleanbuf())) {
 			bufcache_take(bp);
 			if (bp->b_vp) {
-				RB_REMOVE(buf_rb_bufs,
+				RBT_REMOVE(buf_rb_bufs,
 				    &bp->b_vp->v_bufs_tree, bp);
 				brelvp(bp);
 			}
@@ -1014,7 +1013,7 @@ buf_get(struct vnode *vp, daddr_t blkno,
 		bp->b_blkno = bp->b_lblkno = blkno;
 		bgetvp(vp, bp);
-		if (RB_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
+		if (RBT_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
 			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
 	} else {
 		bp->b_vnbufs.le_next = NOLIST;
@@ -1513,7 +1512,7 @@ hibernate_suspend_bufcache(void)
 	while ((bp = bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 1))) {
 		bufcache_take(bp);
 		if (bp->b_vp) {
-			RB_REMOVE(buf_rb_bufs,
+			RBT_REMOVE(buf_rb_bufs,
 			    &bp->b_vp->v_bufs_tree, bp);
 			brelvp(bp);
 		}
Index: sys/kern/vfs_cache.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_cache.c,v
retrieving revision 1.49
diff -u -p -r1.49 vfs_cache.c
--- sys/kern/vfs_cache.c	19 Mar 2016 12:04:15 -0000	1.49
+++ sys/kern/vfs_cache.c	12 Aug 2016 07:06:58 -0000
@@ -73,8 +73,8 @@ struct pool nch_pool;
 void cache_zap(struct namecache *);
 u_long nextvnodeid;

-static int
-namecache_compare(struct namecache *n1, struct namecache *n2)
+static inline int
+namecache_compare(const struct namecache *n1, const struct namecache *n2)
 {
 	if (n1->nc_nlen == n2->nc_nlen)
 		return (memcmp(n1->nc_name, n2->nc_name, n1->nc_nlen));
@@ -82,7 +82,14 @@ namecache_compare(struct namecache *n1,
 	return (n1->nc_nlen - n2->nc_nlen);
 }

-RB_GENERATE(namecache_rb_cache, namecache, n_rbcache, namecache_compare);
+RBT_PROTOTYPE(namecache_rb_cache, namecache, n_rbcache, namecache_compare);
+RBT_GENERATE(namecache_rb_cache, namecache, n_rbcache, namecache_compare);
+
+void
+namecache_rb_tree_init(struct namecache_rb_cache *nc_cache)
+{
+	RBT_INIT(namecache_rb_cache, nc_cache);
+}

 /*
  * blow away a namecache entry
@@ -100,8 +107,8 @@ cache_zap(struct namecache *ncp)
 		numneg--;
 	}
 	if (ncp->nc_dvp) {
-		RB_REMOVE(namecache_rb_cache, &ncp->nc_dvp->v_nc_tree, ncp);
-		if (RB_EMPTY(&ncp->nc_dvp->v_nc_tree))
+		RBT_REMOVE(namecache_rb_cache, &ncp->nc_dvp->v_nc_tree, ncp);
+		if (RBT_EMPTY(namecache_rb_cache, &ncp->nc_dvp->v_nc_tree))
 			dvp = ncp->nc_dvp;
 	}
 	if (ncp->nc_vp && (ncp->nc_vpid == ncp->nc_vp->v_id)) {
@@ -157,7 +164,7 @@ cache_lookup(struct vnode *dvp, struct v
 	/* lookup in directory vnode's redblack tree */
 	n.nc_nlen = cnp->cn_namelen;
 	memcpy(n.nc_name, cnp->cn_nameptr, n.nc_nlen);
-	ncp = RB_FIND(namecache_rb_cache, &dvp->v_nc_tree, &n);
+	ncp = RBT_FIND(namecache_rb_cache, &dvp->v_nc_tree, &n);

 	if (ncp == NULL) {
 		nchstats.ncs_miss++;
@@ -368,10 +375,10 @@ cache_enter(struct vnode *dvp, struct vn
 	ncp->nc_dvpid = dvp->v_id;
 	ncp->nc_nlen = cnp->cn_namelen;
 	memcpy(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen);
-	if (RB_EMPTY(&dvp->v_nc_tree)) {
+	if (RBT_EMPTY(namecache_rb_cache, &dvp->v_nc_tree)) {
 		vhold(dvp);
 	}
-	if ((lncp = RB_INSERT(namecache_rb_cache, &dvp->v_nc_tree, ncp))
+	if ((lncp = RBT_INSERT(namecache_rb_cache, &dvp->v_nc_tree, ncp))
 	    != NULL) {
 		/* someone has raced us and added a different entry
 		 * for the same vnode (different ncp) - we don't need
@@ -435,7 +442,7 @@ cache_purge(struct vnode *vp)
 	while ((ncp = TAILQ_FIRST(&vp->v_cache_dst)))
 		cache_zap(ncp);
-	while ((ncp = RB_ROOT(&vp->v_nc_tree)))
+	while ((ncp = RBT_ROOT(namecache_rb_cache, &vp->v_nc_tree)))
 		cache_zap(ncp);

 	/* XXX this blows goats */
Index: sys/kern/vfs_subr.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_subr.c,v
retrieving revision 1.249
diff -u -p -r1.249 vfs_subr.c
--- sys/kern/vfs_subr.c	22 Jul 2016 09:54:09 -0000	1.249
+++ sys/kern/vfs_subr.c	12 Aug 2016 07:06:58 -0000
@@ -62,7 +62,7 @@
 #include
 #include
 #include
-#include <sys/tree.h>
+#include <sys/rbtree.h>
 #include
 #include

@@ -122,11 +122,8 @@ void printlockedvnodes(void);
 struct pool vnode_pool;
 struct pool uvm_vnode_pool;

-static int rb_buf_compare(struct buf *b1, struct buf *b2);
-RB_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);
-
-static int
-rb_buf_compare(struct buf *b1, struct buf *b2)
+static inline int
+rb_buf_compare(const struct buf *b1, const struct buf *b2)
 {
 	if (b1->b_lblkno < b2->b_lblkno)
 		return(-1);
@@ -134,6 +131,7 @@ rb_buf_compare(struct buf *b1, struct bu
 		return(1);
 	return(0);
 }
+RBT_GENERATE(buf_rb_bufs, buf, b_rbbufs, rb_buf_compare);

 /*
  * Initialize the vnode management data structures.
@@ -375,8 +373,8 @@ getnewvnode(enum vtagtype tag, struct mo
 		vp = pool_get(&vnode_pool, PR_WAITOK | PR_ZERO);
 		vp->v_uvm = pool_get(&uvm_vnode_pool, PR_WAITOK | PR_ZERO);
 		vp->v_uvm->u_vnode = vp;
-		RB_INIT(&vp->v_bufs_tree);
-		RB_INIT(&vp->v_nc_tree);
+		RBT_INIT(buf_rb_bufs, &vp->v_bufs_tree);
+		namecache_rb_tree_init(&vp->v_nc_tree);
 		TAILQ_INIT(&vp->v_cache_dst);
 		numvnodes++;
 	} else {
Index: sys/arch/amd64/amd64/pmap.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/pmap.c,v
retrieving revision 1.99
diff -u -p -r1.99 pmap.c
--- sys/arch/amd64/amd64/pmap.c	7 Jun 2016 06:23:19 -0000	1.99
+++ sys/arch/amd64/amd64/pmap.c	12 Aug 2016 07:06:58 -0000
@@ -891,7 +891,7 @@ pmap_freepage(struct pmap *pmap, struct
 	obj = &pmap->pm_obj[lidx];
 	pmap->pm_stats.resident_count--;
 	if (pmap->pm_ptphint[lidx] == ptp)
-		pmap->pm_ptphint[lidx] = RB_ROOT(&obj->memt);
+		pmap->pm_ptphint[lidx] = RBT_ROOT(uvm_objtree, &obj->memt);
 	ptp->wire_count = 0;
 	uvm_pagerealloc(ptp, NULL, 0);
 	TAILQ_INSERT_TAIL(pagelist, ptp, pageq);
@@ -1143,7 +1143,8 @@ pmap_destroy(struct pmap *pmap)
 	 */

 	for (i = 0; i < PTP_LEVELS - 1; i++) {
-		while ((pg = RB_ROOT(&pmap->pm_obj[i].memt)) != NULL) {
+		while ((pg = RBT_ROOT(uvm_objtree,
+		    &pmap->pm_obj[i].memt)) != NULL) {
 			KASSERT((pg->pg_flags & PG_BUSY) == 0);

 			pg->wire_count = 0;
Index: sys/conf/files
===================================================================
RCS file: /cvs/src/sys/conf/files,v
retrieving revision 1.623
diff -u -p -r1.623 files
--- sys/conf/files	11 Aug 2016 09:30:57 -0000	1.623
+++ sys/conf/files	12 Aug 2016 07:06:58 -0000
@@ -689,6 +689,7 @@ file kern/subr_hibernate.c		hibernate
 file kern/subr_log.c
 file kern/subr_poison.c			diagnostic
 file kern/subr_pool.c
+file kern/subr_rbtree.c
 file kern/dma_alloc.c
 file kern/subr_prf.c
 file kern/subr_prof.c
Index: sys/net/if_pfsync.c
===================================================================
RCS file: /cvs/src/sys/net/if_pfsync.c,v
retrieving revision 1.229
diff -u -p -r1.229 if_pfsync.c
--- sys/net/if_pfsync.c	29 Apr 2016 08:55:03 -0000	1.229
+++ sys/net/if_pfsync.c	12 Aug 2016 07:06:58 -0000
@@ -749,8 +749,8 @@ pfsync_in_clr(caddr_t buf, int len, int
 		    (kif = pfi_kif_find(clr->ifname)) == NULL)
 			continue;

-		for (st = RB_MIN(pf_state_tree_id, &tree_id); st; st = nexts) {
-			nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
+		for (st = RBT_MIN(pf_state_tree_id, &tree_id); st; st = nexts) {
+			nexts = RBT_NEXT(pf_state_tree_id, st);
 			if (st->creatorid == creatorid &&
 			    ((kif && st->kif == kif) || !kif)) {
 				SET(st->state_flags, PFSTATE_NOSYNC);
Index: sys/net/if_pppx.c
===================================================================
RCS file: /cvs/src/sys/net/if_pppx.c,v
retrieving revision 1.51
diff -u -p -r1.51 if_pppx.c
--- sys/net/if_pppx.c	13 Apr 2016 11:41:15 -0000	1.51
+++ sys/net/if_pppx.c	12 Aug 2016 07:06:58 -0000
@@ -139,12 +139,10 @@ struct pppx_if_key {
 	int			pxik_protocol;
 };

-int pppx_if_cmp(struct pppx_if *, struct pppx_if *);
-
 struct pppx_if {
 	struct pppx_if_key	pxi_key; /* must be first in the struct */

-	RB_ENTRY(pppx_if)	pxi_entry;
+	RBT_ENTRY(pppx_if)	pxi_entry;
 	LIST_ENTRY(pppx_if)	pxi_list;

 	int			pxi_unit;
@@ -154,9 +152,15 @@ struct pppx_if {
 	struct pipex_iface_context	pxi_ifcontext;
 };

+static inline int
+pppx_if_cmp(const struct pppx_if *a, const struct pppx_if *b)
+{
+	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
+}
+
 struct rwlock			pppx_ifs_lk = RWLOCK_INITIALIZER("pppxifs");
-RB_HEAD(pppx_ifs, pppx_if)	pppx_ifs = RB_INITIALIZER(&pppx_ifs);
-RB_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);
+RBT_HEAD(pppx_ifs, pppx_if)	pppx_ifs = RBT_INITIALIZER(&pppx_ifs);
+RBT_PROTOTYPE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);

 int		pppx_if_next_unit(void);
 struct pppx_if *pppx_if_find(struct pppx_dev *, int, int);
@@ -607,12 +611,6 @@ pppxclose(dev_t dev, int flags, int mode
 }

 int
-pppx_if_cmp(struct pppx_if *a, struct pppx_if *b)
-{
-	return memcmp(&a->pxi_key, &b->pxi_key, sizeof(a->pxi_key));
-}
-
-int
 pppx_if_next_unit(void)
 {
 	struct pppx_if *pxi;
@@ -623,7 +621,7 @@ pppx_if_next_unit(void)
 	/* this is safe without splnet since we're not modifying it */
 	do {
 		int found = 0;
-		RB_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
+		RBT_FOREACH(pxi, pppx_ifs, &pppx_ifs) {
 			if (pxi->pxi_unit == unit) {
 				found = 1;
 				break;
@@ -648,7 +646,7 @@ pppx_if_find(struct pppx_dev *pxd, int s
 	s->pxi_key.pxik_protocol = protocol;

 	rw_enter_read(&pppx_ifs_lk);
-	p = RB_FIND(pppx_ifs, &pppx_ifs, s);
+	p = RBT_FIND(pppx_ifs, &pppx_ifs, s);
 	rw_exit_read(&pppx_ifs_lk);

 	free(s, M_DEVBUF, 0);
@@ -829,7 +827,7 @@ pppx_add_session(struct pppx_dev *pxd, s
 	pxi->pxi_dev = pxd;

 	/* this is safe without splnet since we're not modifying it */
-	if (RB_FIND(pppx_ifs, &pppx_ifs, pxi) != NULL) {
+	if (RBT_FIND(pppx_ifs, &pppx_ifs, pxi) != NULL) {
 		pool_put(pppx_if_pl, pxi);
 		error = EADDRINUSE;
 		goto out;
@@ -875,7 +873,7 @@ pppx_add_session(struct pppx_dev *pxd, s
 #endif

 	SET(ifp->if_flags, IFF_RUNNING);
-	if (RB_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL)
+	if (RBT_INSERT(pppx_ifs, &pppx_ifs, pxi) != NULL)
 		panic("pppx_ifs modified while lock was held");
 	LIST_INSERT_HEAD(&pxd->pxd_pxis, pxi, pxi_list);

@@ -983,7 +981,7 @@ pppx_if_destroy(struct pppx_dev *pxd, st
 	if_detach(ifp);

 	rw_enter_write(&pppx_ifs_lk);
-	if (RB_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
+	if (RBT_REMOVE(pppx_ifs, &pppx_ifs, pxi) == NULL)
 		panic("pppx_ifs modified while lock was held");
 	LIST_REMOVE(pxi, pxi_list);
 	rw_exit_write(&pppx_ifs_lk);
@@ -1127,4 +1125,4 @@ pppx_if_ioctl(struct ifnet *ifp, u_long
 	return (error);
 }

-RB_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);
+RBT_GENERATE(pppx_ifs, pppx_if, pxi_entry, pppx_if_cmp);
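[ Not part of the diff: the recurring mechanical change in the pf
chunks below. RBT_NEXT()/RBT_PREV() drop the tree-head argument, since
successor lookup only needs the element's own linkage:

	-	next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
	+	next = RBT_NEXT(pf_src_tree, cur);

Loops of that shape could later move to the new iterator macros; a
sketch of pf_purge_expired_src_nodes()'s walk, ignoring its locking
dance: ]

	struct pf_src_node *cur, *next;

	RBT_FOREACH_SAFE(cur, pf_src_tree, &tree_src_tracking, next) {
		if (cur->states == 0 && cur->expire <= time_uptime)
			pf_remove_src_node(cur);
	}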
Index: sys/net/pf.c
===================================================================
RCS file: /cvs/src/sys/net/pf.c,v
retrieving revision 1.979
diff -u -p -r1.979 pf.c
--- sys/net/pf.c	18 Jul 2016 13:17:44 -0000	1.979
+++ sys/net/pf.c	12 Aug 2016 07:06:58 -0000
@@ -288,24 +288,25 @@ struct pf_pool_limit pf_pool_limits[PF_L
 		mrm->r->states_cur++;				\
 	} while (0)

-static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
-static __inline int pf_state_compare_key(struct pf_state_key *,
-	struct pf_state_key *);
-static __inline int pf_state_compare_id(struct pf_state *,
-	struct pf_state *);
+static __inline int pf_src_compare(const struct pf_src_node *,
+	const struct pf_src_node *);
+static __inline int pf_state_compare_key(const struct pf_state_key *,
+	const struct pf_state_key *);
+static __inline int pf_state_compare_id(const struct pf_state *,
+	const struct pf_state *);

 struct pf_src_tree tree_src_tracking;
 struct pf_state_tree_id tree_id;
 struct pf_state_queue state_list;

-RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
-RB_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
-RB_GENERATE(pf_state_tree_id, pf_state,
-    entry_id, pf_state_compare_id);
+RBT_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
+RBT_GENERATE(pf_state_tree, pf_state_key, entry, pf_state_compare_key);
+RBT_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id);

 __inline int
-pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
+pf_addr_compare(const struct pf_addr *a, const struct pf_addr *b,
+    sa_family_t af)
 {
 	switch (af) {
 	case AF_INET:
@@ -339,7 +340,7 @@ pf_addr_compare(struct pf_addr *a, struc
 }

 static __inline int
-pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
+pf_src_compare(const struct pf_src_node *a, const struct pf_src_node *b)
 {
 	int	diff;

@@ -470,7 +471,7 @@ pf_src_connlimit(struct pf_state **state
 		struct pf_state		*st;

 		pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
-		RB_FOREACH(st, pf_state_tree_id, &tree_id) {
+		RBT_FOREACH(st, pf_state_tree_id, &tree_id) {
 			sk = st->key[PF_SK_WIRE];
 			/*
 			 * Kill states from this source. (Only those
@@ -518,7 +519,7 @@ pf_insert_src_node(struct pf_src_node **
 		PF_ACPY(&k.addr, src, af);
 		k.rule.ptr = rule;
 		pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
-		*sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
+		*sn = RBT_FIND(pf_src_tree, &tree_src_tracking, &k);
 	}
 	if (*sn == NULL) {
 		if (!rule->max_src_nodes ||
@@ -539,7 +540,7 @@ pf_insert_src_node(struct pf_src_node **
 		PF_ACPY(&(*sn)->addr, src, af);
 		if (raddr)
 			PF_ACPY(&(*sn)->raddr, raddr, af);
-		if (RB_INSERT(pf_src_tree,
+		if (RBT_INSERT(pf_src_tree,
 		    &tree_src_tracking, *sn) != NULL) {
 			if (pf_status.debug >= LOG_NOTICE) {
 				log(LOG_NOTICE,
@@ -574,7 +575,7 @@ pf_remove_src_node(struct pf_src_node *s
 	if (sn->rule.ptr->states_cur == 0 &&
 	    sn->rule.ptr->src_nodes == 0)
 		pf_rm_rule(NULL, sn->rule.ptr);
-	RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
+	RBT_REMOVE(pf_src_tree, &tree_src_tracking, sn);
 	pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
 	pf_status.src_nodes--;
 	pool_put(&pf_src_tree_pl, sn);
@@ -615,7 +616,7 @@ pf_state_rm_src_node(struct pf_state *s,
 /* state table stuff */

 static __inline int
-pf_state_compare_key(struct pf_state_key *a, struct pf_state_key *b)
+pf_state_compare_key(const struct pf_state_key *a, const struct pf_state_key *b)
 {
 	int	diff;

@@ -637,7 +638,7 @@ pf_state_compare_key(struct pf_state_key
 }

 static __inline int
-pf_state_compare_id(struct pf_state *a, struct pf_state *b)
+pf_state_compare_id(const struct pf_state *a, const struct pf_state *b)
 {
 	if (a->id > b->id)
 		return (1);
@@ -659,7 +660,7 @@ pf_state_key_attach(struct pf_state_key
 	struct pf_state *olds = NULL;

 	KASSERT(s->key[idx] == NULL);
-	if ((cur = RB_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
+	if ((cur = RBT_INSERT(pf_state_tree, &pf_statetbl, sk)) != NULL) {
 		/* key exists. check for same kif, if none, add to key */
 		TAILQ_FOREACH(si, &cur->states, entry)
 			if (si->s->kif == s->kif &&
@@ -758,7 +759,7 @@ pf_state_key_detach(struct pf_state *s,
 	sk = s->key[idx];
 	s->key[idx] = NULL;
 	if (TAILQ_EMPTY(&sk->states)) {
-		RB_REMOVE(pf_state_tree, &pf_statetbl, sk);
+		RBT_REMOVE(pf_state_tree, &pf_statetbl, sk);
 		sk->removed = 1;
 		pf_state_key_unlink_reverse(sk);
 		pf_inpcb_unlink_state_key(sk->inp);
@@ -934,7 +935,7 @@ pf_state_insert(struct pfi_kif *kif, str
 		s->id = htobe64(pf_status.stateid++);
 		s->creatorid = pf_status.hostid;
 	}
-	if (RB_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
+	if (RBT_INSERT(pf_state_tree_id, &tree_id, s) != NULL) {
 		if (pf_status.debug >= LOG_NOTICE) {
 			log(LOG_NOTICE, "pf: state insert failed: "
 			    "id: %016llx creatorid: %08x",
@@ -959,7 +960,7 @@ pf_find_state_byid(struct pf_state_cmp *
 {
 	pf_status.fcounters[FCNT_STATE_SEARCH]++;

-	return (RB_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
+	return (RBT_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
 }

 int
@@ -1038,7 +1039,7 @@ pf_find_state(struct pfi_kif *kif, struc
 	}

 	if (sk == NULL) {
-		if ((sk = RB_FIND(pf_state_tree, &pf_statetbl,
+		if ((sk = RBT_FIND(pf_state_tree, &pf_statetbl,
 		    (struct pf_state_key *)key)) == NULL)
 			return (NULL);
 		if (dir == PF_OUT && pkt_sk &&
@@ -1074,7 +1075,7 @@ pf_find_state_all(struct pf_state_key_cm

 	pf_status.fcounters[FCNT_STATE_SEARCH]++;

-	sk = RB_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
+	sk = RBT_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);

 	if (sk != NULL) {
 		TAILQ_FOREACH(si, &sk->states, entry)
@@ -1235,14 +1236,13 @@ pf_purge_expired_src_nodes(int waslocked
 	struct pf_src_node		*cur, *next;
 	int				 locked = waslocked;

-	for (cur = RB_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
-		next = RB_NEXT(pf_src_tree, &tree_src_tracking, cur);
+	for (cur = RBT_MIN(pf_src_tree, &tree_src_tracking); cur; cur = next) {
+		next = RBT_NEXT(pf_src_tree, cur);

 		if (cur->states == 0 && cur->expire <= time_uptime) {
 			if (! locked) {
 				rw_enter_write(&pf_consistency_lock);
-				next = RB_NEXT(pf_src_tree,
-				    &tree_src_tracking, cur);
+				next = RBT_NEXT(pf_src_tree, cur);
 				locked = 1;
 			}
 			pf_remove_src_node(cur);
@@ -1293,7 +1293,7 @@ pf_remove_state(struct pf_state *cur)
 		    TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag,
 		    cur->key[PF_SK_WIRE]->rdomain);
 	}
-	RB_REMOVE(pf_state_tree_id, &tree_id, cur);
+	RBT_REMOVE(pf_state_tree_id, &tree_id, cur);
#if NPFLOW > 0
 	if (cur->state_flags & PFSTATE_PFLOW)
 		export_pflow(cur);
@@ -2672,7 +2672,7 @@ pf_step_into_anchor(int *depth, struct p
 	f->r = *r;
 	if ((*r)->anchor_wildcard) {
 		f->parent = &(*r)->anchor->children;
-		if ((f->child = RB_MIN(pf_anchor_node, f->parent)) == NULL) {
+		if ((f->child = RBT_MIN(pf_anchor_node, f->parent)) == NULL) {
 			*r = NULL;
 			return;
 		}
@@ -2697,7 +2697,7 @@ pf_step_out_of_anchor(int *depth, struct
 			break;
 		f = pf_anchor_stack + *depth - 1;
 		if (f->parent != NULL && f->child != NULL) {
-			f->child = RB_NEXT(pf_anchor_node, f->parent, f->child);
+			f->child = RBT_NEXT(pf_anchor_node, f->child);
 			if (f->child != NULL) {
 				*rs = &f->child->ruleset;
 				*r = TAILQ_FIRST((*rs)->rules.active.ptr);
Index: sys/net/pf_if.c
===================================================================
RCS file: /cvs/src/sys/net/pf_if.c,v
retrieving revision 1.82
diff -u -p -r1.82 pf_if.c
--- sys/net/pf_if.c	20 Nov 2015 03:35:23 -0000	1.82
+++ sys/net/pf_if.c	12 Aug 2016 07:06:58 -0000
@@ -72,12 +72,12 @@ void		 pfi_table_update(struct pfr_ktabl
 void		 pfi_kifaddr_update(void *);
 void		 pfi_instance_add(struct ifnet *, u_int8_t, int);
 void		 pfi_address_add(struct sockaddr *, sa_family_t, u_int8_t);
-int		 pfi_if_compare(struct pfi_kif *, struct pfi_kif *);
+int		 pfi_if_compare(const struct pfi_kif *, const struct pfi_kif *);
 int		 pfi_skip_if(const char *, struct pfi_kif *);
 int		 pfi_unmask(void *);

-RB_PROTOTYPE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
-RB_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
+RBT_PROTOTYPE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);
+RBT_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare);

#define PFI_BUFFER_MAX	0x10000
#define PFI_MTYPE	M_IFADDR
@@ -105,7 +105,7 @@ pfi_kif_find(const char *kif_name)
 	bzero(&s, sizeof(s));
 	strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));

-	return (RB_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)&s));
+	return (RBT_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)&s));
 }

 struct pfi_kif *
@@ -130,7 +130,7 @@ pfi_kif_get(const char *kif_name)
 		kif->pfik_flags_new |= PFI_IFLAG_ANY;
 	}

-	RB_INSERT(pfi_ifhead, &pfi_ifs, kif);
+	RBT_INSERT(pfi_ifhead, &pfi_ifs, kif);
 	return (kif);
 }

@@ -195,7 +195,7 @@ pfi_kif_unref(struct pfi_kif *kif, enum
 	if (kif->pfik_rules || kif->pfik_states || kif->pfik_routes)
 		return;

-	RB_REMOVE(pfi_ifhead, &pfi_ifs, kif);
+	RBT_REMOVE(pfi_ifhead, &pfi_ifs, kif);
 	free(kif, PFI_MTYPE, 0);
 }

@@ -628,7 +628,7 @@ pfi_kifaddr_update(void *v)
 }

 int
-pfi_if_compare(struct pfi_kif *p, struct pfi_kif *q)
+pfi_if_compare(const struct pfi_kif *p, const struct pfi_kif *q)
 {
 	return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
 }
@@ -644,7 +644,7 @@ pfi_update_status(const char *name, stru
 	s = splsoftnet();
 	if (*name == '\0' && pfs == NULL) {
-		RB_FOREACH(p, pfi_ifhead, &pfi_ifs) {
+		RBT_FOREACH(p, pfi_ifhead, &pfi_ifs) {
 			bzero(p->pfik_packets, sizeof(p->pfik_packets));
 			bzero(p->pfik_bytes, sizeof(p->pfik_bytes));
 			p->pfik_tzero = time_second;
@@ -654,7 +654,7 @@ pfi_update_status(const char *name, stru
 	}

 	strlcpy(key.pfik_name, name, sizeof(key.pfik_name));
-	p = RB_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)&key);
+	p = RBT_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)&key);
 	if (p == NULL) {
 		splx(s);
 		return;
@@ -704,8 +704,8 @@ pfi_get_ifaces(const char *name, struct
 	int		 s, n = 0;

 	s = splsoftnet();
-	for (p = RB_MIN(pfi_ifhead, &pfi_ifs); p; p = nextp) {
-		nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p);
+	for (p = RBT_MIN(pfi_ifhead, &pfi_ifs); p; p = nextp) {
+		nextp = RBT_NEXT(pfi_ifhead, p);
 		if (pfi_skip_if(name, p))
 			continue;
 		if (*size > n++) {
@@ -717,7 +717,7 @@ pfi_get_ifaces(const char *name, struct
 				splx(s);
 				return (EFAULT);
 			}
-			nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p);
+			nextp = RBT_NEXT(pfi_ifhead, p);
 			pfi_kif_unref(p, PFI_KIF_REF_RULE);
 		}
 	}
@@ -755,7 +755,7 @@ pfi_set_flags(const char *name, int flag
 	int		s;

 	s = splsoftnet();
-	RB_FOREACH(p, pfi_ifhead, &pfi_ifs) {
+	RBT_FOREACH(p, pfi_ifhead, &pfi_ifs) {
 		if (pfi_skip_if(name, p))
 			continue;
 		p->pfik_flags_new = p->pfik_flags | flags;
@@ -771,7 +771,7 @@ pfi_clear_flags(const char *name, int fl
 	int		s;

 	s = splsoftnet();
-	RB_FOREACH(p, pfi_ifhead, &pfi_ifs) {
+	RBT_FOREACH(p, pfi_ifhead, &pfi_ifs) {
 		if (pfi_skip_if(name, p))
 			continue;
 		p->pfik_flags_new = p->pfik_flags & ~flags;
@@ -787,7 +787,7 @@ pfi_xcommit(void)
 	int		 s;

 	s = splsoftnet();
-	RB_FOREACH(p, pfi_ifhead, &pfi_ifs)
+	RBT_FOREACH(p, pfi_ifhead, &pfi_ifs)
 		p->pfik_flags = p->pfik_flags_new;
 	splx(s);
 }
Index: sys/net/pf_ioctl.c
===================================================================
RCS file: /cvs/src/sys/net/pf_ioctl.c,v
retrieving revision 1.297
diff -u -p -r1.297 pf_ioctl.c
--- sys/net/pf_ioctl.c	3 Dec 2015 13:30:18 -0000	1.297
+++ sys/net/pf_ioctl.c	12 Aug 2016 07:06:58 -0000
@@ -169,8 +169,8 @@ pfattach(int num)
 	pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
 	    PFR_KENTRY_HIWAT_SMALL;

-	RB_INIT(&tree_src_tracking);
-	RB_INIT(&pf_anchors);
+	RBT_INIT(pf_src_tree, &tree_src_tracking);
+	RBT_INIT(pf_anchor_global, &pf_anchors);
 	pf_init_ruleset(&pf_main_ruleset);
 	TAILQ_INIT(&pf_queues[0]);
 	TAILQ_INIT(&pf_queues[1]);
@@ -1421,8 +1421,8 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
 		u_int			 killed = 0;

-		for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
-			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
+		for (s = RBT_MIN(pf_state_tree_id, &tree_id); s; s = nexts) {
+			nexts = RBT_NEXT(pf_state_tree_id, s);

 			if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
 			    s->kif->pfik_name)) {
@@ -1459,9 +1459,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 			break;
 		}

-		for (s = RB_MIN(pf_state_tree_id, &tree_id); s;
+		for (s = RBT_MIN(pf_state_tree_id, &tree_id); s;
 		    s = nexts) {
-			nexts = RB_NEXT(pf_state_tree_id, &tree_id, s);
+			nexts = RBT_NEXT(pf_state_tree_id, s);

 			if (s->direction == PF_OUT) {
 				sk = s->key[PF_SK_STACK];
@@ -1757,11 +1757,11 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		pr->nr = 0;
 		if (ruleset->anchor == NULL) {
 			/* XXX kludge for pf_main_ruleset */
-			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
+			RBT_FOREACH(anchor, pf_anchor_global, &pf_anchors)
 				if (anchor->parent == NULL)
 					pr->nr++;
 		} else {
-			RB_FOREACH(anchor, pf_anchor_node,
+			RBT_FOREACH(anchor, pf_anchor_node,
 			    &ruleset->anchor->children)
 				pr->nr++;
 		}
@@ -1782,14 +1782,14 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		pr->name[0] = 0;
 		if (ruleset->anchor == NULL) {
 			/* XXX kludge for pf_main_ruleset */
-			RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
+			RBT_FOREACH(anchor, pf_anchor_global, &pf_anchors)
 				if (anchor->parent == NULL && nr++ == pr->nr) {
 					strlcpy(pr->name, anchor->name,
 					    sizeof(pr->name));
 					break;
 				}
 		} else {
-			RB_FOREACH(anchor, pf_anchor_node,
+			RBT_FOREACH(anchor, pf_anchor_node,
 			    &ruleset->anchor->children)
 				if (nr++ == pr->nr) {
 					strlcpy(pr->name, anchor->name,
@@ -2239,7 +2239,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		int	space = psn->psn_len;

 		if (space == 0) {
-			RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
+			RBT_FOREACH(n, pf_src_tree, &tree_src_tracking)
 				nr++;
 			psn->psn_len = sizeof(struct pf_src_node) * nr;
 			break;
@@ -2248,7 +2248,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK);

 		p = psn->psn_src_nodes;
-		RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
+		RBT_FOREACH(n, pf_src_tree, &tree_src_tracking) {
 			int	secs = time_uptime, diff;

 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
@@ -2292,9 +2292,9 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		struct pf_src_node	*n;
 		struct pf_state		*state;

-		RB_FOREACH(state, pf_state_tree_id, &tree_id)
+		RBT_FOREACH(state, pf_state_tree_id, &tree_id)
 			pf_src_tree_remove_state(state);
-		RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
+		RBT_FOREACH(n, pf_src_tree, &tree_src_tracking)
 			n->expire = 1;
 		pf_purge_expired_src_nodes(1);
 		break;
@@ -2307,7 +2307,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 		    (struct pfioc_src_node_kill *)addr;
 		u_int			killed = 0;

-		RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
+		RBT_FOREACH(sn, pf_src_tree, &tree_src_tracking) {
 			if (PF_MATCHA(psnk->psnk_src.neg,
 			    &psnk->psnk_src.addr.v.a.addr,
 			    &psnk->psnk_src.addr.v.a.mask,
@@ -2318,7 +2318,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t a
 			    &sn->raddr, sn->af)) {
 				/* Handle state to src_node linkage */
 				if (sn->states != 0)
-					RB_FOREACH(s, pf_state_tree_id,
+					RBT_FOREACH(s, pf_state_tree_id,
 					    &tree_id)
 						pf_state_rm_src_node(s, sn);
 				sn->expire = 1;
Index: sys/net/pf_lb.c
===================================================================
RCS file: /cvs/src/sys/net/pf_lb.c,v
retrieving revision 1.55
diff -u -p -r1.55 pf_lb.c
--- sys/net/pf_lb.c	19 Jul 2016 12:51:19 -0000	1.55
+++ sys/net/pf_lb.c	12 Aug 2016 07:06:58 -0000
@@ -275,7 +275,7 @@ pf_map_addr_sticky(sa_family_t af, struc
 	PF_ACPY(&k.addr, saddr, af);
 	k.rule.ptr = r;
 	pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
-	sns[type] = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
+	sns[type] = RBT_FIND(pf_src_tree, &tree_src_tracking, &k);
 	if (sns[type] == NULL)
 		return (-1);

@@ -307,7 +307,7 @@ pf_map_addr_sticky(sa_family_t af, struc
 		}
 		if (sns[type]->states != 0) {
 			/* XXX expensive */
-			RB_FOREACH(s, pf_state_tree_id,
+			RBT_FOREACH(s, pf_state_tree_id,
 			    &tree_id)
 				pf_state_rm_src_node(s, sns[type]);
Index: sys/net/pf_norm.c
===================================================================
RCS file: /cvs/src/sys/net/pf_norm.c,v
retrieving revision 1.188
diff -u -p -r1.188 pf_norm.c
--- sys/net/pf_norm.c	15 Jun 2016 11:49:34 -0000	1.188
+++ sys/net/pf_norm.c	12 Aug 2016 07:06:58 -0000
@@ -74,7 +74,7 @@ struct pf_frent {
 	u_int16_t	 fe_mff;	/* more fragment flag */
 };

-/* keep synced with struct pf_fragment, used in RB_FIND */
+/* keep synced with struct pf_fragment, used in RBT_FIND */
 struct pf_fragment_cmp {
 	struct pf_addr	fr_src;
 	struct pf_addr	fr_dst;
@@ -92,7 +92,7 @@ struct pf_fragment {
 	u_int8_t	fr_proto;	/* protocol of this fragment */
 	u_int8_t	fr_direction;	/* pf packet direction */

-	RB_ENTRY(pf_fragment) fr_entry;
+	RBT_ENTRY(pf_fragment) fr_entry;
TAILQ_ENTRY(pf_fragment) frag_next; TAILQ_HEAD(pf_fragq, pf_frent) fr_queue; int32_t fr_timeout; @@ -107,11 +107,11 @@ struct pf_fragment_tag { TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue; -static __inline int pf_frag_compare(struct pf_fragment *, - struct pf_fragment *); -RB_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree; -RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); -RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); +static __inline int pf_frag_compare(const struct pf_fragment *, + const struct pf_fragment *); +RBT_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree; +RBT_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); +RBT_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); /* Private prototypes */ void pf_flush_fragments(void); @@ -151,7 +151,7 @@ pf_normalize_init(void) } static __inline int -pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) +pf_frag_compare(const struct pf_fragment *a, const struct pf_fragment *b) { int diff; @@ -211,7 +211,7 @@ pf_free_fragment(struct pf_fragment *fra { struct pf_frent *frent; - RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag); + RBT_REMOVE(pf_frag_tree, &pf_frag_tree, frag); TAILQ_REMOVE(&pf_fragqueue, frag, frag_next); /* Free all fragment entries */ @@ -229,7 +229,7 @@ pf_find_fragment(struct pf_fragment_cmp { struct pf_fragment *frag; - frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key); + frag = RBT_FIND(pf_frag_tree, tree, (struct pf_fragment *)key); if (frag != NULL) { TAILQ_REMOVE(&pf_fragqueue, frag, frag_next); TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next); @@ -309,7 +309,7 @@ pf_fillup_fragment(struct pf_fragment_cm frag->fr_timeout = time_uptime; frag->fr_maxlen = frent->fe_len; - RB_INSERT(pf_frag_tree, &pf_frag_tree, frag); + RBT_INSERT(pf_frag_tree, &pf_frag_tree, frag); TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next); /* We do not have a previous fragment */ Index: sys/net/pf_ruleset.c =================================================================== RCS file: /cvs/src/sys/net/pf_ruleset.c,v retrieving revision 1.12 diff -u -p -r1.12 pf_ruleset.c --- sys/net/pf_ruleset.c 19 Jul 2016 13:34:12 -0000 1.12 +++ sys/net/pf_ruleset.c 12 Aug 2016 07:06:58 -0000 @@ -79,13 +79,14 @@ struct pf_anchor_global pf_anchors; struct pf_anchor pf_main_anchor; -static __inline int pf_anchor_compare(struct pf_anchor *, struct pf_anchor *); +static __inline int pf_anchor_compare(const struct pf_anchor *, + const struct pf_anchor *); -RB_GENERATE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare); -RB_GENERATE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare); +RBT_GENERATE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare); +RBT_GENERATE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare); static __inline int -pf_anchor_compare(struct pf_anchor *a, struct pf_anchor *b) +pf_anchor_compare(const struct pf_anchor *a, const struct pf_anchor *b) { int c = strcmp(a->path, b->path); @@ -111,7 +112,7 @@ pf_find_anchor(const char *path) if (key == NULL) return (NULL); strlcpy(key->path, path, sizeof(key->path)); - found = RB_FIND(pf_anchor_global, &pf_anchors, key); + found = RBT_FIND(pf_anchor_global, &pf_anchors, key); rs_free(key); return (found); } @@ -180,7 +181,7 @@ pf_find_or_create_ruleset(const char *pa rs_free(p); return (NULL); } - RB_INIT(&anchor->children); + RBT_INIT(pf_anchor_node, &anchor->children); strlcpy(anchor->name, q, sizeof(anchor->name)); if (parent != NULL) { strlcpy(anchor->path, 
parent->path, @@ -188,10 +189,10 @@ pf_find_or_create_ruleset(const char *pa strlcat(anchor->path, "/", sizeof(anchor->path)); } strlcat(anchor->path, anchor->name, sizeof(anchor->path)); - if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) != + if ((dup = RBT_INSERT(pf_anchor_global, &pf_anchors, anchor)) != NULL) { DPFPRINTF(LOG_NOTICE, - "pf_find_or_create_ruleset: RB_INSERT1 " + "pf_find_or_create_ruleset: RBT_INSERT1 " "'%s' '%s' collides with '%s' '%s'", anchor->path, anchor->name, dup->path, dup->name); rs_free(anchor); @@ -200,14 +201,14 @@ pf_find_or_create_ruleset(const char *pa } if (parent != NULL) { anchor->parent = parent; - if ((dup = RB_INSERT(pf_anchor_node, &parent->children, + if ((dup = RBT_INSERT(pf_anchor_node, &parent->children, anchor)) != NULL) { DPFPRINTF(LOG_NOTICE, "pf_find_or_create_ruleset: " - "RB_INSERT2 '%s' '%s' collides with " + "RBT_INSERT2 '%s' '%s' collides with " "'%s' '%s'", anchor->path, anchor->name, dup->path, dup->name); - RB_REMOVE(pf_anchor_global, &pf_anchors, + RBT_REMOVE(pf_anchor_global, &pf_anchors, anchor); rs_free(anchor); rs_free(p); @@ -233,7 +234,7 @@ pf_remove_if_empty_ruleset(struct pf_rul while (ruleset != NULL) { if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL || - !RB_EMPTY(&ruleset->anchor->children) || + !RBT_EMPTY(pf_anchor_node, &ruleset->anchor->children) || ruleset->anchor->refcnt > 0 || ruleset->tables > 0 || ruleset->topen) return; @@ -241,9 +242,9 @@ pf_remove_if_empty_ruleset(struct pf_rul !TAILQ_EMPTY(ruleset->rules.inactive.ptr) || ruleset->rules.inactive.open) return; - RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor); + RBT_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor); if ((parent = ruleset->anchor->parent) != NULL) - RB_REMOVE(pf_anchor_node, &parent->children, + RBT_REMOVE(pf_anchor_node, &parent->children, ruleset->anchor); rs_free(ruleset->anchor); if (parent == NULL) Index: sys/net/pf_table.c =================================================================== RCS file: /cvs/src/sys/net/pf_table.c,v retrieving revision 1.116 diff -u -p -r1.116 pf_table.c --- sys/net/pf_table.c 3 Nov 2015 22:10:33 -0000 1.116 +++ sys/net/pf_table.c 12 Aug 2016 07:06:58 -0000 @@ -177,8 +177,8 @@ struct pfr_ktable *pfr_create_ktable(str int); void pfr_destroy_ktables(struct pfr_ktableworkq *, int); void pfr_destroy_ktable(struct pfr_ktable *, int); -int pfr_ktable_compare(struct pfr_ktable *, - struct pfr_ktable *); +int pfr_ktable_compare(const struct pfr_ktable *, + const struct pfr_ktable *); void pfr_ktable_winfo_update(struct pfr_ktable *, struct pfr_kentry *); struct pfr_ktable *pfr_lookup_table(struct pfr_table *); @@ -190,8 +190,8 @@ int pfr_skip_table(struct pfr_table * struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int); int pfr_islinklocal(sa_family_t, struct pf_addr *); -RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); -RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); +RBT_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); +RBT_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); struct pfr_ktablehead pfr_ktables; struct pfr_table pfr_nulltable; @@ -1274,7 +1274,7 @@ pfr_clr_tables(struct pfr_table *filter, return (ENOENT); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RBT_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (pfr_skip_table(filter, p, flags)) continue; if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR)) @@ -1312,7 +1312,7 @@ pfr_add_tables(struct 
pfr_table *tbl, in flags & PFR_FLAG_USERIOCTL)) senderr(EINVAL); key.pfrkt_flags |= PFR_TFLAG_ACTIVE; - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RBT_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p == NULL) { p = pfr_create_ktable(&key.pfrkt_t, tzero, 1, !(flags & PFR_FLAG_USERIOCTL)); @@ -1329,7 +1329,7 @@ pfr_add_tables(struct pfr_table *tbl, in /* find or create root table */ bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); - r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + r = RBT_FIND(pfr_ktablehead, &pfr_ktables, &key); if (r != NULL) { p->pfrkt_root = r; goto _skip; @@ -1388,7 +1388,7 @@ pfr_del_tables(struct pfr_table *tbl, in if (pfr_validate_table(&key.pfrkt_t, 0, flags & PFR_FLAG_USERIOCTL)) return (EINVAL); - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RBT_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { SLIST_FOREACH(q, &workq, pfrkt_workq) if (!pfr_ktable_compare(p, q)) @@ -1426,7 +1426,7 @@ pfr_get_tables(struct pfr_table *filter, *size = n; return (0); } - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RBT_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (pfr_skip_table(filter, p, flags)) continue; if (n-- <= 0) @@ -1464,7 +1464,7 @@ pfr_get_tstats(struct pfr_table *filter, return (0); } SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RBT_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (pfr_skip_table(filter, p, flags)) continue; if (n-- <= 0) @@ -1505,7 +1505,7 @@ pfr_clr_tstats(struct pfr_table *tbl, in return (EFAULT); if (pfr_validate_table(&key.pfrkt_t, 0, 0)) return (EINVAL); - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RBT_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p != NULL) { SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); xzero++; @@ -1540,7 +1540,7 @@ pfr_set_tflags(struct pfr_table *tbl, in if (pfr_validate_table(&key.pfrkt_t, 0, flags & PFR_FLAG_USERIOCTL)) return (EINVAL); - p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + p = RBT_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { p->pfrkt_nflags = (p->pfrkt_flags | setflag) & ~clrflag; @@ -1583,7 +1583,7 @@ pfr_ina_begin(struct pfr_table *trs, u_i if (rs == NULL) return (ENOMEM); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RBT_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || pfr_skip_table(trs, p, 0)) continue; @@ -1626,7 +1626,7 @@ pfr_ina_define(struct pfr_table *tbl, st return (EBUSY); tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; SLIST_INIT(&tableq); - kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl); + kt = RBT_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl); if (kt == NULL) { kt = pfr_create_ktable(tbl, 0, 1, !(flags & PFR_FLAG_USERIOCTL)); @@ -1640,7 +1640,7 @@ pfr_ina_define(struct pfr_table *tbl, st /* find or create root table */ bzero(&key, sizeof(key)); strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name)); - rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); + rt = RBT_FIND(pfr_ktablehead, &pfr_ktables, &key); if (rt != NULL) { kt->pfrkt_root = rt; goto _skip; @@ -1722,7 +1722,7 @@ pfr_ina_rollback(struct pfr_table *trs, if (rs == NULL || !rs->topen || ticket != rs->tticket) return (0); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RBT_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || pfr_skip_table(trs, p, 0)) continue; @@ -1756,7 +1756,7 @@ pfr_ina_commit(struct pfr_table *trs, u_ 
return (EBUSY); SLIST_INIT(&workq); - RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { + RBT_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || pfr_skip_table(trs, p, 0)) continue; @@ -1929,7 +1929,7 @@ pfr_insert_ktables(struct pfr_ktablework void pfr_insert_ktable(struct pfr_ktable *kt) { - RB_INSERT(pfr_ktablehead, &pfr_ktables, kt); + RBT_INSERT(pfr_ktablehead, &pfr_ktables, kt); pfr_ktable_cnt++; if (kt->pfrkt_root != NULL) if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) @@ -1960,7 +1960,7 @@ pfr_setflags_ktable(struct pfr_ktable *k if (!(newf & PFR_TFLAG_ACTIVE)) newf &= ~PFR_TFLAG_USRMASK; if (!(newf & PFR_TFLAG_SETMASK)) { - RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt); + RBT_REMOVE(pfr_ktablehead, &pfr_ktables, kt); if (kt->pfrkt_root != NULL) if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) pfr_setflags_ktable(kt->pfrkt_root, @@ -2083,7 +2083,7 @@ pfr_destroy_ktable(struct pfr_ktable *kt } int -pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) +pfr_ktable_compare(const struct pfr_ktable *p, const struct pfr_ktable *q) { int d; @@ -2096,7 +2096,7 @@ struct pfr_ktable * pfr_lookup_table(struct pfr_table *tbl) { /* struct pfr_ktable start like a struct pfr_table */ - return (RB_FIND(pfr_ktablehead, &pfr_ktables, + return (RBT_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl)); } Index: sys/net/pfvar.h =================================================================== RCS file: /cvs/src/sys/net/pfvar.h,v retrieving revision 1.434 diff -u -p -r1.434 pfvar.h --- sys/net/pfvar.h 19 Jul 2016 13:30:51 -0000 1.434 +++ sys/net/pfvar.h 12 Aug 2016 07:06:58 -0000 @@ -35,7 +35,7 @@ #define _NET_PFVAR_H_ #include -#include <sys/tree.h> +#include <sys/rbtree.h> #include #include #include @@ -614,7 +614,7 @@ SLIST_HEAD(pf_rule_slist, pf_rule_item); enum pf_sn_types { PF_SN_NONE, PF_SN_NAT, PF_SN_RDR, PF_SN_ROUTE, PF_SN_MAX }; struct pf_src_node { - RB_ENTRY(pf_src_node) entry; + RBT_ENTRY(pf_src_node) entry; struct pf_addr addr; struct pf_addr raddr; union pf_rule_ptr rule; @@ -677,7 +677,7 @@ struct pf_state_peer { TAILQ_HEAD(pf_state_queue, pf_state); -/* keep synced with struct pf_state_key, used in RB_FIND */ +/* keep synced with struct pf_state_key, used in RBT_FIND */ struct pf_state_key_cmp { struct pf_addr addr[2]; u_int16_t port[2]; @@ -700,7 +700,7 @@ struct pf_state_key { sa_family_t af; u_int8_t proto; - RB_ENTRY(pf_state_key) entry; + RBT_ENTRY(pf_state_key) entry; struct pf_statelisthead states; struct pf_state_key *reverse; struct inpcb *inp; @@ -711,7 +711,7 @@ struct pf_state_key { ((key[PF_SK_WIRE]->af != key[PF_SK_STACK]->af) && \ (key[PF_SK_WIRE]->af != (family))) -/* keep synced with struct pf_state, used in RB_FIND */ +/* keep synced with struct pf_state, used in RBT_FIND */ struct pf_state_cmp { u_int64_t id; u_int32_t creatorid; @@ -727,7 +727,7 @@ struct pf_state { TAILQ_ENTRY(pf_state) sync_list; TAILQ_ENTRY(pf_state) entry_list; - RB_ENTRY(pf_state) entry_id; + RBT_ENTRY(pf_state) entry_id; struct pf_state_peer src; struct pf_state_peer dst; struct pf_rule_slist match_rules; @@ -911,11 +911,11 @@ struct pf_ruleset { int topen; }; -RB_HEAD(pf_anchor_global, pf_anchor); -RB_HEAD(pf_anchor_node, pf_anchor); +RBT_HEAD(pf_anchor_global, pf_anchor); +RBT_HEAD(pf_anchor_node, pf_anchor); struct pf_anchor { - RB_ENTRY(pf_anchor) entry_global; - RB_ENTRY(pf_anchor) entry_node; + RBT_ENTRY(pf_anchor) entry_global; + RBT_ENTRY(pf_anchor) entry_node; struct pf_anchor *parent; struct pf_anchor_node children; char
name[PF_ANCHOR_NAME_SIZE]; @@ -924,8 +924,8 @@ struct pf_anchor { int refcnt; /* anchor rules */ int match; }; -RB_PROTOTYPE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare) -RB_PROTOTYPE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare) +RBT_PROTOTYPE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare) +RBT_PROTOTYPE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare) #define PF_RESERVED_ANCHOR "_pf" @@ -1075,10 +1075,10 @@ struct pfr_kentry_all { #define pfrke_rkif u.kr.kif SLIST_HEAD(pfr_ktableworkq, pfr_ktable); -RB_HEAD(pfr_ktablehead, pfr_ktable); +RBT_HEAD(pfr_ktablehead, pfr_ktable); struct pfr_ktable { struct pfr_tstats pfrkt_ts; - RB_ENTRY(pfr_ktable) pfrkt_tree; + RBT_ENTRY(pfr_ktable) pfrkt_tree; SLIST_ENTRY(pfr_ktable) pfrkt_workq; struct radix_node_head *pfrkt_ip4; struct radix_node_head *pfrkt_ip6; @@ -1104,19 +1104,19 @@ struct pfr_ktable { #define pfrkt_nomatch pfrkt_ts.pfrts_nomatch #define pfrkt_tzero pfrkt_ts.pfrts_tzero -RB_HEAD(pf_state_tree, pf_state_key); -RB_PROTOTYPE(pf_state_tree, pf_state_key, entry, pf_state_compare_key) +RBT_HEAD(pf_state_tree, pf_state_key); +RBT_PROTOTYPE(pf_state_tree, pf_state_key, entry, pf_state_compare_key); -RB_HEAD(pf_state_tree_ext_gwy, pf_state_key); -RB_PROTOTYPE(pf_state_tree_ext_gwy, pf_state_key, - entry_ext_gwy, pf_state_compare_ext_gwy) +RBT_HEAD(pf_state_tree_ext_gwy, pf_state_key); +RBT_PROTOTYPE(pf_state_tree_ext_gwy, pf_state_key, + entry_ext_gwy, pf_state_compare_ext_gwy); -RB_HEAD(pfi_ifhead, pfi_kif); +RBT_HEAD(pfi_ifhead, pfi_kif); /* state tables */ extern struct pf_state_tree pf_statetbl; -/* keep synced with pfi_kif, used in RB_FIND */ +/* keep synced with pfi_kif, used in RBT_FIND */ struct pfi_kif_cmp { char pfik_name[IFNAMSIZ]; }; @@ -1126,7 +1126,7 @@ struct ifg_group; struct pfi_kif { char pfik_name[IFNAMSIZ]; - RB_ENTRY(pfi_kif) pfik_tree; + RBT_ENTRY(pfi_kif) pfik_tree; u_int64_t pfik_packets[2][2][2]; u_int64_t pfik_bytes[2][2][2]; time_t pfik_tzero; @@ -1640,12 +1640,12 @@ struct pfioc_iface { #define DIOCGETQSTATS _IOWR('D', 96, struct pfioc_qstats) #ifdef _KERNEL -RB_HEAD(pf_src_tree, pf_src_node); -RB_PROTOTYPE(pf_src_tree, pf_src_node, entry, pf_src_compare); +RBT_HEAD(pf_src_tree, pf_src_node); +RBT_PROTOTYPE(pf_src_tree, pf_src_node, entry, pf_src_compare); extern struct pf_src_tree tree_src_tracking; -RB_HEAD(pf_state_tree_id, pf_state); -RB_PROTOTYPE(pf_state_tree_id, pf_state, +RBT_HEAD(pf_state_tree_id, pf_state); +RBT_PROTOTYPE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id); extern struct pf_state_tree_id tree_id; extern struct pf_state_queue state_list; @@ -1837,7 +1837,7 @@ void pf_tag2tagname(u_int16_t, char *) void pf_tag_ref(u_int16_t); void pf_tag_unref(u_int16_t); void pf_tag_packet(struct mbuf *, int, int); -int pf_addr_compare(struct pf_addr *, struct pf_addr *, +int pf_addr_compare(const struct pf_addr *, const struct pf_addr *, sa_family_t); extern struct pf_status pf_status; Index: sys/nfs/nfs_node.c =================================================================== RCS file: /cvs/src/sys/nfs/nfs_node.c,v retrieving revision 1.64 diff -u -p -r1.64 nfs_node.c --- sys/nfs/nfs_node.c 19 Mar 2016 12:04:16 -0000 1.64 +++ sys/nfs/nfs_node.c 12 Aug 2016 07:06:58 -0000 @@ -64,7 +64,7 @@ struct rwlock nfs_hashlock = RWLOCK_INIT extern struct vops nfs_vops; /* filehandle to node lookup. 
*/ -static __inline int +static inline int nfsnode_cmp(const struct nfsnode *a, const struct nfsnode *b) { if (a->n_fhsize != b->n_fhsize) @@ -72,8 +72,14 @@ nfsnode_cmp(const struct nfsnode *a, con return (memcmp(a->n_fhp, b->n_fhp, a->n_fhsize)); } -RB_PROTOTYPE(nfs_nodetree, nfsnode, n_entry, nfsnode_cmp); -RB_GENERATE(nfs_nodetree, nfsnode, n_entry, nfsnode_cmp); +RBT_PROTOTYPE(nfs_nodetree, nfsnode, n_entry, nfsnode_cmp); +RBT_GENERATE(nfs_nodetree, nfsnode, n_entry, nfsnode_cmp); + +void +nfs_nodetree_init(struct nfs_nodetree *nm_tree) +{ + RBT_INIT(nfs_nodetree, nm_tree); +} /* * Look up a vnode/nfsnode by file handle and store the pointer in *npp. @@ -95,7 +101,7 @@ loop: rw_enter_write(&nfs_hashlock); find.n_fhp = fh; find.n_fhsize = fhsize; - np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find); + np = RBT_FIND(nfs_nodetree, &nmp->nm_ntree, &find); if (np != NULL) { rw_exit_write(&nfs_hashlock); vp = NFSTOV(np); @@ -124,7 +130,7 @@ loop: return (error); } nvp->v_flag |= VLARVAL; - np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find); + np = RBT_FIND(nfs_nodetree, &nmp->nm_ntree, &find); if (np != NULL) { vgone(nvp); rw_exit_write(&nfs_hashlock); @@ -153,7 +159,7 @@ loop: np->n_fhp = &np->n_fh; bcopy(fh, np->n_fhp, fhsize); np->n_fhsize = fhsize; - np2 = RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np); + np2 = RBT_INSERT(nfs_nodetree, &nmp->nm_ntree, np); KASSERT(np2 == NULL); np->n_accstamp = -1; rw_exit(&nfs_hashlock); @@ -234,7 +240,7 @@ nfs_reclaim(void *v) #endif nmp = VFSTONFS(vp->v_mount); rw_enter_write(&nfs_hashlock); - RB_REMOVE(nfs_nodetree, &nmp->nm_ntree, np); + RBT_REMOVE(nfs_nodetree, &nmp->nm_ntree, np); rw_exit_write(&nfs_hashlock); if (np->n_rcred) Index: sys/nfs/nfs_vfsops.c =================================================================== RCS file: /cvs/src/sys/nfs/nfs_vfsops.c,v retrieving revision 1.109 diff -u -p -r1.109 nfs_vfsops.c --- sys/nfs/nfs_vfsops.c 26 Apr 2016 18:37:03 -0000 1.109 +++ sys/nfs/nfs_vfsops.c 12 Aug 2016 07:06:58 -0000 @@ -652,7 +652,7 @@ mountnfs(struct nfs_args *argp, struct m nmp->nm_nam = nam; nfs_decode_args(nmp, argp, &mp->mnt_stat.mount_info.nfs_args); - RB_INIT(&nmp->nm_ntree); + nfs_nodetree_init(&nmp->nm_ntree); TAILQ_INIT(&nmp->nm_reqsq); timeout_set(&nmp->nm_rtimeout, nfs_timer, nmp); Index: sys/nfs/nfsmount.h =================================================================== RCS file: /cvs/src/sys/nfs/nfsmount.h,v retrieving revision 1.25 diff -u -p -r1.25 nfsmount.h --- sys/nfs/nfsmount.h 10 Sep 2012 11:10:59 -0000 1.25 +++ sys/nfs/nfsmount.h 12 Aug 2016 07:06:58 -0000 @@ -45,7 +45,7 @@ * Holds NFS specific information for mount. */ struct nfsmount { - RB_HEAD(nfs_nodetree, nfsnode) + RBT_HEAD(nfs_nodetree, nfsnode) nm_ntree; /* filehandle/node tree */ TAILQ_HEAD(reqs, nfsreq) nm_reqsq; /* request queue for this mount. */ @@ -103,6 +103,7 @@ int nfs_vptofh(struct vnode *, struct fi int nfs_fsinfo(struct nfsmount *, struct vnode *, struct ucred *, struct proc *); void nfs_init(void); +void nfs_nodetree_init(struct nfs_nodetree *); #endif /* _KERNEL */ Index: sys/nfs/nfsnode.h =================================================================== RCS file: /cvs/src/sys/nfs/nfsnode.h,v retrieving revision 1.39 diff -u -p -r1.39 nfsnode.h --- sys/nfs/nfsnode.h 15 Dec 2009 15:53:48 -0000 1.39 +++ sys/nfs/nfsnode.h 12 Aug 2016 07:06:58 -0000 @@ -70,7 +70,7 @@ struct sillyrename { * be well aligned and, therefore, tightly packed. */ struct nfsnode { - RB_ENTRY(nfsnode) n_entry; /* filehandle/node tree. 
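The new nfs_nodetree_init() above is not churn: RBT_INIT(_name, _head) expands to a _name##_RBT_INIT() call that rbtree.h's RBT_PROTOTYPE declares, so it can only be used where the prototype is in scope. The nfs_nodetree prototype lives in nfs_node.c, while the old RB_INIT caller, mountnfs(), lives in nfs_vfsops.c, hence the exported wrapper. A sketch of the same arrangement with hypothetical names (foo, footree, foo_cmp):

#include <sys/rbtree.h>

struct foo {
	RBT_ENTRY(foo)	f_entry;
	int		f_id;
};
RBT_HEAD(footree, foo);

/* In the one .c file that owns the comparator (the nfs_node.c role): */
static inline int
foo_cmp(const struct foo *a, const struct foo *b)
{
	return (a->f_id < b->f_id ? -1 : a->f_id > b->f_id);
}

RBT_PROTOTYPE(footree, foo, f_entry, foo_cmp);
RBT_GENERATE(footree, foo, f_entry, foo_cmp);

void
footree_init(struct footree *t)
{
	RBT_INIT(footree, t);	/* legal here: the prototype is visible */
}

/*
 * Files that only see the RBT_HEAD declaration (the mountnfs() role)
 * call footree_init() instead of RBT_INIT.
 */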
*/ + RBT_ENTRY(nfsnode) n_entry; /* filehandle/node tree. */ u_quad_t n_size; /* Current size of file */ struct vattr n_vattr; /* Vnode attribute cache */ time_t n_attrstamp; /* Attr. cache timestamp */ Index: sys/uvm/uvm.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm.h,v retrieving revision 1.61 diff -u -p -r1.61 uvm.h --- sys/uvm/uvm.h 11 Aug 2016 01:17:33 -0000 1.61 +++ sys/uvm/uvm.h 12 Aug 2016 07:06:58 -0000 @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm.h,v 1.61 2016/08/11 01:17:33 dlg Exp $ */ +/* $OpenBSD: uvm.h,v 1.60 2015/10/08 15:58:38 kettenis Exp $ */ /* $NetBSD: uvm.h,v 1.24 2000/11/27 08:40:02 chs Exp $ */ /* Index: sys/uvm/uvm_addr.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_addr.c,v retrieving revision 1.17 diff -u -p -r1.17 uvm_addr.c --- sys/uvm/uvm_addr.c 30 Jul 2016 16:37:54 -0000 1.17 +++ sys/uvm/uvm_addr.c 12 Aug 2016 07:06:58 -0000 @@ -93,8 +93,9 @@ struct uaddr_pivot_state { * Free space comparison. * Compares smaller free-space before larger free-space. */ -static __inline int -uvm_mapent_fspace_cmp(struct vm_map_entry *e1, struct vm_map_entry *e2) +static inline int +uvm_mapent_fspace_cmp(const struct vm_map_entry *e1, + const struct vm_map_entry *e2) { if (e1->fspace != e2->fspace) return (e1->fspace < e2->fspace ? -1 : 1); @@ -196,14 +197,14 @@ uvm_addr_entrybyspace(struct uaddr_free_ { struct vm_map_entry *tmp, *res; - tmp = RB_ROOT(free); + tmp = RBT_ROOT(uaddr_free_rbtree, free); res = NULL; while (tmp) { if (tmp->fspace >= sz) { res = tmp; - tmp = RB_LEFT(tmp, dfree.rbtree); + tmp = RBT_LEFT(uaddr_free_rbtree, tmp); } else if (tmp->fspace < sz) - tmp = RB_RIGHT(tmp, dfree.rbtree); + tmp = RBT_RIGHT(uaddr_free_rbtree, tmp); } return res; } @@ -387,8 +388,8 @@ uvm_addr_linsearch(struct vm_map *map, s for (entry = uvm_map_entrybyaddr(&map->addr, hint - (direction == -1 ? 1 : 0)); entry != NULL; entry = (direction == 1 ? - RB_NEXT(uvm_map_addr, &map->addr, entry) : - RB_PREV(uvm_map_addr, &map->addr, entry))) { + RBT_NEXT(uvm_map_addr, entry) : + RBT_PREV(uvm_map_addr, entry))) { if (VMMAP_FREE_START(entry) > high || VMMAP_FREE_END(entry) < low) { break; } @@ -626,7 +627,7 @@ uaddr_rnd_select(struct vm_map *map, str /* Walk up the tree, until there is at least sufficient space. */ while (entry != NULL && entry->fspace_augment < before_gap + after_gap + sz) - entry = RB_PARENT(entry, daddrs.addr_entry); + entry = RBT_PARENT(uvm_map_addr, entry); while (entry != NULL) { /* Test if this fits. */ @@ -644,20 +645,20 @@ uaddr_rnd_select(struct vm_map *map, str return 0; } - /* RB_NEXT, but skip subtrees that cannot possible fit. */ - next = RB_RIGHT(entry, daddrs.addr_entry); + /* RBT_NEXT, but skip subtrees that cannot possibly fit.
*/ + next = RBT_RIGHT(uvm_map_addr, entry); if (next != NULL && next->fspace_augment >= before_gap + after_gap + sz) { entry = next; - while ((next = RB_LEFT(entry, daddrs.addr_entry)) != + while ((next = RBT_LEFT(uvm_map_addr, entry)) != NULL) entry = next; } else { do_parent: - next = RB_PARENT(entry, daddrs.addr_entry); + next = RBT_PARENT(uvm_map_addr, entry); if (next == NULL) entry = NULL; - else if (RB_LEFT(next, daddrs.addr_entry) == entry) + else if (RBT_LEFT(uvm_map_addr, next) == entry) entry = next; else { entry = next; @@ -876,7 +877,7 @@ uaddr_kbootstrap_select(struct vm_map *m { vaddr_t tmp; - RB_FOREACH(*entry_out, uvm_map_addr, &map->addr) { + RBT_FOREACH(*entry_out, uvm_map_addr, &map->addr) { if (VMMAP_FREE_END(*entry_out) <= uvm_maxkaddr && uvm_addr_fitspace(addr_out, &tmp, VMMAP_FREE_START(*entry_out), VMMAP_FREE_END(*entry_out), @@ -918,7 +919,7 @@ uaddr_bestfit_create(vaddr_t minaddr, va uaddr->ubf_uaddr.uaddr_minaddr = minaddr; uaddr->ubf_uaddr.uaddr_maxaddr = maxaddr; uaddr->ubf_uaddr.uaddr_functions = &uaddr_bestfit_functions; - RB_INIT(&uaddr->ubf_free); + RBT_INIT(uaddr_free_rbtree, &uaddr->ubf_free); return &uaddr->ubf_uaddr; } @@ -936,7 +937,7 @@ uaddr_bestfit_insert(struct vm_map *map, struct vm_map_entry *rb_rv; uaddr = (struct uaddr_bestfit_state *)uaddr_p; - if ((rb_rv = RB_INSERT(uaddr_free_rbtree, &uaddr->ubf_free, entry)) != + if ((rb_rv = RBT_INSERT(uaddr_free_rbtree, &uaddr->ubf_free, entry)) != NULL) { panic("%s: duplicate insertion: state %p " "interting %p, colliding with %p", __func__, @@ -951,7 +952,7 @@ uaddr_bestfit_remove(struct vm_map *map, struct uaddr_bestfit_state *uaddr; uaddr = (struct uaddr_bestfit_state *)uaddr_p; - if (RB_REMOVE(uaddr_free_rbtree, &uaddr->ubf_free, entry) != entry) + if (RBT_REMOVE(uaddr_free_rbtree, &uaddr->ubf_free, entry) != entry) panic("%s: entry was not in tree", __func__); } @@ -983,7 +984,7 @@ uaddr_bestfit_select(struct vm_map *map, while (uvm_addr_fitspace(&min, &max, VMMAP_FREE_START(entry), VMMAP_FREE_END(entry), sz, align, offset, 0, guardsz) != 0) { - entry = RB_NEXT(uaddr_free_rbtree, &uaddr->ubf_free, entry); + entry = RBT_NEXT(uaddr_free_rbtree, entry); if (entry == NULL) return ENOMEM; } @@ -1124,7 +1125,7 @@ uaddr_pivot_newpivot(struct vm_map *map, */ path = arc4random(); found = NULL; - entry = RB_ROOT(&uaddr->up_free); + entry = RBT_ROOT(uaddr_free_rbtree, &uaddr->up_free); while (entry != NULL) { fit_error = uvm_addr_fitspace(&min, &max, MAX(VMMAP_FREE_START(entry), minaddr), @@ -1140,13 +1141,13 @@ uaddr_pivot_newpivot(struct vm_map *map, /* Next. 
*/ if (fit_error != 0) - entry = RB_RIGHT(entry, dfree.rbtree); + entry = RBT_RIGHT(uaddr_free_rbtree, entry); else if ((path & 0x1) == 0) { path >>= 1; - entry = RB_RIGHT(entry, dfree.rbtree); + entry = RBT_RIGHT(uaddr_free_rbtree, entry); } else { path >>= 1; - entry = RB_LEFT(entry, dfree.rbtree); + entry = RBT_LEFT(uaddr_free_rbtree, entry); } } if (found == NULL) @@ -1320,7 +1321,7 @@ uaddr_pivot_insert(struct vm_map *map, s vaddr_t start, end; uaddr = (struct uaddr_pivot_state *)uaddr_p; - if ((rb_rv = RB_INSERT(uaddr_free_rbtree, &uaddr->up_free, entry)) != + if ((rb_rv = RBT_INSERT(uaddr_free_rbtree, &uaddr->up_free, entry)) != NULL) { panic("%s: duplicate insertion: state %p " "inserting entry %p which collides with %p", __func__, @@ -1360,7 +1361,7 @@ uaddr_pivot_remove(struct vm_map *map, s struct uaddr_pivot *p; uaddr = (struct uaddr_pivot_state *)uaddr_p; - if (RB_REMOVE(uaddr_free_rbtree, &uaddr->up_free, entry) != entry) + if (RBT_REMOVE(uaddr_free_rbtree, &uaddr->up_free, entry) != entry) panic("%s: entry was not in tree", __func__); /* @@ -1394,7 +1395,7 @@ uaddr_pivot_create(vaddr_t minaddr, vadd uaddr->up_uaddr.uaddr_minaddr = minaddr; uaddr->up_uaddr.uaddr_maxaddr = maxaddr; uaddr->up_uaddr.uaddr_functions = &uaddr_pivot_functions; - RB_INIT(&uaddr->up_free); + RBT_INIT(uaddr_free_rbtree, &uaddr->up_free); memset(uaddr->up_pivots, 0, sizeof(uaddr->up_pivots)); return &uaddr->up_uaddr; @@ -1427,10 +1428,10 @@ uaddr_pivot_print(struct uvm_addr_state if (!full) return; - if (RB_EMPTY(&uaddr->up_free)) + if (RBT_EMPTY(uaddr_free_rbtree, &uaddr->up_free)) (*pr)("\tempty\n"); /* Print list of free space. */ - RB_FOREACH(entry, uaddr_free_rbtree, &uaddr->up_free) { + RBT_FOREACH(entry, uaddr_free_rbtree, &uaddr->up_free) { (*pr)("\t0x%lx - 0x%lx free (0x%lx bytes)\n", VMMAP_FREE_START(entry), VMMAP_FREE_END(entry), VMMAP_FREE_END(entry) - VMMAP_FREE_START(entry)); @@ -1556,6 +1557,6 @@ uaddr_stack_brk_create(vaddr_t minaddr, #ifndef SMALL_KERNEL -RB_GENERATE(uaddr_free_rbtree, vm_map_entry, dfree.rbtree, +RBT_GENERATE(uaddr_free_rbtree, vm_map_entry, dfree.rbtree, uvm_mapent_fspace_cmp); #endif /* !SMALL_KERNEL */ Index: sys/uvm/uvm_addr.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm_addr.h,v retrieving revision 1.5 diff -u -p -r1.5 uvm_addr.h --- sys/uvm/uvm_addr.h 30 Mar 2015 21:05:17 -0000 1.5 +++ sys/uvm/uvm_addr.h 12 Aug 2016 07:06:58 -0000 @@ -108,8 +108,8 @@ void uvm_addr_print(struct uvm_addr_s /* * Kernel bootstrap allocator. */ -RB_HEAD(uaddr_free_rbtree, vm_map_entry); -RB_PROTOTYPE(uaddr_free_rbtree, vm_map_entry, dfree.rbtree, +RBT_HEAD(uaddr_free_rbtree, vm_map_entry); +RBT_PROTOTYPE(uaddr_free_rbtree, vm_map_entry, dfree.rbtree, uvm_mapent_fspace_cmp); extern struct uvm_addr_state uaddr_kbootstrap; Index: sys/uvm/uvm_aobj.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v retrieving revision 1.81 diff -u -p -r1.81 uvm_aobj.c --- sys/uvm/uvm_aobj.c 17 Jun 2016 10:48:25 -0000 1.81 +++ sys/uvm/uvm_aobj.c 12 Aug 2016 07:06:58 -0000 @@ -872,7 +872,7 @@ uao_detach_locked(struct uvm_object *uob * Release swap resources then free the page. 
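uvm_addr_entrybyspace() and uaddr_pivot_newpivot() above descend the tree by hand, which shows the navigation side of the new API: RBT_ROOT, RBT_LEFT and RBT_RIGHT take the tree name rather than the entry field, since the name now identifies the generated tree type. A hedged sketch of the same best-fit style descent, over the illustrative itemtree from the earlier example (for keyed lookups RBT_NFIND already does this):

/* Smallest item with i_key >= key, or NULL. */
struct item *
item_nfind_manual(struct itemtree *t, int key)
{
	struct item *it, *best = NULL;

	it = RBT_ROOT(itemtree, t);
	while (it != NULL) {
		if (it->i_key >= key) {
			best = it;			/* candidate */
			it = RBT_LEFT(itemtree, it);	/* try smaller */
		} else
			it = RBT_RIGHT(itemtree, it);
	}
	return (best);
}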
*/ uvm_lock_pageq(); - while((pg = RB_ROOT(&uobj->memt)) != NULL) { + while((pg = RBT_ROOT(uvm_objtree, &uobj->memt)) != NULL) { if (pg->pg_flags & PG_BUSY) { atomic_setbits_int(&pg->pg_flags, PG_WANTED); uvm_unlock_pageq(); Index: sys/uvm/uvm_device.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_device.c,v retrieving revision 1.52 diff -u -p -r1.52 uvm_device.c --- sys/uvm/uvm_device.c 28 Aug 2015 00:03:54 -0000 1.52 +++ sys/uvm/uvm_device.c 12 Aug 2016 07:06:58 -0000 @@ -232,7 +232,7 @@ again: uobj->uo_refs--; return; } - KASSERT(uobj->uo_npages == 0 && RB_EMPTY(&uobj->memt)); + KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt)); /* is it being held? if so, wait until others are done. */ mtx_enter(&udv_lock); Index: sys/uvm/uvm_extern.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm_extern.h,v retrieving revision 1.139 diff -u -p -r1.139 uvm_extern.h --- sys/uvm/uvm_extern.h 5 Jun 2016 08:35:57 -0000 1.139 +++ sys/uvm/uvm_extern.h 12 Aug 2016 07:06:58 -0000 @@ -162,7 +162,7 @@ typedef int vm_prot_t; #define PHYSLOAD_DEVICE 0x01 /* don't add to the page queue */ #include -#include <sys/tree.h> +#include <sys/rbtree.h> #include #ifdef _KERNEL Index: sys/uvm/uvm_fault.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_fault.c,v retrieving revision 1.90 diff -u -p -r1.90 uvm_fault.c --- sys/uvm/uvm_fault.c 8 May 2016 11:52:32 -0000 1.90 +++ sys/uvm/uvm_fault.c 12 Aug 2016 07:06:58 -0000 @@ -1348,7 +1348,7 @@ uvm_fault_unwire_locked(vm_map_t map, va /* find the map entry for the current address. */ KASSERT(va >= entry->start); while (va >= entry->end) { - next = RB_NEXT(uvm_map_addr, &map->addr, entry); + next = RBT_NEXT(uvm_map_addr, entry); KASSERT(next != NULL && next->start <= entry->end); entry = next; } Index: sys/uvm/uvm_map.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_map.c,v retrieving revision 1.220 diff -u -p -r1.220 uvm_map.c --- sys/uvm/uvm_map.c 11 Aug 2016 01:17:33 -0000 1.220 +++ sys/uvm/uvm_map.c 12 Aug 2016 07:06:59 -0000 @@ -160,8 +160,6 @@ void uvm_map_addr_augment(struct vm_m static __inline void uvm_mapent_copy(struct vm_map_entry*, struct vm_map_entry*); -static int uvm_mapentry_addrcmp(struct vm_map_entry*, - struct vm_map_entry*); void uvm_mapent_free_insert(struct vm_map*, struct uvm_addr_state*, struct vm_map_entry*); void uvm_mapent_free_remove(struct vm_map*, @@ -335,8 +333,9 @@ vaddr_t uvm_maxkaddr; * (sorted by address) within a free-memory tree. */ -static __inline int -uvm_mapentry_addrcmp(struct vm_map_entry *e1, struct vm_map_entry *e2) +static inline int +uvm_mapentry_addrcmp(const struct vm_map_entry *e1, + const struct vm_map_entry *e2) { return e1->start < e2->start ?
-1 : e1->start > e2->start; } @@ -433,16 +432,12 @@ uvm_mapent_addr_insert(struct vm_map *ma { struct vm_map_entry *res; - if (RB_LEFT(entry, daddrs.addr_entry) != UVMMAP_DEADBEEF || - RB_RIGHT(entry, daddrs.addr_entry) != UVMMAP_DEADBEEF || - RB_PARENT(entry, daddrs.addr_entry) != UVMMAP_DEADBEEF) - panic("uvm_mapent_addr_insert: entry still in addr list"); KDASSERT(entry->start <= entry->end); KDASSERT((entry->start & (vaddr_t)PAGE_MASK) == 0 && (entry->end & (vaddr_t)PAGE_MASK) == 0); UVM_MAP_REQ_WRITE(map); - res = RB_INSERT(uvm_map_addr, &map->addr, entry); + res = RBT_INSERT(uvm_map_addr, &map->addr, entry); if (res != NULL) { panic("uvm_mapent_addr_insert: map %p entry %p " "(0x%lx-0x%lx G=0x%lx F=0x%lx) insert collision " @@ -462,11 +457,9 @@ uvm_mapent_addr_remove(struct vm_map *ma struct vm_map_entry *res; UVM_MAP_REQ_WRITE(map); - res = RB_REMOVE(uvm_map_addr, &map->addr, entry); + res = RBT_REMOVE(uvm_map_addr, &map->addr, entry); if (res != entry) panic("uvm_mapent_addr_remove"); - RB_LEFT(entry, daddrs.addr_entry) = RB_RIGHT(entry, daddrs.addr_entry) = - RB_PARENT(entry, daddrs.addr_entry) = UVMMAP_DEADBEEF; } /* @@ -521,12 +514,12 @@ uvm_map_entrybyaddr(struct uvm_map_addr { struct vm_map_entry *iter; - iter = RB_ROOT(atree); + iter = RBT_ROOT(uvm_map_addr, atree); while (iter != NULL) { if (iter->start > addr) - iter = RB_LEFT(iter, daddrs.addr_entry); + iter = RBT_LEFT(uvm_map_addr, iter); else if (VMMAP_FREE_END(iter) <= addr) - iter = RB_RIGHT(iter, daddrs.addr_entry); + iter = RBT_RIGHT(uvm_map_addr, iter); else return iter; } @@ -771,9 +764,6 @@ uvm_map_isavail(struct vm_map *map, stru struct uvm_map_addr *atree; struct vm_map_entry *i, *i_end; - if (addr + sz < addr) - return 0; - /* * Kernel memory above uvm_maxkaddr is considered unavailable. */ @@ -820,9 +810,9 @@ uvm_map_isavail(struct vm_map *map, stru * considered unavailable unless called by those allocators. */ i = *start_ptr; - i_end = RB_NEXT(uvm_map_addr, atree, *end_ptr); + i_end = RBT_NEXT(uvm_map_addr, *end_ptr); for (; i != i_end; - i = RB_NEXT(uvm_map_addr, atree, i)) { + i = RBT_NEXT(uvm_map_addr, i)) { if (i->start != i->end && i->end > addr) return 0; @@ -886,9 +876,9 @@ uvm_map_addr_augment_get(struct vm_map_e struct vm_map_entry *left, *right; augment = entry->fspace; - if ((left = RB_LEFT(entry, daddrs.addr_entry)) != NULL) + if ((left = RBT_LEFT(uvm_map_addr, entry)) != NULL) augment = MAX(augment, left->fspace_augment); - if ((right = RB_RIGHT(entry, daddrs.addr_entry)) != NULL) + if ((right = RBT_RIGHT(uvm_map_addr, entry)) != NULL) augment = MAX(augment, right->fspace_augment); return augment; } @@ -914,7 +904,7 @@ uvm_map_addr_augment(struct vm_map_entry if (entry->fspace_augment == augment) return; entry->fspace_augment = augment; - entry = RB_PARENT(entry, daddrs.addr_entry); + entry = RBT_PARENT(uvm_map_addr, entry); } } @@ -1489,7 +1479,7 @@ uvm_mapent_tryjoin(struct vm_map *map, s struct vm_map_entry *merged; /* Merge with previous entry. */ - other = RB_PREV(uvm_map_addr, &map->addr, entry); + other = RBT_PREV(uvm_map_addr, entry); if (other && uvm_mapent_isjoinable(map, other, entry)) { merged = uvm_mapent_merge(map, other, entry, dead); if (merged) @@ -1503,7 +1493,7 @@ uvm_mapent_tryjoin(struct vm_map *map, s * probably contains sensible info, only perform forward merging * in the absence of an amap. 
*/ - other = RB_NEXT(uvm_map_addr, &map->addr, entry); + other = RBT_NEXT(uvm_map_addr, entry); if (other && entry->aref.ar_amap == NULL && other->aref.ar_amap == NULL && uvm_mapent_isjoinable(map, entry, other)) { @@ -1627,7 +1617,7 @@ uvm_map_mkentry(struct vm_map *map, stru * We are iterating using last in reverse order. */ for (; first != last; last = prev) { - prev = RB_PREV(uvm_map_addr, &map->addr, last); + prev = RBT_PREV(uvm_map_addr, last); KDASSERT(last->start == last->end); free = uvm_map_uaddr_e(map, last); @@ -1703,12 +1693,6 @@ uvm_mapent_alloc(struct vm_map *map, int me->flags = 0; } - if (me != NULL) { - RB_LEFT(me, daddrs.addr_entry) = - RB_RIGHT(me, daddrs.addr_entry) = - RB_PARENT(me, daddrs.addr_entry) = UVMMAP_DEADBEEF; - } - out: return(me); } @@ -1832,7 +1816,7 @@ uvm_mapent_mkfree(struct vm_map *map, st if (prev == NULL || VMMAP_FREE_END(prev) != entry->start) - prev = RB_PREV(uvm_map_addr, &map->addr, entry); + prev = RBT_PREV(uvm_map_addr, entry); /* Entry is describing only free memory and has nothing to drain into. */ if (prev == NULL && entry->start == entry->end && markfree) { @@ -1959,7 +1943,7 @@ uvm_unmap_remove(struct vm_map *map, vad entry = uvm_map_entrybyaddr(&map->addr, start); KDASSERT(entry != NULL && entry->start <= start); if (entry->end <= start && markfree) - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); else UVM_MAP_CLIP_START(map, entry, start); @@ -1973,7 +1957,7 @@ uvm_unmap_remove(struct vm_map *map, vad if (entry->end > end || !markfree) UVM_MAP_CLIP_END(map, entry, end); KDASSERT(entry->start >= start && entry->end <= end); - next = RB_NEXT(uvm_map_addr, &map->addr, entry); + next = RBT_NEXT(uvm_map_addr, entry); /* Don't remove holes unless asked to do so. */ if (UVM_ET_ISHOLE(entry)) { @@ -2006,7 +1990,7 @@ uvm_unmap_remove(struct vm_map *map, vad if (markfree) { for (entry = uvm_map_entrybyaddr(&map->addr, start); entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { KDASSERT(entry->end <= start || entry->start == entry->end || UVM_ET_ISHOLE(entry)); @@ -2031,7 +2015,7 @@ uvm_map_pageable_pgon(struct vm_map *map struct vm_map_entry *iter; for (iter = first; iter != end; - iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { + iter = RBT_NEXT(uvm_map_addr, iter)) { KDASSERT(iter->start >= start_addr && iter->end <= end_addr); if (!VM_MAPENT_ISWIRED(iter) || UVM_ET_ISHOLE(iter)) continue; @@ -2076,7 +2060,7 @@ uvm_map_pageable_wire(struct vm_map *map * status of the entries we modify here cannot change. */ for (iter = first; iter != end; - iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { + iter = RBT_NEXT(uvm_map_addr, iter)) { KDASSERT(iter->start >= start_addr && iter->end <= end_addr); if (UVM_ET_ISHOLE(iter) || iter->start == iter->end || iter->protection == PROT_NONE) @@ -2109,7 +2093,7 @@ uvm_map_pageable_wire(struct vm_map *map error = 0; for (iter = first; error == 0 && iter != end; - iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { + iter = RBT_NEXT(uvm_map_addr, iter)) { if (UVM_ET_ISHOLE(iter) || iter->start == iter->end || iter->protection == PROT_NONE) continue; @@ -2136,7 +2120,7 @@ uvm_map_pageable_wire(struct vm_map *map * Use it as iterator to unmap successful mappings. 
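These uvm_map.c hunks repeat the most mechanical part of the conversion: RB_NEXT(name, head, elm) and RB_PREV(name, head, elm) lose the head argument, which works because struct rb_entry in rbtree.h carries a parent pointer. The one idiom that must survive the conversion is fetching the successor before unlinking, as the state-kill loops in pf_ioctl.c earlier do. A sketch against the illustrative itemtree; the free(9) call and M_TEMP type are assumed for the example, not taken from this diff:

void
item_purge(struct itemtree *t)
{
	struct item *it, *next;

	for (it = RBT_MIN(itemtree, t); it != NULL; it = next) {
		/* Get the successor while 'it' is still linked. */
		next = RBT_NEXT(itemtree, it);
		RBT_REMOVE(itemtree, t, it);
		free(it, M_TEMP, sizeof(*it));	/* assumed allocation */
	}
}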
*/ for (; first != iter; - first = RB_NEXT(uvm_map_addr, &map->addr, first)) { + first = RBT_NEXT(uvm_map_addr, first)) { if (UVM_ET_ISHOLE(first) || first->start == first->end || first->protection == PROT_NONE) @@ -2151,7 +2135,7 @@ uvm_map_pageable_wire(struct vm_map *map /* decrease counter in the rest of the entries */ for (; iter != end; - iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { + iter = RBT_NEXT(uvm_map_addr, iter)) { if (UVM_ET_ISHOLE(iter) || iter->start == iter->end || iter->protection == PROT_NONE) continue; @@ -2229,7 +2213,7 @@ uvm_map_pageable(struct vm_map *map, vad /* Check that the range has no holes. */ for (last = first; last != NULL && last->start < end; - last = RB_NEXT(uvm_map_addr, &map->addr, last)) { + last = RBT_NEXT(uvm_map_addr, last)) { if (UVM_ET_ISHOLE(last) || (last->end < end && VMMAP_FREE_END(last) != last->end)) { /* @@ -2248,14 +2232,14 @@ uvm_map_pageable(struct vm_map *map, vad * Note that last may be NULL. */ if (last == NULL) { - last = RB_MAX(uvm_map_addr, &map->addr); + last = RBT_MAX(uvm_map_addr, &map->addr); if (last->end < end) { error = EINVAL; goto out; } } else { KASSERT(last != first); - last = RB_PREV(uvm_map_addr, &map->addr, last); + last = RBT_PREV(uvm_map_addr, last); } /* Wire/unwire pages here. */ @@ -2273,7 +2257,7 @@ uvm_map_pageable(struct vm_map *map, vad */ if (VM_MAPENT_ISWIRED(last)) { UVM_MAP_CLIP_END(map, last, end); - tmp = RB_NEXT(uvm_map_addr, &map->addr, last); + tmp = RBT_NEXT(uvm_map_addr, last); } else tmp = last; @@ -2298,7 +2282,7 @@ out: */ if (!VM_MAPENT_ISWIRED(last)) { UVM_MAP_CLIP_END(map, last, end); - tmp = RB_NEXT(uvm_map_addr, &map->addr, last); + tmp = RBT_NEXT(uvm_map_addr, last); } else tmp = last; @@ -2324,7 +2308,7 @@ uvm_map_pageable_all(struct vm_map *map, vm_map_lock(map); if (flags == 0) { - uvm_map_pageable_pgon(map, RB_MIN(uvm_map_addr, &map->addr), + uvm_map_pageable_pgon(map, RBT_MIN(uvm_map_addr, &map->addr), NULL, map->min_offset, map->max_offset); vm_map_modflags(map, 0, VM_MAP_WIREFUTURE); @@ -2344,7 +2328,7 @@ uvm_map_pageable_all(struct vm_map *map, * If the number exceeds the limit, abort. */ size = 0; - RB_FOREACH(iter, uvm_map_addr, &map->addr) { + RBT_FOREACH(iter, uvm_map_addr, &map->addr) { if (VM_MAPENT_ISWIRED(iter) || UVM_ET_ISHOLE(iter)) continue; @@ -2368,7 +2352,7 @@ uvm_map_pageable_all(struct vm_map *map, /* * uvm_map_pageable_wire will release lcok */ - return uvm_map_pageable_wire(map, RB_MIN(uvm_map_addr, &map->addr), + return uvm_map_pageable_wire(map, RBT_MIN(uvm_map_addr, &map->addr), NULL, map->min_offset, map->max_offset, 0); } @@ -2399,7 +2383,7 @@ uvm_map_setup(struct vm_map *map, vaddr_ max -= PAGE_SIZE; } - RB_INIT(&map->addr); + RBT_INIT(uvm_map_addr, &map->addr); map->uaddr_exe = NULL; for (i = 0; i < nitems(map->uaddr_any); ++i) map->uaddr_any[i] = NULL; @@ -2483,14 +2467,14 @@ uvm_map_teardown(struct vm_map *map) * The vm_map is broken down in linear time. */ TAILQ_INIT(&dead_entries); - if ((entry = RB_ROOT(&map->addr)) != NULL) + if ((entry = RBT_ROOT(uvm_map_addr, &map->addr)) != NULL) DEAD_ENTRY_PUSH(&dead_entries, entry); while (entry != NULL) { sched_pause(); uvm_unmap_kill_entry(map, entry); - if ((tmp = RB_LEFT(entry, daddrs.addr_entry)) != NULL) + if ((tmp = RBT_LEFT(uvm_map_addr, entry)) != NULL) DEAD_ENTRY_PUSH(&dead_entries, tmp); - if ((tmp = RB_RIGHT(entry, daddrs.addr_entry)) != NULL) + if ((tmp = RBT_RIGHT(uvm_map_addr, entry)) != NULL) DEAD_ENTRY_PUSH(&dead_entries, tmp); /* Update wave-front. 
*/ entry = TAILQ_NEXT(entry, dfree.deadq); @@ -2498,7 +2482,7 @@ uvm_map_teardown(struct vm_map *map) #ifdef VMMAP_DEBUG numt = numq = 0; - RB_FOREACH(entry, uvm_map_addr, &map->addr) + RBT_FOREACH(entry, uvm_map_addr, &map->addr) numt++; TAILQ_FOREACH(entry, &dead_entries, dfree.deadq) numq++; @@ -2520,7 +2504,7 @@ uvm_map_teardown(struct vm_map *map) void uvm_map_setup_entries(struct vm_map *map) { - KDASSERT(RB_EMPTY(&map->addr)); + KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr)); uvm_map_fix_space(map, NULL, map->min_offset, map->max_offset, 0); } @@ -2548,8 +2532,8 @@ uvm_map_splitentry(struct vm_map *map, s KASSERT(orig->start < split && VMMAP_FREE_END(orig) > split); #ifdef VMMAP_DEBUG - KDASSERT(RB_FIND(uvm_map_addr, &map->addr, orig) == orig); - KDASSERT(RB_FIND(uvm_map_addr, &map->addr, next) != next); + KDASSERT(RBT_FIND(uvm_map_addr, orig) == orig); + KDASSERT(RBT_FIND(uvm_map_addr, next) != next); #endif /* VMMAP_DEBUG */ /* @@ -2650,7 +2634,7 @@ uvm_tree_sanity(struct vm_map *map, char struct uvm_addr_state *free; addr = vm_map_min(map); - RB_FOREACH(iter, uvm_map_addr, &map->addr) { + RBT_FOREACH(iter, uvm_map_addr, &map->addr) { /* * Valid start, end. * Catch overflow for end+fspace. @@ -2705,7 +2689,7 @@ uvm_tree_size_chk(struct vm_map *map, ch vsize_t size; size = 0; - RB_FOREACH(iter, uvm_map_addr, &map->addr) { + RBT_FOREACH(iter, uvm_map_addr, &map->addr) { if (!UVM_ET_ISHOLE(iter)) size += iter->end - iter->start; } @@ -2737,7 +2721,7 @@ vmspace_validate(struct vm_map *map) stack_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr); stack = heap = 0; - RB_FOREACH(iter, uvm_map_addr, &map->addr) { + RBT_FOREACH(iter, uvm_map_addr, &map->addr) { imin = imax = iter->start; if (UVM_ET_ISHOLE(iter) || iter->object.uvm_obj != NULL) @@ -2859,7 +2843,7 @@ uvm_map_printit(struct vm_map *map, bool if (!full) goto print_uaddr; - RB_FOREACH(entry, uvm_map_addr, &map->addr) { + RBT_FOREACH(entry, uvm_map_addr, &map->addr) { (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n", entry, entry->start, entry->end, entry->object.uvm_obj, (long long)entry->offset, entry->aref.ar_amap, @@ -2922,7 +2906,7 @@ uvm_object_printit(uobj, full, pr) return; } (*pr)(" PAGES :\n "); - RB_FOREACH(pg, uvm_objtree, &uobj->memt) { + RBT_FOREACH(pg, uvm_objtree, &uobj->memt) { (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset); if ((cnt % 3) == 2) { (*pr)("\n "); @@ -2984,7 +2968,7 @@ uvm_page_printit(pg, full, pr) uobj = pg->uobject; if (uobj) { (*pr)(" checking object list\n"); - RB_FOREACH(tpg, uvm_objtree, &uobj->memt) { + RBT_FOREACH(tpg, uvm_objtree, &uobj->memt) { if (tpg == pg) { break; } @@ -3061,11 +3045,11 @@ uvm_map_protect(struct vm_map *map, vadd first = uvm_map_entrybyaddr(&map->addr, start); KDASSERT(first != NULL); if (first->end < start) - first = RB_NEXT(uvm_map_addr, &map->addr, first); + first = RBT_NEXT(uvm_map_addr, first); /* First, check for protection violations. */ for (iter = first; iter != NULL && iter->start < end; - iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { + iter = RBT_NEXT(uvm_map_addr, iter)) { /* Treat memory holes as free space. */ if (iter->start == iter->end || UVM_ET_ISHOLE(iter)) continue; @@ -3085,7 +3069,7 @@ uvm_map_protect(struct vm_map *map, vadd /* Fix protections. */ for (iter = first; iter != NULL && iter->start < end; - iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) { + iter = RBT_NEXT(uvm_map_addr, iter)) { /* Treat memory holes as free space. 
*/ if (iter->start == iter->end || UVM_ET_ISHOLE(iter)) continue; @@ -3296,7 +3280,7 @@ uvmspace_exec(struct proc *p, vaddr_t st uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead_entries, TRUE, FALSE); - KDASSERT(RB_EMPTY(&map->addr)); + KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr)); /* Nuke statistics and boundaries. */ memset(&ovm->vm_startcopy, 0, @@ -3408,7 +3392,7 @@ uvm_share(struct vm_map *dstmap, vaddr_t unmap_end = dstaddr; for (; src_entry != NULL; psrc_entry = src_entry, - src_entry = RB_NEXT(uvm_map_addr, &srcmap->addr, src_entry)) { + src_entry = RBT_NEXT(uvm_map_addr, src_entry)) { /* hole in address space, bail out */ if (psrc_entry != NULL && psrc_entry->end != src_entry->start) break; @@ -3774,7 +3758,7 @@ uvmspace_fork(struct process *pr) /* go entry-by-entry */ TAILQ_INIT(&dead); - RB_FOREACH(old_entry, uvm_map_addr, &old_map->addr) { + RBT_FOREACH(old_entry, uvm_map_addr, &old_map->addr) { if (old_entry->start == old_entry->end) continue; @@ -3953,7 +3937,7 @@ uvm_map_checkprot(struct vm_map *map, va */ for (entry = uvm_map_entrybyaddr(&map->addr, start); entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { /* Fail if a hole is found. */ if (UVM_ET_ISHOLE(entry) || (entry->end < end && entry->end != VMMAP_FREE_END(entry))) @@ -4007,7 +3991,7 @@ uvm_map_deallocate(vm_map_t map) uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead, TRUE, FALSE); pmap_destroy(map->pmap); - KASSERT(RB_EMPTY(&map->addr)); + KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr)); free(map, M_VMMAP, sizeof *map); uvm_unmap_detach(&dead, 0); @@ -4049,12 +4033,12 @@ uvm_map_inherit(struct vm_map *map, vadd if (entry->end > start) UVM_MAP_CLIP_START(map, entry, start); else - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); while (entry != NULL && entry->start < end) { UVM_MAP_CLIP_END(map, entry, end); entry->inheritance = new_inheritance; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); } vm_map_unlock(map); @@ -4093,7 +4077,7 @@ uvm_map_advice(struct vm_map *map, vaddr if (entry != NULL && entry->end > start) UVM_MAP_CLIP_START(map, entry, start); else if (entry!= NULL) - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); /* * XXXJRT: disallow holes? @@ -4101,7 +4085,7 @@ uvm_map_advice(struct vm_map *map, vaddr while (entry != NULL && entry->start < end) { UVM_MAP_CLIP_END(map, entry, end); entry->advice = new_advice; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); } vm_map_unlock(map); @@ -4158,7 +4142,7 @@ uvm_map_extract(struct vm_map *srcmap, v /* Check that the range is contiguous. */ for (entry = first; entry != NULL && entry->end < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { if (VMMAP_FREE_END(entry) != entry->end || UVM_ET_ISHOLE(entry)) { error = EINVAL; @@ -4178,7 +4162,7 @@ uvm_map_extract(struct vm_map *srcmap, v * Also, perform clipping of last if not UVM_EXTRACT_QREF. 
*/ for (entry = first; entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { if (UVM_ET_ISNEEDSCOPY(entry)) amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end); if (UVM_ET_ISNEEDSCOPY(entry)) { @@ -4207,7 +4191,7 @@ uvm_map_extract(struct vm_map *srcmap, v */ /* step 1: start looping through map entries, performing extraction. */ for (entry = first; entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { KDASSERT(!UVM_ET_ISNEEDSCOPY(entry)); if (UVM_ET_ISHOLE(entry)) continue; @@ -4303,7 +4287,7 @@ uvm_map_clean(struct vm_map *map, vaddr_ /* Make a first pass to check for holes. */ for (entry = first; entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { if (UVM_ET_ISSUBMAP(entry)) { vm_map_unlock_read(map); return EINVAL; @@ -4319,7 +4303,7 @@ uvm_map_clean(struct vm_map *map, vaddr_ error = 0; for (entry = first; entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { amap = entry->aref.ar_amap; /* top layer */ if (UVM_ET_ISOBJ(entry)) uobj = entry->object.uvm_obj; @@ -4690,7 +4674,7 @@ uvm_map_kmem_grow(struct vm_map *map, st end = MAX(uvm_maxkaddr, map->min_offset); entry = uvm_map_entrybyaddr(&map->addr, end); while (entry && entry->fspace < alloc_sz) - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); if (entry) { end = MAX(VMMAP_FREE_START(entry), end); end += MIN(sz, map->max_offset - end); @@ -4718,9 +4702,9 @@ uvm_map_freelist_update_clear(struct vm_ struct vm_map_entry *entry, *prev, *next; prev = NULL; - for (entry = RB_MIN(uvm_map_addr, &map->addr); entry != NULL; + for (entry = RBT_MIN(uvm_map_addr, &map->addr); entry != NULL; entry = next) { - next = RB_NEXT(uvm_map_addr, &map->addr, entry); + next = RBT_NEXT(uvm_map_addr, entry); free = uvm_map_uaddr_e(map, entry); uvm_mapent_free_remove(map, free, entry); @@ -4743,7 +4727,7 @@ uvm_map_freelist_update_refill(struct vm struct vm_map_entry *entry; vaddr_t min, max; - RB_FOREACH(entry, uvm_map_addr, &map->addr) { + RBT_FOREACH(entry, uvm_map_addr, &map->addr) { min = VMMAP_FREE_START(entry); max = VMMAP_FREE_END(entry); entry->fspace = 0; @@ -4981,15 +4965,15 @@ uvm_map_mquery(struct vm_map *map, vaddr if (addr >= map->max_offset) goto out; else - entry = RB_MIN(uvm_map_addr, &map->addr); + entry = RBT_MIN(uvm_map_addr, &map->addr); } else if (VMMAP_FREE_START(entry) <= addr) { /* [3] Bumped into used memory. */ - entry = RB_NEXT(uvm_map_addr, &map->addr, entry); + entry = RBT_NEXT(uvm_map_addr, entry); } /* Test if the next entry is sufficient for the allocation. 
*/ for (; entry != NULL; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { if (entry->fspace == 0) continue; addr = VMMAP_FREE_START(entry); @@ -5242,7 +5226,7 @@ uvm_map_fill_vmmap(struct vm_map *map, s start = (vaddr_t)kve[0].kve_start; vm_map_lock(map); - RB_FOREACH(entry, uvm_map_addr, &map->addr) { + RBT_FOREACH(entry, uvm_map_addr, &map->addr) { if (cnt == maxcnt) { error = ENOMEM; break; @@ -5274,13 +5258,8 @@ uvm_map_fill_vmmap(struct vm_map *map, s } #endif - -#undef RB_AUGMENT -#define RB_AUGMENT(x) uvm_map_addr_augment((x)) -RB_GENERATE(uvm_map_addr, vm_map_entry, daddrs.addr_entry, - uvm_mapentry_addrcmp); -#undef RB_AUGMENT - +RBT_GENERATE_AUGMENT(uvm_map_addr, vm_map_entry, daddrs.addr_entry, + uvm_mapentry_addrcmp, uvm_map_addr_augment); /* * MD code: vmspace allocator setup. Index: sys/uvm/uvm_map.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm_map.h,v retrieving revision 1.56 diff -u -p -r1.56 uvm_map.h --- sys/uvm/uvm_map.h 11 Aug 2016 01:17:33 -0000 1.56 +++ sys/uvm/uvm_map.h 12 Aug 2016 07:06:59 -0000 @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_map.h,v 1.56 2016/08/11 01:17:33 dlg Exp $ */ +/* $OpenBSD: uvm_map.h,v 1.55 2015/09/09 23:33:37 kettenis Exp $ */ /* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */ /* @@ -160,12 +160,12 @@ union vm_map_object { */ struct vm_map_entry { union { - RB_ENTRY(vm_map_entry) addr_entry; /* address tree */ + RBT_ENTRY(vm_map_entry) addr_entry; /* address tree */ SLIST_ENTRY(vm_map_entry) addr_kentry; } daddrs; union { - RB_ENTRY(vm_map_entry) rbtree; /* Link freespace tree. */ + RBT_ENTRY(vm_map_entry) rbtree; /* Link freespace tree. */ TAILQ_ENTRY(vm_map_entry) tailq;/* Link freespace queue. */ TAILQ_ENTRY(vm_map_entry) deadq;/* dead entry queue */ } dfree; @@ -201,8 +201,8 @@ struct vm_map_entry { #define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0) TAILQ_HEAD(uvm_map_deadq, vm_map_entry); /* dead entry queue */ -RB_HEAD(uvm_map_addr, vm_map_entry); -RB_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry, +RBT_HEAD(uvm_map_addr, vm_map_entry); +RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry, uvm_mapentry_addrcmp); /* Index: sys/uvm/uvm_mmap.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_mmap.c,v retrieving revision 1.138 diff -u -p -r1.138 uvm_mmap.c --- sys/uvm/uvm_mmap.c 8 Aug 2016 17:15:51 -0000 1.138 +++ sys/uvm/uvm_mmap.c 12 Aug 2016 07:06:59 -0000 @@ -234,12 +234,12 @@ sys_mincore(struct proc *p, void *v, reg for (/* nothing */; entry != NULL && entry->start < end; - entry = RB_NEXT(uvm_map_addr, &map->addr, entry)) { + entry = RBT_NEXT(uvm_map_addr, entry)) { KASSERT(!UVM_ET_ISSUBMAP(entry)); KASSERT(start >= entry->start); /* Make sure there are no holes. 
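The last uvm_map.c hunk above replaces the old trick of re-#defining RB_AUGMENT around RB_GENERATE with RBT_GENERATE_AUGMENT, which takes the augment function as an explicit argument and carries it in the generated tree type (the t_augment member of struct rb_type in rbtree.h). A minimal sketch of an augmented tree tracking a subtree maximum, under illustrative names; the callback propagates upward the same way uvm_map_addr_augment() does:

#include <sys/rbtree.h>

struct anode {
	RBT_ENTRY(anode) a_entry;
	int		 a_val;
	int		 a_max;	/* augment: max a_val in this subtree */
};
RBT_HEAD(atree, anode);

static inline int
anode_cmp(const struct anode *a, const struct anode *b)
{
	return (a->a_val < b->a_val ? -1 : a->a_val > b->a_val);
}

RBT_PROTOTYPE(atree, anode, a_entry, anode_cmp);

/* Recompute a_max from the children, propagating toward the root. */
static void
anode_augment(struct anode *an)
{
	struct anode *c;
	int max;

	while (an != NULL) {
		max = an->a_val;
		if ((c = RBT_LEFT(atree, an)) != NULL && c->a_max > max)
			max = c->a_max;
		if ((c = RBT_RIGHT(atree, an)) != NULL && c->a_max > max)
			max = c->a_max;
		if (an->a_max == max)
			break;		/* nothing changed above here */
		an->a_max = max;
		an = RBT_PARENT(atree, an);
	}
}

RBT_GENERATE_AUGMENT(atree, anode, a_entry, anode_cmp, anode_augment);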
*/ - next = RB_NEXT(uvm_map_addr, &map->addr, entry); + next = RBT_NEXT(uvm_map_addr, entry); if (entry->end < end && (next == NULL || next->start > entry->end)) { Index: sys/uvm/uvm_object.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_object.c,v retrieving revision 1.13 diff -u -p -r1.13 uvm_object.c --- sys/uvm/uvm_object.c 21 Aug 2015 16:04:35 -0000 1.13 +++ sys/uvm/uvm_object.c 12 Aug 2016 07:06:59 -0000 @@ -50,7 +50,7 @@ void uvm_objinit(struct uvm_object *uobj, struct uvm_pagerops *pgops, int refs) { uobj->pgops = pgops; - RB_INIT(&uobj->memt); + RBT_INIT(uvm_objtree, &uobj->memt); uobj->uo_npages = 0; uobj->uo_refs = refs; } Index: sys/uvm/uvm_object.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm_object.h,v retrieving revision 1.21 diff -u -p -r1.21 uvm_object.h --- sys/uvm/uvm_object.h 11 Jul 2014 16:35:40 -0000 1.21 +++ sys/uvm/uvm_object.h 12 Aug 2016 07:06:59 -0000 @@ -41,7 +41,7 @@ struct uvm_object { struct uvm_pagerops *pgops; /* pager ops */ - RB_HEAD(uvm_objtree, vm_page) memt; /* pages in object */ + RBT_HEAD(uvm_objtree, vm_page) memt; /* pages in object */ int uo_npages; /* # of pages in memt */ int uo_refs; /* reference count */ }; @@ -76,8 +76,8 @@ extern struct uvm_pagerops uvm_vnodeops; extern struct uvm_pagerops uvm_deviceops; /* For object trees */ -int uvm_pagecmp(struct vm_page *, struct vm_page *); -RB_PROTOTYPE(uvm_objtree, vm_page, objt, uvm_pagecmp) +inline int uvm_pagecmp(const struct vm_page *, const struct vm_page *); +RBT_PROTOTYPE(uvm_objtree, vm_page, objt, uvm_pagecmp); #define UVM_OBJ_IS_VNODE(uobj) \ ((uobj)->pgops == &uvm_vnodeops) Index: sys/uvm/uvm_page.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_page.c,v retrieving revision 1.144 diff -u -p -r1.144 uvm_page.c --- sys/uvm/uvm_page.c 30 Oct 2015 16:47:01 -0000 1.144 +++ sys/uvm/uvm_page.c 12 Aug 2016 07:06:59 -0000 @@ -78,14 +78,14 @@ /* * for object trees */ -RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp); - -int -uvm_pagecmp(struct vm_page *a, struct vm_page *b) +static inline int +uvm_pagecmp(const struct vm_page *a, const struct vm_page *b) { return (a->offset < b->offset ? -1 : a->offset > b->offset); } +RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp); + /* * global vars... XXXCDC: move to uvm. structure. 
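uvm_pagelookup(), converted in the next hunk, shows the other common lookup style: no separate cmp struct, just a full dummy element on the stack with only the compared field initialized, which is safe because the comparator reads nothing else. The same pattern against the illustrative itemtree from earlier:

struct item *
item_lookup(struct itemtree *t, int key)
{
	struct item k;	/* only i_key is initialized */

	k.i_key = key;
	return (RBT_FIND(itemtree, t, &k));
}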
Index: sys/uvm/uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.144
diff -u -p -r1.144 uvm_page.c
--- sys/uvm/uvm_page.c	30 Oct 2015 16:47:01 -0000	1.144
+++ sys/uvm/uvm_page.c	12 Aug 2016 07:06:59 -0000
@@ -78,14 +78,14 @@
 /*
  * for object trees
  */
-RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
-
-int
-uvm_pagecmp(struct vm_page *a, struct vm_page *b)
+static inline int
+uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
 {
 	return (a->offset < b->offset ? -1 : a->offset > b->offset);
 }
 
+RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
+
 /*
  * global vars... XXXCDC: move to uvm. structure.
  */
@@ -134,7 +134,7 @@ uvm_pageinsert(struct vm_page *pg)
 	struct vm_page	*dupe;
 
 	KASSERT((pg->pg_flags & PG_TABLED) == 0);
-	dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
+	dupe = RBT_INSERT(uvm_objtree, &pg->uobject->memt, pg);
 	/* not allowed to insert over another page */
 	KASSERT(dupe == NULL);
 	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
@@ -150,7 +150,7 @@ static __inline void
 uvm_pageremove(struct vm_page *pg)
 {
 	KASSERT(pg->pg_flags & PG_TABLED);
-	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
+	RBT_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
 	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
 
 	pg->uobject->uo_npages--;
@@ -1203,7 +1203,7 @@ uvm_pagelookup(struct uvm_object *obj, v
 	struct vm_page pg;
 
 	pg.offset = off;
-	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
+	return (RBT_FIND(uvm_objtree, &obj->memt, &pg));
 }
 
 /*
Index: sys/uvm/uvm_page.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.h,v
retrieving revision 1.61
diff -u -p -r1.61 uvm_page.h
--- sys/uvm/uvm_page.h	9 Mar 2016 16:45:43 -0000	1.61
+++ sys/uvm/uvm_page.h	12 Aug 2016 07:06:59 -0000
@@ -94,7 +94,7 @@ TAILQ_HEAD(pglist, vm_page);
 
 struct vm_page {
 	TAILQ_ENTRY(vm_page)	pageq;	/* queue info for FIFO
 					 * queue or free list (P) */
-	RB_ENTRY(vm_page)	objt;	/* object tree */
+	RBT_ENTRY(vm_page)	objt;	/* object tree */
 	struct vm_anon		*uanon;	/* anon (P) */
 	struct uvm_object	*uobject;	/* object (P) */
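uvm_pagelookup() above keeps its old shape under the new API: a throwaway struct vm_page on the stack, with only the field the comparator reads filled in, serves as the search key, and RBT_FIND() never touches the key's linkage. The same idiom in a hedged, self-contained sketch (names and types are illustrative, same availability assumption as above):

	#include <sys/rbtree.h>

	struct page {
		RBT_ENTRY(page) objt;	/* linkage, unused in a key */
		unsigned long offset;	/* the only field the comparator reads */
		char data[64];		/* payload, irrelevant to lookup */
	};

	static inline int
	page_cmp(const struct page *a, const struct page *b)
	{
		return a->offset < b->offset ? -1 : a->offset > b->offset;
	}

	RBT_HEAD(pagetree, page);
	RBT_PROTOTYPE(pagetree, page, objt, page_cmp);
	RBT_GENERATE(pagetree, page, objt, page_cmp);

	static struct page *
	page_lookup(struct pagetree *t, unsigned long off)
	{
		struct page key;	/* never inserted, never linked */

		key.offset = off;
		return RBT_FIND(pagetree, t, &key);
	}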
Index: sys/uvm/uvm_pmemrange.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_pmemrange.c,v
retrieving revision 1.50
diff -u -p -r1.50 uvm_pmemrange.c
--- sys/uvm/uvm_pmemrange.c	29 Jan 2016 11:50:40 -0000	1.50
+++ sys/uvm/uvm_pmemrange.c	12 Aug 2016 07:06:59 -0000
@@ -70,11 +70,61 @@
 #define	VALID_FLAGS(pg_flags) \
 	(((pg_flags) & ~(PQ_FREE|PG_ZERO|PG_PMAPMASK)) == 0x0)
 
-/* Tree comparators. */
-int	uvm_pmemrange_addr_cmp(struct uvm_pmemrange *, struct uvm_pmemrange *);
-int	uvm_pmemrange_use_cmp(struct uvm_pmemrange *, struct uvm_pmemrange *);
-int	uvm_pmr_addr_cmp(struct vm_page *, struct vm_page *);
-int	uvm_pmr_size_cmp(struct vm_page *, struct vm_page *);
+/*
+ * Comparator: sort by address ascending.
+ */
+static inline int
+uvm_pmemrange_addr_cmp(const struct uvm_pmemrange *lhs,
+    const struct uvm_pmemrange *rhs)
+{
+	return lhs->low < rhs->low ? -1 : lhs->low > rhs->low;
+}
+
+/*
+ * Comparator: sort by use ascending.
+ *
+ * The higher the use value of a range, the more devices need memory in
+ * this range. Therefore allocate from the range with the lowest use first.
+ */
+static inline int
+uvm_pmemrange_use_cmp(const struct uvm_pmemrange *lhs,
+    const struct uvm_pmemrange *rhs)
+{
+	int result;
+
+	result = lhs->use < rhs->use ? -1 : lhs->use > rhs->use;
+	if (result == 0)
+		result = uvm_pmemrange_addr_cmp(lhs, rhs);
+	return result;
+}
+
+static inline int
+uvm_pmr_addr_cmp(const struct vm_page *lhs, const struct vm_page *rhs)
+{
+	paddr_t lhs_addr, rhs_addr;
+
+	lhs_addr = VM_PAGE_TO_PHYS(lhs);
+	rhs_addr = VM_PAGE_TO_PHYS(rhs);
+
+	return (lhs_addr < rhs_addr ? -1 : lhs_addr > rhs_addr);
+}
+
+static inline int
+uvm_pmr_size_cmp(const struct vm_page *lhs, const struct vm_page *rhs)
+{
+	psize_t lhs_size, rhs_size;
+	int cmp;
+
+	/* Using second tree, so we receive pg[1] instead of pg[0]. */
+	lhs_size = (lhs - 1)->fpgsz;
+	rhs_size = (rhs - 1)->fpgsz;
+
+	cmp = (lhs_size < rhs_size ? -1 : lhs_size > rhs_size);
+	if (cmp == 0)
+		cmp = uvm_pmr_addr_cmp(lhs - 1, rhs - 1);
+	return cmp;
+}
+
 
 int	uvm_pmr_pg_to_memtype(struct vm_page *);
 #ifdef DDB
@@ -95,9 +145,9 @@ uvm_pmr_pg_to_memtype(struct vm_page *pg
 }
 
 /* Trees. */
-RB_GENERATE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
-RB_GENERATE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
-RB_GENERATE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
+RBT_GENERATE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
+RBT_GENERATE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
+RBT_GENERATE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
     uvm_pmemrange_addr_cmp);
 
 /* Validation. */
@@ -178,60 +228,6 @@ pow2divide(psize_t num, psize_t denom)
 #define PMR_ALIGN(pgno, align) \
 	(((pgno) + ((align) - 1)) & ~((align) - 1))
 
-
-/*
- * Comparator: sort by address ascending.
- */
-int
-uvm_pmemrange_addr_cmp(struct uvm_pmemrange *lhs, struct uvm_pmemrange *rhs)
-{
-	return lhs->low < rhs->low ? -1 : lhs->low > rhs->low;
-}
-
-/*
- * Comparator: sort by use ascending.
- *
- * The higher the use value of a range, the more devices need memory in
- * this range. Therefore allocate from the range with the lowest use first.
- */
-int
-uvm_pmemrange_use_cmp(struct uvm_pmemrange *lhs, struct uvm_pmemrange *rhs)
-{
-	int result;
-
-	result = lhs->use < rhs->use ? -1 : lhs->use > rhs->use;
-	if (result == 0)
-		result = uvm_pmemrange_addr_cmp(lhs, rhs);
-	return result;
-}
-
-int
-uvm_pmr_addr_cmp(struct vm_page *lhs, struct vm_page *rhs)
-{
-	paddr_t lhs_addr, rhs_addr;
-
-	lhs_addr = VM_PAGE_TO_PHYS(lhs);
-	rhs_addr = VM_PAGE_TO_PHYS(rhs);
-
-	return (lhs_addr < rhs_addr ? -1 : lhs_addr > rhs_addr);
-}
-
-int
-uvm_pmr_size_cmp(struct vm_page *lhs, struct vm_page *rhs)
-{
-	psize_t lhs_size, rhs_size;
-	int cmp;
-
-	/* Using second tree, so we receive pg[1] instead of pg[0]. */
-	lhs_size = (lhs - 1)->fpgsz;
-	rhs_size = (rhs - 1)->fpgsz;
-
-	cmp = (lhs_size < rhs_size ? -1 : lhs_size > rhs_size);
-	if (cmp == 0)
-		cmp = uvm_pmr_addr_cmp(lhs - 1, rhs - 1);
-	return cmp;
-}
-
 /*
  * Find the first range of free pages that is at least sz pages long.
  */
@@ -245,14 +241,14 @@ uvm_pmr_nfindsz(struct uvm_pmemrange *pm
 	if (sz == 1 && !TAILQ_EMPTY(&pmr->single[mti]))
 		return TAILQ_FIRST(&pmr->single[mti]);
 
-	node = RB_ROOT(&pmr->size[mti]);
+	node = RBT_ROOT(uvm_pmr_size, &pmr->size[mti]);
 	best = NULL;
 	while (node != NULL) {
 		if ((node - 1)->fpgsz >= sz) {
 			best = (node - 1);
-			node = RB_LEFT(node, objt);
+			node = RBT_LEFT(uvm_pmr_size, node);
 		} else
-			node = RB_RIGHT(node, objt);
+			node = RBT_RIGHT(uvm_pmr_size, node);
 	}
 	return best;
 }
@@ -271,9 +267,9 @@ uvm_pmr_nextsz(struct uvm_pmemrange *pmr
 		if (TAILQ_NEXT(pg, pageq) != NULL)
 			return TAILQ_NEXT(pg, pageq);
 		else
-			npg = RB_MIN(uvm_pmr_size, &pmr->size[mt]);
+			npg = RBT_MIN(uvm_pmr_size, &pmr->size[mt]);
 	} else
-		npg = RB_NEXT(uvm_pmr_size, &pmr->size[mt], pg + 1);
+		npg = RBT_NEXT(uvm_pmr_size, pg + 1);
 
 	return npg == NULL ? NULL : npg - 1;
 }
@@ -292,11 +288,11 @@ uvm_pmr_pnaddr(struct uvm_pmemrange *pmr
 {
 	KASSERT(pg_prev != NULL && pg_next != NULL);
 
-	*pg_next = RB_NFIND(uvm_pmr_addr, &pmr->addr, pg);
+	*pg_next = RBT_NFIND(uvm_pmr_addr, &pmr->addr, pg);
 	if (*pg_next == NULL)
-		*pg_prev = RB_MAX(uvm_pmr_addr, &pmr->addr);
+		*pg_prev = RBT_MAX(uvm_pmr_addr, &pmr->addr);
 	else
-		*pg_prev = RB_PREV(uvm_pmr_addr, &pmr->addr, *pg_next);
+		*pg_prev = RBT_PREV(uvm_pmr_addr, *pg_next);
 
 	KDASSERT(*pg_next == NULL ||
 	    VM_PAGE_TO_PHYS(*pg_next) > VM_PAGE_TO_PHYS(pg));
@@ -326,9 +322,9 @@ uvm_pmr_pnaddr(struct uvm_pmemrange *pmr
 void
 uvm_pmr_remove_addr(struct uvm_pmemrange *pmr, struct vm_page *pg)
 {
-	KDASSERT(RB_FIND(uvm_pmr_addr, &pmr->addr, pg) == pg);
+	KDASSERT(RBT_FIND(uvm_pmr_addr, &pmr->addr, pg) == pg);
 	KDASSERT(pg->pg_flags & PQ_FREE);
-	RB_REMOVE(uvm_pmr_addr, &pmr->addr, pg);
+	RBT_REMOVE(uvm_pmr_addr, &pmr->addr, pg);
 
 	pmr->nsegs--;
 }
@@ -357,9 +353,9 @@ uvm_pmr_remove_size(struct uvm_pmemrange
 #endif
 		TAILQ_REMOVE(&pmr->single[memtype], pg, pageq);
 	} else {
-		KDASSERT(RB_FIND(uvm_pmr_size, &pmr->size[memtype],
+		KDASSERT(RBT_FIND(uvm_pmr_size, &pmr->size[memtype],
 		    pg + 1) == pg + 1);
-		RB_REMOVE(uvm_pmr_size, &pmr->size[memtype], pg + 1);
+		RBT_REMOVE(uvm_pmr_size, &pmr->size[memtype], pg + 1);
 	}
 }
 /* Remove from both trees. */
@@ -397,10 +393,10 @@ uvm_pmr_insert_addr(struct uvm_pmemrange
 		TAILQ_FOREACH(i, &pmr->single[mt], pageq)
 			KDASSERT(i != pg);
 		if (pg->fpgsz > 1) {
-			KDASSERT(RB_FIND(uvm_pmr_size, &pmr->size[mt],
+			KDASSERT(RBT_FIND(uvm_pmr_size, &pmr->size[mt],
 			    pg + 1) == NULL);
 		}
-		KDASSERT(RB_FIND(uvm_pmr_addr, &pmr->addr, pg) == NULL);
+		KDASSERT(RBT_FIND(uvm_pmr_addr, &pmr->addr, pg) == NULL);
 	}
 #endif
 
@@ -420,7 +416,7 @@ uvm_pmr_insert_addr(struct uvm_pmemrange
 		}
 	}
 
-	RB_INSERT(uvm_pmr_addr, &pmr->addr, pg);
+	RBT_INSERT(uvm_pmr_addr, &pmr->addr, pg);
 
 	pmr->nsegs++;
 
@@ -450,10 +446,10 @@ uvm_pmr_insert_size(struct uvm_pmemrange
 		TAILQ_FOREACH(i, &pmr->single[mti], pageq)
 			KDASSERT(i != pg);
 		if (pg->fpgsz > 1) {
-			KDASSERT(RB_FIND(uvm_pmr_size, &pmr->size[mti],
+			KDASSERT(RBT_FIND(uvm_pmr_size, &pmr->size[mti],
 			    pg + 1) == NULL);
 		}
-		KDASSERT(RB_FIND(uvm_pmr_addr, &pmr->addr, pg) == pg);
+		KDASSERT(RBT_FIND(uvm_pmr_addr, &pmr->addr, pg) == pg);
 	}
 	for (i = pg; i < pg + pg->fpgsz; i++)
 		KASSERT(uvm_pmr_pg_to_memtype(i) == memtype);
@@ -462,7 +458,7 @@ uvm_pmr_insert_size(struct uvm_pmemrange
 	if (pg->fpgsz == 1)
 		TAILQ_INSERT_TAIL(&pmr->single[memtype], pg, pageq);
 	else
-		RB_INSERT(uvm_pmr_size, &pmr->size[memtype], pg + 1);
+		RBT_INSERT(uvm_pmr_size, &pmr->size[memtype], pg + 1);
 }
 /* Insert in both trees. */
 struct vm_page *
@@ -1224,7 +1220,7 @@ uvm_pmr_assertvalid(struct uvm_pmemrange
 		return;
 
 	/* Validate address tree. */
-	RB_FOREACH(i, uvm_pmr_addr, &pmr->addr) {
+	RBT_FOREACH(i, uvm_pmr_addr, &pmr->addr) {
 		/* Validate the range. */
 		KASSERT(i->fpgsz > 0);
 		KASSERT(atop(VM_PAGE_TO_PHYS(i)) >= pmr->low);
@@ -1263,7 +1259,7 @@ uvm_pmr_assertvalid(struct uvm_pmemrange
 		}
 
 		/* Check that it shouldn't be joined with its predecessor. */
-		prev = RB_PREV(uvm_pmr_addr, &pmr->addr, i);
+		prev = RBT_PREV(uvm_pmr_addr, i);
 		if (prev != NULL) {
 			KASSERT(uvm_pmr_pg_to_memtype(i) !=
 			    uvm_pmr_pg_to_memtype(prev) ||
@@ -1281,7 +1277,7 @@ uvm_pmr_assertvalid(struct uvm_pmemrange
 			}
 			KASSERT(xref == i);
 		} else {
-			KASSERT(RB_FIND(uvm_pmr_size,
+			KASSERT(RBT_FIND(uvm_pmr_size,
 			    &pmr->size[uvm_pmr_pg_to_memtype(i)],
 			    i + 1) == i + 1);
 		}
@@ -1297,7 +1293,7 @@ uvm_pmr_assertvalid(struct uvm_pmemrange
 		}
 
 		/* Assert i is in the addr tree as well. */
-		KASSERT(RB_FIND(uvm_pmr_addr, &pmr->addr, i) == i);
+		KASSERT(RBT_FIND(uvm_pmr_addr, &pmr->addr, i) == i);
 
 		/* Assert i is of the correct memory type. */
 		KASSERT(uvm_pmr_pg_to_memtype(i) == mti);
@@ -1306,7 +1302,7 @@ uvm_pmr_assertvalid(struct uvm_pmemrange
 
 	/* Validate nsegs statistic. */
 	lcv = 0;
-	RB_FOREACH(i, uvm_pmr_addr, &pmr->addr)
+	RBT_FOREACH(i, uvm_pmr_addr, &pmr->addr)
 		lcv++;
 	KASSERT(pmr->nsegs == lcv);
 }
@@ -1364,14 +1360,14 @@ uvm_pmr_split(paddr_t pageno)
 	uvm_pmr_assertvalid(drain);
 	KASSERT(drain->nsegs == 0);
 
-	RB_FOREACH(rebuild, uvm_pmr_addr, &pmr->addr) {
+	RBT_FOREACH(rebuild, uvm_pmr_addr, &pmr->addr) {
 		if (atop(VM_PAGE_TO_PHYS(rebuild)) >= pageno)
 			break;
 	}
 	if (rebuild == NULL)
-		prev = RB_MAX(uvm_pmr_addr, &pmr->addr);
+		prev = RBT_MAX(uvm_pmr_addr, &pmr->addr);
 	else
-		prev = RB_PREV(uvm_pmr_addr, &pmr->addr, rebuild);
+		prev = RBT_PREV(uvm_pmr_addr, rebuild);
 	KASSERT(prev == NULL || atop(VM_PAGE_TO_PHYS(prev)) < pageno);
 
 	/*
@@ -1399,7 +1395,7 @@ uvm_pmr_split(paddr_t pageno)
 
 	/* Move free chunks that no longer fall in the range. */
 	for (; rebuild != NULL; rebuild = next) {
-		next = RB_NEXT(uvm_pmr_addr, &pmr->addr, rebuild);
+		next = RBT_NEXT(uvm_pmr_addr, rebuild);
 		uvm_pmr_remove(pmr, rebuild);
 		uvm_pmr_insert(drain, rebuild, 1);
 	}
@@ -1409,7 +1405,7 @@ uvm_pmr_split(paddr_t pageno)
 	uvm_pmr_assertvalid(pmr);
 	uvm_pmr_assertvalid(drain);
 
-	RB_INSERT(uvm_pmemrange_addr, &uvm.pmr_control.addr, drain);
+	RBT_INSERT(uvm_pmemrange_addr, &uvm.pmr_control.addr, drain);
 	uvm_pmemrange_use_insert(&uvm.pmr_control.use, drain);
 	uvm_unlock_fpageq();
 }
@@ -1439,7 +1435,7 @@ uvm_pmr_use_inc(paddr_t low, paddr_t hig
 	sz = 0;
 	uvm_lock_fpageq();
 	/* Increase use count on segments in range. */
-	RB_FOREACH(pmr, uvm_pmemrange_addr, &uvm.pmr_control.addr) {
+	RBT_FOREACH(pmr, uvm_pmemrange_addr, &uvm.pmr_control.addr) {
 		if (PMR_IS_SUBRANGE_OF(pmr->low, pmr->high, low, high)) {
 			TAILQ_REMOVE(&uvm.pmr_control.use, pmr, pmr_use);
 			pmr->use++;
@@ -1476,9 +1472,9 @@ uvm_pmr_allocpmr(void)
 	}
 	KASSERT(nw != NULL);
 	memset(nw, 0, sizeof(struct uvm_pmemrange));
-	RB_INIT(&nw->addr);
+	RBT_INIT(uvm_pmr_addr, &nw->addr);
 	for (i = 0; i < UVM_PMR_MEMTYPE_MAX; i++) {
-		RB_INIT(&nw->size[i]);
+		RBT_INIT(uvm_pmr_size, &nw->size[i]);
 		TAILQ_INIT(&nw->single[i]);
 	}
 	return nw;
@@ -1497,7 +1493,7 @@ uvm_pmr_init(void)
 	int i;
 
 	TAILQ_INIT(&uvm.pmr_control.use);
-	RB_INIT(&uvm.pmr_control.addr);
+	RBT_INIT(uvm_pmemrange_addr, &uvm.pmr_control.addr);
 	TAILQ_INIT(&uvm.pmr_control.allocs);
 
 	/* By default, one range for the entire address space. */
@@ -1505,7 +1501,7 @@ uvm_pmr_init(void)
 	new_pmr->low = 0;
 	new_pmr->high = atop((paddr_t)-1) + 1;
 
-	RB_INSERT(uvm_pmemrange_addr, &uvm.pmr_control.addr, new_pmr);
+	RBT_INSERT(uvm_pmemrange_addr, &uvm.pmr_control.addr, new_pmr);
 	uvm_pmemrange_use_insert(&uvm.pmr_control.use, new_pmr);
 
 	for (i = 0; uvm_md_constraints[i] != NULL; i++) {
@@ -1525,12 +1521,12 @@ uvm_pmemrange_find(paddr_t pageno)
 {
 	struct uvm_pmemrange *pmr;
 
-	pmr = RB_ROOT(&uvm.pmr_control.addr);
+	pmr = RBT_ROOT(uvm_pmemrange_addr, &uvm.pmr_control.addr);
 	while (pmr != NULL) {
 		if (pmr->low > pageno)
-			pmr = RB_LEFT(pmr, pmr_addr);
+			pmr = RBT_LEFT(uvm_pmemrange_addr, pmr);
 		else if (pmr->high <= pageno)
-			pmr = RB_RIGHT(pmr, pmr_addr);
+			pmr = RBT_RIGHT(uvm_pmemrange_addr, pmr);
 		else
 			break;
 	}
@@ -1554,11 +1550,11 @@ uvm_pmr_isfree(struct vm_page *pg)
 	pmr = uvm_pmemrange_find(atop(VM_PAGE_TO_PHYS(pg)));
 	if (pmr == NULL)
 		return 0;
 
-	r = RB_NFIND(uvm_pmr_addr, &pmr->addr, pg);
+	r = RBT_NFIND(uvm_pmr_addr, &pmr->addr, pg);
 	if (r == NULL)
-		r = RB_MAX(uvm_pmr_addr, &pmr->addr);
+		r = RBT_MAX(uvm_pmr_addr, &pmr->addr);
 	else if (r != pg)
-		r = RB_PREV(uvm_pmr_addr, &pmr->addr, r);
+		r = RBT_PREV(uvm_pmr_addr, r);
 	if (r == NULL)
 		return 0; /* Empty tree. */
@@ -1600,9 +1596,9 @@ uvm_pmr_rootupdate(struct uvm_pmemrange
 	    atop(VM_PAGE_TO_PHYS(root)) + root->fpgsz, start, end)) {
 		if (direction == 1)
-			root = RB_RIGHT(root, objt);
+			root = RBT_RIGHT(uvm_pmr_addr, root);
 		else
-			root = RB_LEFT(root, objt);
+			root = RBT_LEFT(uvm_pmr_addr, root);
 	}
 
 	if (root == NULL || uvm_pmr_pg_to_memtype(root) == memtype)
 		return root;
@@ -1620,7 +1616,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange
 	 * Cache the upper page, so we can page-walk later.
 	 */
 	high = root;
-	high_next = RB_RIGHT(high, objt);
+	high_next = RBT_RIGHT(uvm_pmr_addr, high);
 	while (high_next != NULL && PMR_INTERSECTS_WITH(
 	    atop(VM_PAGE_TO_PHYS(high_next)),
 	    atop(VM_PAGE_TO_PHYS(high_next)) + high_next->fpgsz,
@@ -1628,7 +1624,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange
 		high = high_next;
 		if (uvm_pmr_pg_to_memtype(high) == memtype)
 			return high;
-		high_next = RB_RIGHT(high, objt);
+		high_next = RBT_RIGHT(uvm_pmr_addr, high);
 	}
 
 	/*
@@ -1636,7 +1632,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange
 	 * Cache the lower page, so we can page-walk later.
 	 */
 	low = root;
-	low_next = RB_LEFT(low, objt);
+	low_next = RBT_LEFT(uvm_pmr_addr, low);
 	while (low_next != NULL && PMR_INTERSECTS_WITH(
 	    atop(VM_PAGE_TO_PHYS(low_next)),
 	    atop(VM_PAGE_TO_PHYS(low_next)) + low_next->fpgsz,
@@ -1644,16 +1640,16 @@ uvm_pmr_rootupdate(struct uvm_pmemrange
 		low = low_next;
 		if (uvm_pmr_pg_to_memtype(low) == memtype)
 			return low;
-		low_next = RB_LEFT(low, objt);
+		low_next = RBT_LEFT(uvm_pmr_addr, low);
 	}
 
 	if (low == high)
 		return NULL;
 
 	/* No hits. Walk the address tree until we find something usable. */
-	for (low = RB_NEXT(uvm_pmr_addr, &pmr->addr, low);
+	for (low = RBT_NEXT(uvm_pmr_addr, low);
 	    low != high;
-	    low = RB_NEXT(uvm_pmr_addr, &pmr->addr, low)) {
+	    low = RBT_NEXT(uvm_pmr_addr, low)) {
 		KDASSERT(PMR_IS_SUBRANGE_OF(atop(VM_PAGE_TO_PHYS(low)),
 		    atop(VM_PAGE_TO_PHYS(low)) + low->fpgsz,
 		    start, end));
@@ -1719,7 +1715,7 @@ uvm_pmr_get1page(psize_t count, int memt
 			 * Note that a size tree gives pg[1] instead of
 			 * pg[0].
 			 */
-			found = RB_MIN(uvm_pmr_size,
+			found = RBT_MIN(uvm_pmr_size,
 			    &pmr->size[memtype]);
 			if (found != NULL) {
 				found--;
@@ -1735,7 +1731,7 @@ uvm_pmr_get1page(psize_t count, int memt
 			 * Try address-guided search to meet the page
 			 * number constraints.
 			 */
-			found = RB_ROOT(&pmr->addr);
+			found = RBT_ROOT(uvm_pmr_addr, &pmr->addr);
 			if (found != NULL) {
 				found = uvm_pmr_rootupdate(pmr, found,
 				    start, end, memtype);
@@ -1858,14 +1854,14 @@ uvm_pmr_print(void)
 		useq_len++;
 		free = 0;
 		for (mt = 0; mt < UVM_PMR_MEMTYPE_MAX; mt++) {
-			pg = RB_MAX(uvm_pmr_size, &pmr->size[mt]);
+			pg = RBT_MAX(uvm_pmr_size, &pmr->size[mt]);
 			if (pg != NULL)
 				pg--;
 			else
 				pg = TAILQ_FIRST(&pmr->single[mt]);
 			size[mt] = (pg == NULL ? 0 : pg->fpgsz);
-			RB_FOREACH(pg, uvm_pmr_addr, &pmr->addr)
+			RBT_FOREACH(pg, uvm_pmr_addr, &pmr->addr)
 				free += pg->fpgsz;
 		}
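The subtlest structure in uvm_pmemrange.c is unchanged by this diff but is what uvm_pmr_size_cmp() above depends on: a free range is entered in the address tree through the first page's embedded entry and in the size tree through the second page's entry, so one RBT_ENTRY per vm_page serves both trees, with single-page ranges kept on a TAILQ instead. A hedged sketch of the same layout with simplified stand-in types (same availability assumption as the earlier sketches):

	#include <sys/rbtree.h>

	struct fpage {
		RBT_ENTRY(fpage) objt;	/* addr tree uses pg[0], size tree pg[1] */
		unsigned long phys;	/* page address, valid in every page */
		unsigned long fpgsz;	/* run length, valid in pg[0] only */
	};

	static inline int
	fp_addr_cmp(const struct fpage *a, const struct fpage *b)
	{
		return a->phys < b->phys ? -1 : a->phys > b->phys;
	}

	/* The size comparator receives pg[1]; step back one for the size. */
	static inline int
	fp_size_cmp(const struct fpage *a, const struct fpage *b)
	{
		unsigned long asz = (a - 1)->fpgsz, bsz = (b - 1)->fpgsz;

		if (asz != bsz)
			return asz < bsz ? -1 : 1;
		return fp_addr_cmp(a - 1, b - 1);
	}

	RBT_HEAD(fp_addr, fpage);
	RBT_HEAD(fp_size, fpage);
	RBT_PROTOTYPE(fp_addr, fpage, objt, fp_addr_cmp);
	RBT_PROTOTYPE(fp_size, fpage, objt, fp_size_cmp);
	RBT_GENERATE(fp_addr, fpage, objt, fp_addr_cmp);
	RBT_GENERATE(fp_size, fpage, objt, fp_size_cmp);

	static void
	fp_insert(struct fp_addr *at, struct fp_size *st, struct fpage *pg)
	{
		RBT_INSERT(fp_addr, at, &pg[0]);
		if (pg->fpgsz > 1)	/* single pages go on a list instead */
			RBT_INSERT(fp_size, st, &pg[1]);
	}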
Index: sys/uvm/uvm_pmemrange.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_pmemrange.h,v
retrieving revision 1.12
diff -u -p -r1.12 uvm_pmemrange.h
--- sys/uvm/uvm_pmemrange.h	5 Feb 2015 23:51:06 -0000	1.12
+++ sys/uvm/uvm_pmemrange.h	12 Aug 2016 07:06:59 -0000
@@ -23,8 +23,8 @@
 #ifndef _UVM_UVM_PMEMRANGE_H_
 #define _UVM_UVM_PMEMRANGE_H_
 
-RB_HEAD(uvm_pmr_addr, vm_page);
-RB_HEAD(uvm_pmr_size, vm_page);
+RBT_HEAD(uvm_pmr_addr, vm_page);
+RBT_HEAD(uvm_pmr_size, vm_page);
 
 /*
  * Page types available:
@@ -52,7 +52,7 @@ struct uvm_pmemrange {
 
 	TAILQ_ENTRY(uvm_pmemrange) pmr_use;
 					/* pmr, sorted by use */
-	RB_ENTRY(uvm_pmemrange)	pmr_addr;
+	RBT_ENTRY(uvm_pmemrange) pmr_addr;
 					/* pmr, sorted by address */
 };
 
@@ -94,7 +94,7 @@ struct uvm_pmalloc {
 #define UVM_PMA_FAIL	0x10	/* page daemon cannot free pages */
 #define UVM_PMA_FREED	0x20	/* at least one page in the range was freed */
 
-RB_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
+RBT_HEAD(uvm_pmemrange_addr, uvm_pmemrange);
 TAILQ_HEAD(uvm_pmemrange_use, uvm_pmemrange);
 
 /*
@@ -124,12 +124,9 @@ int	uvm_pmr_isfree(struct vm_page *pg);
  * Internal tree logic.
  */
 
-int	uvm_pmr_addr_cmp(struct vm_page *, struct vm_page *);
-int	uvm_pmr_size_cmp(struct vm_page *, struct vm_page *);
-
-RB_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
-RB_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
-RB_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
+RBT_PROTOTYPE(uvm_pmr_addr, vm_page, objt, uvm_pmr_addr_cmp);
+RBT_PROTOTYPE(uvm_pmr_size, vm_page, objt, uvm_pmr_size_cmp);
+RBT_PROTOTYPE(uvm_pmemrange_addr, uvm_pmemrange, pmr_addr,
     uvm_pmemrange_addr_cmp);
 
 struct vm_page		*uvm_pmr_insert_addr(struct uvm_pmemrange *,
Index: sys/uvm/uvm_unix.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_unix.c,v
retrieving revision 1.58
diff -u -p -r1.58 uvm_unix.c
--- sys/uvm/uvm_unix.c	4 Apr 2016 16:34:16 -0000	1.58
+++ sys/uvm/uvm_unix.c	12 Aug 2016 07:06:59 -0000
@@ -150,7 +150,7 @@ uvm_coredump_walkmap(struct proc *p, voi
 	vaddr_t top;
 	int error;
 
-	RB_FOREACH(entry, uvm_map_addr, &map->addr) {
+	RBT_FOREACH(entry, uvm_map_addr, &map->addr) {
 		state.cookie = cookie;
 		state.prot = entry->protection;
 		state.flags = 0;
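RBT_FOREACH, as in uvm_coredump_walkmap() above, is an in-order walk that must not unlink the element it is visiting; loops that migrate entries, like uvm_pmr_split() earlier in this diff, fetch the successor with RBT_NEXT() before removing. Both idioms in one hedged sketch (illustrative types, same availability assumption as the earlier sketches):

	#include <sys/rbtree.h>
	#include <stdio.h>

	struct span {
		RBT_ENTRY(span) entry;
		long start;
	};

	static inline int
	span_cmp(const struct span *a, const struct span *b)
	{
		return a->start < b->start ? -1 : a->start > b->start;
	}

	RBT_HEAD(spantree, span);
	RBT_PROTOTYPE(spantree, span, entry, span_cmp);
	RBT_GENERATE(spantree, span, entry, span_cmp);

	static void
	drop_negative(struct spantree *t)
	{
		struct span *s, *next;

		/* Removal-safe walk: take the successor before unlinking. */
		for (s = RBT_MIN(spantree, t); s != NULL; s = next) {
			next = RBT_NEXT(spantree, s);
			if (s->start < 0)
				RBT_REMOVE(spantree, t, s);
		}
	}

	int
	main(void)
	{
		struct spantree t;
		struct span sp[3] = {
			{ .start = -1 }, { .start = 0 }, { .start = 1 }
		};
		struct span *s;
		int i;

		RBT_INIT(spantree, &t);
		for (i = 0; i < 3; i++)
			RBT_INSERT(spantree, &t, &sp[i]);

		drop_negative(&t);

		/* Read-only walk. */
		RBT_FOREACH(s, spantree, &t)
			printf("%ld\n", s->start);

		return 0;
	}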
Index: sys/uvm/uvm_vnode.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
retrieving revision 1.92
diff -u -p -r1.92 uvm_vnode.c
--- sys/uvm/uvm_vnode.c	19 Mar 2016 12:04:16 -0000	1.92
+++ sys/uvm/uvm_vnode.c	12 Aug 2016 07:06:59 -0000
@@ -362,7 +362,7 @@ uvn_detach(struct uvm_object *uobj)
 	if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
 		LIST_REMOVE(uvn, u_wlist);
 	}
-	KASSERT(RB_EMPTY(&uobj->memt));
+	KASSERT(RBT_EMPTY(uvm_objtree, &uobj->memt));
 
 	oldflags = uvn->u_flags;
 	uvn->u_flags = 0;
@@ -462,7 +462,7 @@ uvm_vnp_terminate(struct vnode *vp)
 	while (uvn->u_obj.uo_npages) {
 #ifdef DEBUG
 		struct vm_page *pp;
-		RB_FOREACH(pp, uvm_objtree, &uvn->u_obj.memt) {
+		RBT_FOREACH(pp, uvm_objtree, &uvn->u_obj.memt) {
 			if ((pp->pg_flags & PG_BUSY) == 0)
 				panic("uvm_vnp_terminate: detected unbusy pg");
 		}