Index: rtable.c =================================================================== RCS file: /cvs/src/sys/net/rtable.c,v retrieving revision 1.50 diff -u -p -r1.50 rtable.c --- rtable.c 19 Jul 2016 10:51:44 -0000 1.50 +++ rtable.c 21 Jul 2016 13:50:18 -0000 @@ -510,16 +510,437 @@ rtable_mpath_next(struct rtentry *rt) #else /* ART */ +#include +#include + static inline uint8_t *satoaddr(struct art_root *, struct sockaddr *); -void rtentry_ref(void *, void *); -void rtentry_unref(void *, void *); +static inline int +sockaddreq(const struct sockaddr *a, const struct sockaddr *b) +{ + return (a->sa_len == b->sa_len && memcmp(a, b, a->sa_len) == 0); +} + +#define RTLIST_ENTRIES 4 /* must be 3 or more */ + +struct rtlist { + unsigned long rl_map[RTLIST_ENTRIES]; /* must be first */ + struct rtlist *rl_gc; +}; + +static inline int +rtlist_is_list(unsigned long entry) +{ + return (entry & 1); +} + +static inline struct rtentry * +rtlist_rt(unsigned long entry) +{ + return ((struct rtentry *)entry); +} -struct srpl_rc rt_rc = SRPL_RC_INITIALIZER(rtentry_ref, rtentry_unref, NULL); +static inline struct rtlist * +rtlist_list(unsigned long entry) +{ + entry &= ~1UL; + return ((struct rtlist *)entry); +} + +static inline unsigned long * +rtlist_map(unsigned long entry) +{ + entry &= ~1UL; + return ((unsigned long *)entry); +} + +static inline unsigned long +rtlist_map_list(struct rtlist *rl) +{ + unsigned long entry = (unsigned long)rl; + return (entry | 1); +} + +static inline unsigned long +rtlist_map_rt(struct rtentry *rt) +{ + rtref(rt); + return ((unsigned long)rt); +} + +void rtlist_put(struct rtlist *); +void rtlist_gc(void *); + +struct rtlist_iter { + unsigned long *map; + unsigned int i; +}; + +int rtlist_add(struct rtlist_iter *, struct rtentry *); +int rtlist_cow_insert(struct art_node *, struct rtentry *); +int rtlist_cow_delete(struct art_node *, struct rtentry *); +int rtlist_cow_change(struct art_node *, struct rtentry *, uint8_t); + +struct pool rtlist_pool; + 
+struct mutex rtlist_gc_mtx = MUTEX_INITIALIZER(IPL_SOFTNET); +struct rtlist *rtlist_gc_list = NULL; +struct task rtlist_gc_task = TASK_INITIALIZER(rtlist_gc, NULL); + +static inline struct rtlist * +rtlist_get(void) +{ + return (pool_get(&rtlist_pool, PR_NOWAIT | PR_ZERO)); +} + +void +rtlist_put(struct rtlist *rl) +{ + mtx_enter(&rtlist_gc_mtx); + rl->rl_gc = rtlist_gc_list; + rtlist_gc_list = rl; + mtx_leave(&rtlist_gc_mtx); + + task_add(systqmp, &rtlist_gc_task); +} + +void +rtlist_gc(void *null) +{ + struct rtlist *rl, *next, *chain; + int i; + + mtx_enter(&rtlist_gc_mtx); + rl = rtlist_gc_list; + rtlist_gc_list = NULL; + mtx_leave(&rtlist_gc_mtx); + + while (rl != NULL) { + next = rl->rl_gc; + + srp_finalize((void *)rtlist_map_list(rl), "rtlfini"); + + i = 1; + do { + do { + KASSERT(!rtlist_is_list(rl->rl_map[i])); + rtfree(rtlist_rt(rl->rl_map[i])); + } while (++i < nitems(rl->rl_map) - 1); + + if (rtlist_is_list(rl->rl_map[i])) { + chain = rtlist_list(rl->rl_map[i]); + i = 0; + } else { + chain = NULL; + rtfree(rtlist_rt(rl->rl_map[i])); + } + + pool_put(&rtlist_pool, rl); + + rl = chain; + } while (rl != NULL); + + rl = next; + } +} + +static inline struct rtentry * +rtlist_cur(struct rtlist_iter *ri) +{ + return (rtlist_rt(ri->map[ri->i])); +} + +struct rtentry * +rtlist_next(struct rtlist_iter *ri) +{ + ri->i++; + if (ri->i >= RTLIST_ENTRIES) + return (NULL); + + if (rtlist_is_list(ri->map[ri->i])) { + ri->map = rtlist_map(ri->map[ri->i]); + ri->i = 0; + } + + return (rtlist_cur(ri)); +} + +int +rtlist_add(struct rtlist_iter *ri, struct rtentry *rt) +{ + + if (ri->i >= RTLIST_ENTRIES) { + struct rtlist *rl; + + rl = rtlist_get(); + if (rl == NULL) + return (ENOBUFS); + + rl->rl_map[0] = ri->map[RTLIST_ENTRIES - 1]; + ri->map[RTLIST_ENTRIES - 1] = rtlist_map_list(rl); + + ri->map = rl->rl_map; + ri->i = 1; + } + + ri->map[ri->i] = rtlist_map_rt(rt); + + ri->i++; + + return (0); +} + +int +rtlist_cow_insert(struct art_node *an, struct rtentry *rt) +{ + 
struct rtlist *rl; + struct rtentry *mrt; + struct rtlist_iter ri; + struct rtlist_iter nri; + unsigned long route; + unsigned long count; + unsigned long entries[2]; + int error; + + rl = rtlist_get(); + if (rl == NULL) + return (ENOBUFS); + + printf("%s: %p\n", __func__, rl); + + route = (unsigned long)srp_get_locked(&an->an_route); + if (rtlist_is_list(route)) { + ri.map = rtlist_map(route); + ri.i = 1; + count = ri.map[0]; + } else { + entries[0] = route; + entries[1] = 0UL; + ri.map = entries; + ri.i = 0; + count = 1; + } + + mrt = rtlist_cur(&ri); + if (rt->rt_priority > mrt->rt_priority) + rl->rl_map[0] = 1; + else if (rt->rt_priority == mrt->rt_priority) + rl->rl_map[0] = count + 1; + else + rl->rl_map[0] = count; + + nri.map = rl->rl_map; + nri.i = 1; /* skip head count */ + + do { + if (rt->rt_priority > mrt->rt_priority || + memcmp(rt->rt_gateway, mrt->rt_gateway, + rt->rt_gateway->sa_len) < 0) + break; + + error = rtlist_add(&nri, mrt); + if (error != 0) + goto fail; + + mrt = rtlist_next(&ri); + } while (mrt != NULL); + + error = rtlist_add(&nri, rt); + if (error != 0) + goto fail; + + while (mrt != NULL) { + error = rtlist_add(&nri, mrt); + if (error != 0) + goto fail; + + mrt = rtlist_next(&ri); + } + + srp_swap_locked(&an->an_route, (void *)rtlist_map_list(rl)); + if (rtlist_is_list(route)) + rtlist_put(rtlist_list(route)); + else + rtfree(rtlist_rt(route)); + + return (0); + +fail: + rtlist_put(rl); + return (error); +} + +int +rtlist_cow_delete(struct art_node *an, struct rtentry *rt) +{ + struct rtlist *rl; + struct rtlist_iter ri; + struct rtlist_iter nri; + unsigned long route; + struct rtentry *mrt; + uint8_t prio; + unsigned long count; + int error; + + route = (unsigned long)srp_get_locked(&an->an_route); + + /* is this the last route? 
*/ + if (rtlist_rt(route) == rt) { + srp_swap_locked(&an->an_route, NULL); + rtfree(rt); + return (ESHUTDOWN); + } + + KASSERT(rtlist_is_list(route)); + ri.map = rtlist_map(route); + ri.i = 1; + + /* are we leaving one route in the list? */ + if (ri.map[3] == 0) { + struct rtentry *mrt; + + mrt = rtlist_rt(ri.map[1]); + if (mrt == rt) + mrt = rtlist_rt(ri.map[2]); + + an->an_dst = mrt->rt_dest; + srp_swap_locked(&an->an_route, (void *)rtlist_map_rt(mrt)); + + rtlist_put(rtlist_list(route)); + return (0); + } + + /* build a new list */ + rl = rtlist_get(); + if (rl == NULL) + return (ENOBUFS); + + printf("%s: %p\n", __func__, rl); + + nri.map = rl->rl_map; + nri.i = 1; /* skip count */ + + for (mrt = rtlist_cur(&ri); mrt != NULL; mrt = rtlist_next(&ri)) { + if (rt != mrt) { + error = rtlist_add(&nri, mrt); + if (error != 0) + goto fail; + } + } + + /* count the head prio routes */ + ri.map = rl->rl_map; + ri.i = 1; + + mrt = rtlist_cur(&ri); + an->an_dst = mrt->rt_dest; + prio = mrt->rt_priority; + count = 1; + + while ((mrt = rtlist_next(&ri)) != NULL) { + if (mrt->rt_priority == prio) + count++; + else + break; + } + + rl->rl_map[0] = count; + + srp_swap_locked(&an->an_route, (void *)rtlist_map_list(rl)); + rtlist_put(rtlist_list(route)); + + return (0); + +fail: + rtlist_put(rl); + return (error); +} + +int +rtlist_cow_change(struct art_node *an, struct rtentry *rt, uint8_t prio) +{ + struct rtlist *rl; + struct rtlist_iter ri, nri; + unsigned long route; + struct rtentry *mrt; + int error; + + if (rt->rt_priority == prio) + return (0); + + route = (unsigned long)srp_get_locked(&an->an_route); + if (rtlist_rt(route) == rt) { + rt->rt_priority = prio; + return (0); + } + + rl = rtlist_get(); + if (rl == NULL) + return (ENOBUFS); + + printf("%s: %p\n", __func__, rl); + + ri.map = rtlist_map(route); + ri.i = 1; + + mrt = rtlist_cur(&ri); + if (prio > mrt->rt_priority) + rl->rl_map[0] = 1; + else if (prio == mrt->rt_priority) + rl->rl_map[0] = ri.map[0]; + else + 
panic("%s: invalid priority change", __func__);
+
+	nri.map = rl->rl_map;
+	nri.i = 1; /* skip head count */
+
+	rt->rt_priority = prio;
+
+	do {
+		if (mrt != rt) {
+			if (rt->rt_priority > mrt->rt_priority ||
+			    memcmp(rt->rt_gateway, mrt->rt_gateway,
+			    rt->rt_gateway->sa_len) < 0)
+				break;
+
+			error = rtlist_add(&nri, mrt);
+			if (error != 0)
+				goto fail;
+		}
+
+		mrt = rtlist_next(&ri);
+	} while (mrt != NULL);
+
+	error = rtlist_add(&nri, rt);
+	if (error != 0)
+		goto fail;
+
+	while (mrt != NULL) {
+		if (mrt != rt) {
+			error = rtlist_add(&nri, mrt);
+			if (error != 0)
+				goto fail;
+		}
+
+		mrt = rtlist_next(&ri);
+	}
+
+	srp_swap_locked(&an->an_route, (void *)rtlist_map_list(rl));
+	rtlist_put(rtlist_list(route));
+
+	return (0);
+
+fail:
+	rtlist_put(rl);
+	return (error);
+}
 
 void
 rtable_init_backend(unsigned int keylen)
 {
+	pool_init(&rtlist_pool, sizeof(struct rtlist), 0, 0, 0, "rtlist", NULL);
+	pool_setipl(&rtlist_pool, IPL_SOFTNET);
+
 	art_init();
 }
 
@@ -529,6 +950,13 @@ rtable_alloc(unsigned int rtableid, unsi
 	return (art_alloc(rtableid, alen, off));
 }
 
+int
+rtable_lookup_match(struct rtentry *rt, struct sockaddr *gateway, uint8_t prio)
+{
+
+	return (0);
+}
+
 struct rtentry *
 rtable_lookup(unsigned int rtableid, struct sockaddr *dst,
     struct sockaddr *mask, struct sockaddr *gateway, uint8_t prio)
@@ -536,8 +964,13 @@ rtable_lookup(unsigned int rtableid, str
 	struct art_root			*ar;
 	struct art_node			*an;
 	struct rtentry			*rt = NULL;
-	struct srp_ref			 sr, nsr;
+	struct srp_ref			 sr;
 	uint8_t				*addr;
+	unsigned long			 entries[2];
+#ifndef SMALL_KERNEL
+	unsigned long			*map;
+	int				 i;
+#endif
 	int				 plen;
 
 	ar = rtable_get(rtableid, dst->sa_family);
@@ -548,7 +981,7 @@ rtable_lookup(unsigned int rtableid, str
 
 	/* No need for a perfect match.
*/
 	if (mask == NULL) {
-		an = art_match(ar, addr, &nsr);
+		an = art_match(ar, addr, &sr);
 		if (an == NULL)
 			goto out;
 	} else {
@@ -556,7 +989,7 @@ rtable_lookup(unsigned int rtableid, str
 		if (plen == -1)
 			return (NULL);
 
-		an = art_lookup(ar, addr, plen, &nsr);
+		an = art_lookup(ar, addr, plen, &sr);
 		/* Make sure we've got a perfect match. */
 		if (an == NULL || an->an_plen != plen ||
@@ -564,28 +997,49 @@
 			goto out;
 	}
 
-#ifdef SMALL_KERNEL
-	rt = SRPL_ENTER(&sr, &an->an_rtlist);
-#else
-	SRPL_FOREACH(rt, &sr, &an->an_rtlist, rt_next) {
+	entries[0] = (unsigned long)srp_follow(&sr, &an->an_route);
+#ifndef SMALL_KERNEL
+	if (rtlist_is_list(entries[0])) {
+		map = rtlist_map(entries[0]);
+		i = 1; /* first slot doesnt have a route in it */
+	} else {
+		entries[1] = 0UL;
+		map = entries;
+		i = 0;
+	}
+
+	for (;;) {
+		rt = rtlist_rt(map[i]);
+		if (rt == NULL)
+			break;
+
 		if (prio != RTP_ANY &&
-		    (rt->rt_priority & RTP_MASK) != (prio & RTP_MASK))
-			continue;
+		    ((rt->rt_priority & RTP_MASK) != (prio & RTP_MASK)))
+			goto next;
 
-		if (gateway == NULL)
+		if (gateway == NULL || sockaddreq(rt->rt_gateway, gateway)) {
+			rtref(rt);
 			break;
+		}
 
-		if (rt->rt_gateway->sa_len == gateway->sa_len &&
-		    memcmp(rt->rt_gateway, gateway, gateway->sa_len) == 0)
+next:
+		rt = NULL;
+
+		i++;
+		if (i >= RTLIST_ENTRIES)
 			break;
+
+		if (rtlist_is_list(map[i])) {
+			map = rtlist_map(map[i]);
+			i = 0;
+		}
 	}
+#else /* SMALL_KERNEL */
+	rt = rtlist_rt(entries[0]);
+	rtref(rt);
 #endif /* SMALL_KERNEL */
 
-	if (rt != NULL)
-		rtref(rt);
-
-	SRPL_LEAVE(&sr);
 out:
-	srp_leave(&nsr);
+	srp_leave(&sr);
 	return (rt);
 }
 
@@ -596,11 +1050,9 @@ rtable_match(unsigned int rtableid, stru
 	struct art_root			*ar;
 	struct art_node			*an;
 	struct rtentry			*rt = NULL;
-	struct srp_ref			 sr, nsr;
+	struct srp_ref			 sr;
 	uint8_t				*addr;
-#ifndef SMALL_KERNEL
-	int				 hash;
-#endif /* SMALL_KERNEL */
+	unsigned long			 entry;
 
 	ar = rtable_get(rtableid, dst->sa_family);
 	if (ar == NULL)
@@ -608,58 +1060,49 @@
rtable_match(unsigned int rtableid, stru
 
 	addr = satoaddr(ar, dst);
 
-	an = art_match(ar, addr, &nsr);
+	an = art_match(ar, addr, &sr);
 	if (an == NULL)
 		goto out;
 
-	rt = SRPL_ENTER(&sr, &an->an_rtlist);
-	rtref(rt);
-	SRPL_LEAVE(&sr);
-
+	entry = (unsigned long)srp_follow(&sr, &an->an_route);
#ifndef SMALL_KERNEL
-	/* Gateway selection by Hash-Threshold (RFC 2992) */
-	if ((hash = rt_hash(rt, dst, src)) != -1) {
-		struct rtentry		*mrt;
-		int			 threshold, npaths = 0;
-
-		KASSERT(hash <= 0xffff);
-
-		SRPL_FOREACH(mrt, &sr, &an->an_rtlist, rt_next) {
-			/* Only count nexthops with the same priority. */
-			if (mrt->rt_priority == rt->rt_priority)
-				npaths++;
-		}
-		SRPL_LEAVE(&sr);
-
-		threshold = (0xffff / npaths) + 1;
+	if (rtlist_is_list(entry)) {
+		unsigned long *map;
+		int hash, threshold, npaths;
+		int i;
+
+		map = rtlist_map(entry);
+		npaths = map[0];
+		i = 1;
+
+		rt = rtlist_rt(map[i]);
+
+		if (npaths > 1 && (hash = rt_hash(rt, dst, src)) != -1) {
+			KASSERT(hash <= 0xffff);
+
+			threshold = (0xffff / npaths) + 1;
+
+			while (hash > threshold) {
+				i++;
+				if (rtlist_is_list(map[i])) {
+					map = rtlist_map(map[i]);
+					i = 0;
+				}
 
-		/*
-		 * we have no protection against concurrent modification of the
-		 * route list attached to the node, so we won't necessarily
-		 * have the same number of routes.  for most modifications,
-		 * we'll pick a route that we wouldn't have if we only saw the
-		 * list before or after the change.  if we were going to use
-		 * the last available route, but it got removed, we'll hit
-		 * the end of the list and then pick the first route.
- */
-
-	mrt = SRPL_ENTER(&sr, &an->an_rtlist);
-	while (hash > threshold && mrt != NULL) {
-		if (mrt->rt_priority == rt->rt_priority)
 				hash -= threshold;
-		mrt = SRPL_NEXT(&sr, mrt, rt_next);
-	}
+			}
 
-	if (mrt != NULL) {
-		rtref(mrt);
-		rtfree(rt);
-		rt = mrt;
+			rt = rtlist_rt(map[i]);
 		}
-		SRPL_LEAVE(&sr);
-	}
+	} else
+		rt = rtlist_rt(entry);
+#else /* SMALL_KERNEL */
+	rt = rtlist_rt(entry);
 #endif /* SMALL_KERNEL */
 
+	KASSERT(rt != NULL);
+	rtref(rt);
 out:
-	srp_leave(&nsr);
+	srp_leave(&sr);
 	return (rt);
 }
 
@@ -669,7 +1112,6 @@ rtable_insert(unsigned int rtableid, str
 	struct rtentry *rt)
 {
 #ifndef SMALL_KERNEL
-	struct rtentry			*mrt;
 	struct srp_ref			 sr;
 #endif /* SMALL_KERNEL */
 	struct art_root			*ar;
@@ -679,8 +1121,6 @@ rtable_insert(unsigned int rtableid, str
 	unsigned int			 rt_flags;
 	int				 error = 0;
 
-	KERNEL_ASSERT_LOCKED();
-
 	ar = rtable_get(rtableid, dst->sa_family);
 	if (ar == NULL)
 		return (EAFNOSUPPORT);
@@ -690,28 +1130,50 @@ rtable_insert(unsigned int rtableid, str
 	if (plen == -1)
 		return (EINVAL);
 
-	rtref(rt); /* guarantee rtfree won't do anything during insert */
-
 #ifndef SMALL_KERNEL
 	/* Do not permit exactly the same dst/mask/gw pair.
*/
 	an = art_lookup(ar, addr, plen, &sr);
 	srp_leave(&sr); /* an can't go away while we have the lock */
-	if (an != NULL && an->an_plen == plen &&
-	    !memcmp(an->an_dst, dst, dst->sa_len)) {
+
+	if (an != NULL && an->an_plen == plen && sockaddreq(an->an_dst, dst)) {
 		struct rtentry	*mrt;
 		int		 mpathok = ISSET(rt->rt_flags, RTF_MPATH);
+		unsigned long	 entries[2];
+		unsigned long	*map;
+		int		 i;
+
+		entries[0] = (unsigned long)srp_get_locked(&an->an_route);
+		if (rtlist_is_list(entries[0])) {
+			map = rtlist_map(entries[0]);
+			/* map[0] holds the route count; routes start at map[1] */
+			i = 1;
+		} else {
+			entries[1] = 0UL;
+			map = entries;
+			i = 0;
+		}
+
+		while (i < RTLIST_ENTRIES) {
+			while (rtlist_is_list(map[i])) {
+				map = rtlist_map(map[i]);
+				i = 0;
+			}
+
+			mrt = rtlist_rt(map[i]);
+			if (mrt == NULL)
+				break;
 
-		SRPL_FOREACH_LOCKED(mrt, &an->an_rtlist, rt_next) {
 			if (prio != RTP_ANY &&
 			    (mrt->rt_priority & RTP_MASK) != (prio & RTP_MASK))
-				continue;
+				goto next;
 
-			if (!mpathok ||
-			    (mrt->rt_gateway->sa_len == gateway->sa_len &&
-			    !memcmp(mrt->rt_gateway, gateway, gateway->sa_len))){
+			if (!mpathok || sockaddreq(mrt->rt_gateway, gateway)) {
 				error = EEXIST;
 				goto leave;
 			}
+
+next:
+			i++;
 		}
 	}
 #endif /* SMALL_KERNEL */
@@ -727,12 +1189,12 @@ rtable_insert(unsigned int rtableid, str
 	rt->rt_flags &= ~RTF_MPATH;
 	rt->rt_dest = dst;
 	rt->rt_plen = plen;
-	SRPL_INSERT_HEAD_LOCKED(&rt_rc, &an->an_rtlist, rt, rt_next);
+
+	srp_swap_locked(&an->an_route, (void *)rtlist_map_rt(rt));
 
 	prev = art_insert(ar, an, addr, plen);
 	if (prev != an) {
-		SRPL_REMOVE_LOCKED(&rt_rc, &an->an_rtlist, rt, rtentry,
-		    rt_next);
+		rtfree(rt);
 
 		rt->rt_flags = rt_flags;
 		art_put(an);
@@ -742,40 +1204,12 @@ rtable_insert(unsigned int rtableid, str
 	}
 
 #ifndef SMALL_KERNEL
-	an = prev;
-
-	mrt = SRPL_FIRST_LOCKED(&an->an_rtlist);
-	KASSERT(mrt != NULL);
-	KASSERT((rt->rt_flags & RTF_MPATH) || mrt->rt_priority != prio);
-
-	/*
-	 * An ART node with the same destination/netmask already
-	 * exists, MPATH conflict must have been already
checked. - */ - if (rt->rt_flags & RTF_MPATH) { - /* - * Only keep the RTF_MPATH flag if two routes have - * the same gateway. - */ - rt->rt_flags &= ~RTF_MPATH; - SRPL_FOREACH_LOCKED(mrt, &an->an_rtlist, rt_next) { - if (mrt->rt_priority == prio) { - mrt->rt_flags |= RTF_MPATH; - rt->rt_flags |= RTF_MPATH; - } - } - } - - SRPL_INSERT_HEAD_LOCKED(&rt_rc, &an->an_rtlist, rt, rt_next); - - /* Put newly inserted entry at the right place. */ - rtable_mpath_reprio(rtableid, dst, mask, rt->rt_priority, rt); + error = rtlist_cow_insert(prev, rt); #else error = EEXIST; #endif /* SMALL_KERNEL */ } leave: - rtfree(rt); return (error); } @@ -788,10 +1222,7 @@ rtable_delete(unsigned int rtableid, str struct srp_ref sr; uint8_t *addr; int plen; -#ifndef SMALL_KERNEL - struct rtentry *mrt; - int npaths = 0; -#endif /* SMALL_KERNEL */ + int error; ar = rtable_get(rtableid, dst->sa_family); if (ar == NULL) @@ -806,39 +1237,24 @@ rtable_delete(unsigned int rtableid, str srp_leave(&sr); /* an can't go away while we have the lock */ /* Make sure we've got a perfect match. */ - if (an == NULL || an->an_plen != plen || - memcmp(an->an_dst, dst, dst->sa_len)) + if (an == NULL || an->an_plen != plen || !sockaddreq(an->an_dst, dst)) return (ESRCH); -#ifndef SMALL_KERNEL - /* - * If other multipath route entries are still attached to - * this ART node we only have to unlink it. 
- */ - SRPL_FOREACH_LOCKED(mrt, &an->an_rtlist, rt_next) - npaths++; - - if (npaths > 1) { - KASSERT(rt->rt_refcnt >= 1); - SRPL_REMOVE_LOCKED(&rt_rc, &an->an_rtlist, rt, rtentry, - rt_next); - - mrt = SRPL_FIRST_LOCKED(&an->an_rtlist); - an->an_dst = mrt->rt_dest; - if (npaths == 2) - mrt->rt_flags &= ~RTF_MPATH; - return (0); + error = rtlist_cow_delete(an, rt); + switch (error) { + case ESHUTDOWN: + if (art_delete(ar, an, addr, plen) == NULL) + panic("art_delete node gone under lock"); + art_put(an); + error = 0; + break; + case 0: + break; + default: + break; } -#endif /* SMALL_KERNEL */ - - if (art_delete(ar, an, addr, plen) == NULL) - return (ESRCH); - KASSERT(rt->rt_refcnt >= 1); - SRPL_REMOVE_LOCKED(&rt_rc, &an->an_rtlist, rt, rtentry, rt_next); - - art_put(an); - return (0); + return (error); } struct rtable_walk_cookie { @@ -853,16 +1269,43 @@ struct rtable_walk_cookie { int rtable_walk_helper(struct art_node *an, void *xrwc) { - struct srp_ref sr; struct rtable_walk_cookie *rwc = xrwc; + struct srp_ref sr; struct rtentry *rt; + unsigned long *map; + unsigned long entries[2]; int error = 0; + int i; + + entries[0] = (unsigned long)srp_enter(&sr, &an->an_route); + if (rtlist_is_list(entries[0])) { + map = rtlist_map(entries[0]); + i = 1; /* skip head count */ + } else { + entries[1] = 0UL; + map = entries; + i = 0; + } + + for (;;) { + rt = rtlist_rt(map[i]); + if (rt == NULL) + break; + + error = (*rwc->rwc_func)(rt, rwc->rwc_arg, rwc->rwc_rid); + if (error) + break; - SRPL_FOREACH(rt, &sr, &an->an_rtlist, rt_next) { - if ((error = (*rwc->rwc_func)(rt, rwc->rwc_arg, rwc->rwc_rid))) + i++; + if (i >= RTLIST_ENTRIES) break; + + if (rtlist_is_list(map[i])) { + map = rtlist_map(map[i]); + i = 0; + } } - SRPL_LEAVE(&sr); + srp_leave(&sr); return (error); } @@ -905,7 +1348,7 @@ rtable_mpath_reprio(unsigned int rtablei struct srp_ref sr; uint8_t *addr; int plen; - struct rtentry *mrt, *prt = NULL; + int error = 0; ar = rtable_get(rtableid, dst->sa_family); if 
(ar == NULL) @@ -920,71 +1363,21 @@ rtable_mpath_reprio(unsigned int rtablei srp_leave(&sr); /* an can't go away while we have the lock */ /* Make sure we've got a perfect match. */ - if (an == NULL || an->an_plen != plen || - memcmp(an->an_dst, dst, dst->sa_len)) - return (ESRCH); - - rtref(rt); /* keep rt alive in between remove and add */ - SRPL_REMOVE_LOCKED(&rt_rc, &an->an_rtlist, rt, rtentry, rt_next); - rt->rt_priority = prio; - - if ((mrt = SRPL_FIRST_LOCKED(&an->an_rtlist)) != NULL) { - /* - * Select the order of the MPATH routes. - */ - while (SRPL_NEXT_LOCKED(mrt, rt_next) != NULL) { - if (mrt->rt_priority > prio) - break; - prt = mrt; - mrt = SRPL_NEXT_LOCKED(mrt, rt_next); - } - - if (mrt->rt_priority > prio) { - /* - * ``rt'' has a higher (smaller) priority than - * ``mrt'' so put it before in the list. - */ - if (prt != NULL) { - SRPL_INSERT_AFTER_LOCKED(&rt_rc, prt, rt, - rt_next); - } else { - SRPL_INSERT_HEAD_LOCKED(&rt_rc, &an->an_rtlist, - rt, rt_next); - } - } else { - SRPL_INSERT_AFTER_LOCKED(&rt_rc, mrt, rt, rt_next); - } - } else { - SRPL_INSERT_HEAD_LOCKED(&rt_rc, &an->an_rtlist, rt, rt_next); - } - rtfree(rt); + if (an == NULL || !sockaddreq(an->an_dst, dst)) + error = ESRCH; + else + error = rtlist_cow_change(an, rt, prio); - return (0); + return (error); } struct rtentry * rtable_mpath_next(struct rtentry *rt) { KERNEL_ASSERT_LOCKED(); - return (SRPL_NEXT_LOCKED(rt, rt_next)); + return (NULL); /* XXX */ } #endif /* SMALL_KERNEL */ - -void -rtentry_ref(void *null, void *xrt) -{ - struct rtentry *rt = xrt; - - rtref(rt); -} - -void -rtentry_unref(void *null, void *xrt) -{ - struct rtentry *rt = xrt; - - rtfree(rt); -} /* * Return a pointer to the address (key). This is an heritage from the