Index: sys/pool.h
===================================================================
RCS file: /cvs/src/sys/sys/pool.h,v
retrieving revision 1.54
diff -u -p -r1.54 pool.h
--- sys/pool.h	1 Nov 2014 23:58:07 -0000	1.54
+++ sys/pool.h	15 Nov 2014 09:44:22 -0000
@@ -189,7 +189,7 @@ void		pool_set_constraints(struct pool *
 void		*pool_get(struct pool *, int) __malloc;
 void		pool_request_init(struct pool_request *,
		    void (*)(void *, void *), void *);
-void		pool_request(struct pool *, struct pool_request *);
+void		pool_request(struct pool *, struct pool_request *, int);
 void		pool_put(struct pool *, void *);
 int		pool_reclaim(struct pool *);
 void		pool_reclaim_all(void);
Index: kern/subr_pool.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_pool.c,v
retrieving revision 1.167
diff -u -p -r1.167 subr_pool.c
--- kern/subr_pool.c	15 Nov 2014 06:55:32 -0000	1.167
+++ kern/subr_pool.c	15 Nov 2014 09:44:22 -0000
@@ -32,6 +32,7 @@
  */
 
 #include <sys/param.h>
+#include <sys/atomic.h>
 #include <sys/systm.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
@@ -396,18 +397,15 @@ pool_request_init(struct pool_request *p
 }
 
 void
-pool_request(struct pool *pp, struct pool_request *pr)
+pool_request(struct pool *pp, struct pool_request *pr, int flags)
 {
 	mtx_enter(&pp->pr_requests_mtx);
 	TAILQ_INSERT_TAIL(&pp->pr_requests, pr, pr_entry);
-	pool_runqueue(pp, PR_NOWAIT);
+	pool_runqueue(pp, flags);
 	mtx_leave(&pp->pr_requests_mtx);
 }
 
-struct pool_get_memory {
-	struct mutex mtx;
-	void * volatile v;
-};
+u_int pool_sleeps;
 
 /*
  * Grab an item from the pool.
@@ -420,34 +418,34 @@ pool_get(struct pool *pp, int flags)
 
 	KASSERT(flags & (PR_WAITOK | PR_NOWAIT));
 
+	if ((flags & (PR_WAITOK|PR_LIMITFAIL)) != PR_WAITOK) {
+		mtx_enter(&pp->pr_mtx);
+		if (pp->pr_nout >= pp->pr_hardlimit) {
+			if (ISSET(flags, PR_NOWAIT|PR_LIMITFAIL))
+				goto fail;
+		} else if ((v = pool_do_get(pp, flags, &slowdown)) == NULL) {
+			if (ISSET(flags, PR_NOWAIT))
+				goto fail;
+		}
+		mtx_leave(&pp->pr_mtx);
-	mtx_enter(&pp->pr_mtx);
-	if (pp->pr_nout >= pp->pr_hardlimit) {
-		if (ISSET(flags, PR_NOWAIT|PR_LIMITFAIL))
-			goto fail;
-	} else if ((v = pool_do_get(pp, flags, &slowdown)) == NULL) {
-		if (ISSET(flags, PR_NOWAIT))
-			goto fail;
+		if (slowdown && ISSET(flags, PR_WAITOK))
+			yield();
 	}
-	mtx_leave(&pp->pr_mtx);
-
-	if (slowdown && ISSET(flags, PR_WAITOK))
-		yield();
 
 	if (v == NULL) {
-		struct pool_get_memory mem =
-		    { MUTEX_INITIALIZER(pp->pr_ipl), NULL };
+		struct sleep_state sls;
 		struct pool_request pr;
 
-		pool_request_init(&pr, pool_get_done, &mem);
-		pool_request(pp, &pr);
+		pool_request_init(&pr, pool_get_done, &v);
+		pool_request(pp, &pr, flags);
 
-		mtx_enter(&mem.mtx);
-		while (mem.v == NULL)
-			msleep(&mem, &mem.mtx, PSWP, pp->pr_wchan, 0);
-		mtx_leave(&mem.mtx);
+		atomic_inc_int(&pool_sleeps);
 
-		v = mem.v;
+		while (v == NULL) {
+			sleep_setup(&sls, &v, PWAIT, pp->pr_wchan);
+			sleep_finish(&sls, (v == NULL));
+		}
 	}
 
 	if (ISSET(flags, PR_ZERO))
@@ -462,15 +460,12 @@ fail:
 }
 
 void
-pool_get_done(void *xmem, void *v)
+pool_get_done(void *ctx, void *v)
 {
-	struct pool_get_memory *mem = xmem;
-
-	mtx_enter(&mem->mtx);
-	mem->v = v;
-	mtx_leave(&mem->mtx);
+	void **vp = ctx;
 
-	wakeup_one(mem);
+	*vp = v;
+	wakeup_one(vp);
 }
 
 void
@@ -486,6 +481,7 @@ pool_runqueue(struct pool *pp, int flags
 		return;
 
 	do {
+		int slowdown = 0;
 		pp->pr_requesting = 1;
 
 		/* no TAILQ_JOIN? :( */
@@ -501,18 +497,19 @@ pool_runqueue(struct pool *pp, int flags
 		mtx_enter(&pp->pr_mtx);
 		pr = TAILQ_FIRST(&prl);
 		while (pr != NULL) {
-			int slowdown = 0;
-
 			if (pp->pr_nout >= pp->pr_hardlimit)
 				break;
 
 			pr->pr_item = pool_do_get(pp, flags, &slowdown);
-			if (pr->pr_item == NULL) /* || slowdown ? */
+			if (pr->pr_item == NULL)
 				break;
 
 			pr = TAILQ_NEXT(pr, pr_entry);
 		}
		mtx_leave(&pp->pr_mtx);
+
+		if (slowdown && ISSET(flags, PR_WAITOK))
+			yield();
 
 		while ((pr = TAILQ_FIRST(&prl)) != NULL &&
 		    pr->pr_item != NULL) {