Index: uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
diff -u -p -r1.331 uvm_map.c
--- uvm_map.c	20 Oct 2024 11:28:17 -0000	1.331
+++ uvm_map.c	20 Oct 2024 11:54:09 -0000
@@ -286,7 +286,7 @@ vaddr_t uvm_maxkaddr;
 	do {								\
 		if ((_map)->ref_count > 0) {				\
 			if (((_map)->flags & VM_MAP_INTRSAFE) == 0)	\
-				rw_assert_wrlock(&(_map)->lock);	\
+				rw_assert_wrlock(&(_map)->lock.rrwl_lock); \
 			else						\
 				MUTEX_ASSERT_LOCKED(&(_map)->mtx);	\
 		}							\
@@ -2114,8 +2114,6 @@ uvm_map_pageable_wire(struct vm_map *map
 #ifdef DIAGNOSTIC
 	timestamp_save = map->timestamp;
 #endif
-	vm_map_busy(map);
-	vm_map_unlock(map);
 
 	error = 0;
 	for (iter = first; error == 0 && iter != end;
@@ -2128,9 +2126,6 @@ uvm_map_pageable_wire(struct vm_map *map
 		    iter->protection);
 	}
 
-	vm_map_lock(map);
-	vm_map_unbusy(map);
-
 	if (error) {
 #ifdef DIAGNOSTIC
 		if (timestamp_save != map->timestamp)
@@ -2418,11 +2413,10 @@ uvm_map_setup(struct vm_map *map, pmap_t
 	map->s_start = map->s_end = 0; /* Empty stack area by default. */
 	map->flags = flags;
 	map->timestamp = 0;
-	map->busy = NULL;
 	if (flags & VM_MAP_ISVMSPACE)
-		rw_init_flags(&map->lock, "vmmaplk", RWL_DUPOK);
+		rrw_init_flags(&map->lock, "vmmaplk", RWL_DUPOK);
 	else
-		rw_init(&map->lock, "kmmaplk");
+		rrw_init(&map->lock, "kmmaplk");
 	mtx_init(&map->mtx, IPL_VM);
 	mtx_init(&map->flags_lock, IPL_VM);
 
@@ -4467,8 +4461,6 @@ uvm_map_clean(struct vm_map *map, vaddr_
 		}
 	}
 
-	vm_map_busy(map);
-	vm_map_unlock(map);
 	error = 0;
 	for (entry = first; entry != NULL && entry->start < end;
 	    entry = RBT_NEXT(uvm_map_addr, entry)) {
@@ -4487,7 +4479,7 @@ uvm_map_clean(struct vm_map *map, vaddr_
 			goto flush_object;
 
 		if (imut) {
-			vm_map_unbusy(map);
+			vm_map_unlock(map);
 			return EPERM;
 		}
 
@@ -4585,7 +4577,7 @@ flush_object:
 		}
 	}
 
-	vm_map_unbusy(map);
+	vm_map_unlock(map);
 	return error;
 }
 
@@ -5207,26 +5199,9 @@ vm_map_lock_try_ln(struct vm_map *map, c
 		if (!mtx_enter_try(&map->mtx))
 			return FALSE;
 	} else {
-		struct proc *busy;
-
-		mtx_enter(&map->flags_lock);
-		busy = map->busy;
-		mtx_leave(&map->flags_lock);
-		if (busy != NULL && busy != curproc)
-			return FALSE;
-
-		rv = rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
+		rv = rrw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
 		if (rv != 0)
 			return FALSE;
-
-		/* to be sure, to be sure */
-		mtx_enter(&map->flags_lock);
-		busy = map->busy;
-		mtx_leave(&map->flags_lock);
-		if (busy != NULL && busy != curproc) {
-			rw_exit(&map->lock);
-			return FALSE;
-		}
 	}
 
 	map->timestamp++;
@@ -5241,37 +5216,12 @@ void
 vm_map_lock_ln(struct vm_map *map, char *file, int line)
 {
 	if ((map->flags & VM_MAP_INTRSAFE) == 0) {
-		mtx_enter(&map->flags_lock);
-		for (;;) {
-			while (map->busy != NULL && map->busy != curproc) {
-				map->nbusy++;
-				msleep_nsec(&map->busy, &map->mtx,
-				    PVM, vmmapbsy, INFSLP);
-				map->nbusy--;
-			}
-			mtx_leave(&map->flags_lock);
-
-			rw_enter_write(&map->lock);
-
-			/* to be sure, to be sure */
-			mtx_enter(&map->flags_lock);
-			if (map->busy != NULL && map->busy != curproc) {
-				/* go around again */
-				rw_exit_write(&map->lock);
-			} else {
-				/* we won */
-				break;
-			}
-		}
-		mtx_leave(&map->flags_lock);
+		rrw_enter(&map->lock, RW_WRITE);
 	} else {
 		mtx_enter(&map->mtx);
 	}
 
-	if (map->busy != curproc) {
-		KASSERT(map->busy == NULL);
-		map->timestamp++;
-	}
+	map->timestamp++;
 	LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
 	uvm_tree_sanity(map, file, line);
 	uvm_tree_size_chk(map, file, line);
@@ -5281,7 +5231,7 @@ void
 vm_map_lock_read_ln(struct vm_map *map, char *file, int line)
 {
 	if ((map->flags & VM_MAP_INTRSAFE) == 0)
-		rw_enter_read(&map->lock);
+		rrw_enter(&map->lock, RW_READ);
 	else
 		mtx_enter(&map->mtx);
 	LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
@@ -5292,12 +5242,11 @@ vm_map_lock_read_ln(struct vm_map *map,
 void
 vm_map_unlock_ln(struct vm_map *map, char *file, int line)
 {
-	KASSERT(map->busy == NULL || map->busy == curproc);
 	uvm_tree_sanity(map, file, line);
 	uvm_tree_size_chk(map, file, line);
 	LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
 	if ((map->flags & VM_MAP_INTRSAFE) == 0)
-		rw_exit(&map->lock);
+		rrw_exit(&map->lock);
 	else
 		mtx_leave(&map->mtx);
 }
@@ -5309,46 +5258,17 @@ vm_map_unlock_read_ln(struct vm_map *map
 	/* XXX: RO */ uvm_tree_size_chk(map, file, line);
 	LPRINTF(("map unlock: %p (at %s %d)\n", map, file, line));
 	if ((map->flags & VM_MAP_INTRSAFE) == 0)
-		rw_exit_read(&map->lock);
+		rrw_exit(&map->lock);
 	else
 		mtx_leave(&map->mtx);
 }
 
 void
-vm_map_busy_ln(struct vm_map *map, char *file, int line)
-{
-	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-	KASSERT(rw_write_held(&map->lock));
-	KASSERT(map->busy == NULL);
-
-	mtx_enter(&map->flags_lock);
-	map->busy = curproc;
-	mtx_leave(&map->flags_lock);
-}
-
-void
-vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
-{
-	unsigned int nbusy;
-
-	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-	KASSERT(map->busy == curproc);
-
-	mtx_enter(&map->flags_lock);
-	nbusy = map->nbusy;
-	map->busy = NULL;
-	mtx_leave(&map->flags_lock);
-
-	if (nbusy > 0)
-		wakeup(&map->busy);
-}
-
-void
 vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
 {
 	LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, line));
 	if ((map->flags & VM_MAP_INTRSAFE) == 0)
-		rw_assert_anylock(&map->lock);
+		rw_assert_anylock(&map->lock.rrwl_lock);
 	else
 		MUTEX_ASSERT_LOCKED(&map->mtx);
 }
@@ -5359,7 +5279,7 @@ vm_map_assert_wrlock_ln(struct vm_map *m
 	LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
 	if ((map->flags & VM_MAP_INTRSAFE) == 0) {
 		splassert(IPL_NONE);
-		rw_assert_wrlock(&map->lock);
+		rw_assert_wrlock(&map->lock.rrwl_lock);
 	} else
 		MUTEX_ASSERT_LOCKED(&map->mtx);
 }
Index: uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
diff -u -p -r1.91 uvm_map.h
--- uvm_map.h	20 Oct 2024 11:28:17 -0000	1.91
+++ uvm_map.h	20 Oct 2024 11:54:09 -0000
@@ -258,8 +258,6 @@
 	int	ref_count;	/* [a] Reference count */
 	int	flags;		/* [f] flags */
 	unsigned int timestamp;	/* Version number */
-	struct proc *busy;	/* [f] thread holding map busy*/
-	unsigned int nbusy;	/* [f] waiters for busy */
 
 	vaddr_t	min_offset;	/* [I] First address in map. */
 	vaddr_t	max_offset;	/* [I] Last address in map. */
@@ -305,7 +303,7 @@ struct vm_map {
 	 * XXX struct mutex changes size because of compile options, so
 	 * place after fields which are inspected by libkvm / procmap(8)
 	 */
-	struct rwlock lock;		/* Non-intrsafe lock */
+	struct rrwlock lock;		/* Non-intrsafe lock */
 	struct mutex mtx;		/* Intrsafe lock */
 	struct mutex flags_lock;	/* flags lock */
 };
@@ -391,10 +389,6 @@ int uvm_map_fill_vmmap(struct vm_map *,
  *
  *	vm_map_unlock_read: release a shared lock on a map.
  *
- *	vm_map_busy: mark a map as busy.
- *
- *	vm_map_unbusy: clear busy status on a map.
- *
  */
 
 boolean_t	vm_map_lock_try_ln(struct vm_map*, char*, int);
@@ -402,8 +396,6 @@ void vm_map_lock_ln(struct vm_map*, cha
 void		vm_map_lock_read_ln(struct vm_map*, char*, int);
 void		vm_map_unlock_ln(struct vm_map*, char*, int);
 void		vm_map_unlock_read_ln(struct vm_map*, char*, int);
-void		vm_map_busy_ln(struct vm_map*, char*, int);
-void		vm_map_unbusy_ln(struct vm_map*, char*, int);
 void		vm_map_assert_anylock_ln(struct vm_map*, char*, int);
 void		vm_map_assert_wrlock_ln(struct vm_map*, char*, int);
 
@@ -413,8 +405,6 @@ void vm_map_assert_wrlock_ln(struct vm_
 #define vm_map_lock_read(map)	vm_map_lock_read_ln(map, __FILE__, __LINE__)
 #define vm_map_unlock(map)	vm_map_unlock_ln(map, __FILE__, __LINE__)
 #define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, __FILE__, __LINE__)
-#define vm_map_busy(map)	vm_map_busy_ln(map, __FILE__, __LINE__)
-#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, __FILE__, __LINE__)
 #define vm_map_assert_anylock(map)	\
 		vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
 #define vm_map_assert_wrlock(map)	\
 		vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
@@ -425,8 +415,6 @@ void vm_map_assert_wrlock_ln(struct vm_
 #define vm_map_lock_read(map)	vm_map_lock_read_ln(map, NULL, 0)
 #define vm_map_unlock(map)	vm_map_unlock_ln(map, NULL, 0)
 #define vm_map_unlock_read(map)	vm_map_unlock_read_ln(map, NULL, 0)
-#define vm_map_busy(map)	vm_map_busy_ln(map, NULL, 0)
-#define vm_map_unbusy(map)	vm_map_unbusy_ln(map, NULL, 0)
 #define vm_map_assert_anylock(map)	vm_map_assert_anylock_ln(map, NULL, 0)
 #define vm_map_assert_wrlock(map)	vm_map_assert_wrlock_ln(map, NULL, 0)
 #endif
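A note on the change (not part of the diff): the busy/unbusy dance let uvm_map_pageable_wire() and uvm_map_clean() drop the map lock around the wiring and flushing loops while other threads stalled in vm_map_lock() until the busy owner was done. Switching the map's "lock" from struct rwlock to struct rrwlock (the kernel's recursive rwlock, rrw_init/rrw_enter/rrw_exit) appears to make that unnecessary: the owning thread simply keeps the write lock and may re-enter it from a nested path. Below is a minimal userspace sketch of the write-side semantics being relied on, assuming POSIX threads; the names (struct urrw, urrw_enter_write, ...) are illustrative only, this is not the kernel's rrwlock implementation, and the read side is omitted.

/*
 * Userspace analogue of a recursive write lock: the owning thread may
 * re-enter the lock and must call exit once per enter.  Sketch only.
 */
#include <pthread.h>

struct urrw {
	pthread_mutex_t	mtx;	/* protects owner/depth */
	pthread_cond_t	cv;	/* other writers wait here */
	pthread_t	owner;	/* valid only while depth > 0 */
	int		depth;	/* recursion count, 0 == free */
};

void
urrw_init(struct urrw *l)
{
	pthread_mutex_init(&l->mtx, NULL);
	pthread_cond_init(&l->cv, NULL);
	l->depth = 0;
}

void
urrw_enter_write(struct urrw *l)
{
	pthread_mutex_lock(&l->mtx);
	if (l->depth > 0 && pthread_equal(l->owner, pthread_self())) {
		l->depth++;		/* recursive re-entry by owner */
		pthread_mutex_unlock(&l->mtx);
		return;
	}
	while (l->depth > 0)		/* wait for the current owner */
		pthread_cond_wait(&l->cv, &l->mtx);
	l->owner = pthread_self();
	l->depth = 1;
	pthread_mutex_unlock(&l->mtx);
}

void
urrw_exit_write(struct urrw *l)
{
	pthread_mutex_lock(&l->mtx);
	if (--l->depth == 0)		/* last exit releases the lock */
		pthread_cond_broadcast(&l->cv);
	pthread_mutex_unlock(&l->mtx);
}

With a lock of this shape, "lock the map, call into code that locks it again, unlock once per lock" works without first marking the map busy and dropping the lock, which is what lets the diff delete vm_map_busy()/vm_map_unbusy() and the struct proc *busy / nbusy bookkeeping.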