Index: sys/systm.h
===================================================================
RCS file: /cvs/src/sys/sys/systm.h,v
retrieving revision 1.106
diff -u -p -r1.106 systm.h
--- sys/systm.h	10 Dec 2014 15:29:53 -0000	1.106
+++ sys/systm.h	21 Jan 2015 07:22:04 -0000
@@ -157,6 +157,8 @@ void	vfs_op_init(void);
 
 int	seltrue(dev_t dev, int which, struct proc *);
 int	selfalse(dev_t dev, int which, struct proc *);
+
+void	*hashtblinit(int, size_t, void (*)(void *), int, int, u_long *);
 void	*hashinit(int, int, int, u_long *);
 int	sys_nosys(struct proc *, void *, register_t *);
 
Index: kern/kern_subr.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_subr.c,v
retrieving revision 1.42
diff -u -p -r1.42 kern_subr.c
--- kern/kern_subr.c	10 Dec 2014 15:29:53 -0000	1.42
+++ kern/kern_subr.c	21 Jan 2015 07:22:04 -0000
@@ -156,23 +156,44 @@ again:
 /*
  * General routine to allocate a hash table.
  */
 void *
-hashinit(int elements, int type, int flags, u_long *hashmask)
+hashtblinit(int elements, size_t size, void (*init)(void *),
+    int type, int flags, u_long *hashmask)
 {
 	u_long hashsize, i;
-	LIST_HEAD(generic, generic) *hashtbl;
+	u_int8_t *hashtbl, *bucket;
 
 	if (elements <= 0)
-		panic("hashinit: bad cnt");
+		panic("%s: bad count", __func__);
 	for (hashsize = 1; hashsize < elements; hashsize <<= 1)
 		continue;
-	hashtbl = mallocarray(hashsize, sizeof(*hashtbl), type, flags);
+	hashtbl = mallocarray(hashsize, size, type, flags);
 	if (hashtbl == NULL)
-		return NULL;
-	for (i = 0; i < hashsize; i++)
-		LIST_INIT(&hashtbl[i]);
+		return (NULL);
+	for (i = 0; i < hashsize; i++) {
+		bucket = &hashtbl[i * size];
+		init(bucket);
+	}
 	*hashmask = hashsize - 1;
 	return (hashtbl);
+}
+
+LIST_HEAD(generic_list, generic_type);
+
+static void
+hashinit_list(void *bucket)
+{
+	struct generic_list *head = bucket;
+
+	LIST_INIT(head);
+}
+
+void *
+hashinit(int elements, int type, int flags, u_long *hashmask)
+{
+	return (hashtblinit(elements,
+	    sizeof(struct generic_list), hashinit_list,
+	    type, flags, hashmask));
 }
 
 /*
Index: ufs/ufs/ufs_ihash.c
===================================================================
RCS file: /cvs/src/sys/ufs/ufs/ufs_ihash.c,v
retrieving revision 1.21
diff -u -p -r1.21 ufs_ihash.c
--- ufs/ufs/ufs_ihash.c	9 Jan 2015 05:01:57 -0000	1.21
+++ ufs/ufs/ufs_ihash.c	21 Jan 2015 07:22:04 -0000
@@ -36,6 +36,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/lock.h>
+#include <sys/rwlock.h>
 #include <sys/proc.h>
 #include <sys/vnode.h>
 #include <sys/malloc.h>
@@ -47,10 +48,17 @@
 
 /*
  * Structures associated with inode cacheing.
  */
-LIST_HEAD(ihashhead, inode) *ihashtbl;
+struct ihashhead {
+	LIST_HEAD(, inode) list;
+	struct rwlock lock;
+};
+
+struct ihashhead *ihashtbl;
+
 u_long	ihash;		/* size of hash table - 1 */
 SIPHASH_KEY ihashkey;
 
+void ufs_ihashheadinit(void *bucket);
 struct ihashhead *ufs_ihash(dev_t, ufsino_t);
 #define INOHASH(device, inum) ufs_ihash((device), (inum))
@@ -70,9 +78,20 @@ ufs_ihash(dev_t dev, ufsino_t inum)
 /*
  * Initialize inode hash table.
  */
 void
+ufs_ihashheadinit(void *bucket)
+{
+	struct ihashhead *head = bucket;
+
+	LIST_INIT(&head->list);
+	rw_init(&head->lock, "ufsihash");
+}
+
+void
 ufs_ihashinit(void)
 {
-	ihashtbl = hashinit(initialvnodes, M_UFSMNT, M_WAITOK, &ihash);
+	ihashtbl = hashtblinit(initialvnodes,
+	    sizeof(*ihashtbl), ufs_ihashheadinit,
+	    M_UFSMNT, M_WAITOK, &ihash);
 	arc4random_buf(&ihashkey, sizeof(ihashkey));
 }
@@ -86,16 +105,17 @@ ufs_ihashlookup(dev_t dev, ufsino_t inum
 	struct inode *ip;
 	struct ihashhead *ipp;
 
-	/* XXXLOCKING lock hash list */
 	ipp = INOHASH(dev, inum);
-	LIST_FOREACH(ip, ipp, i_hash) {
+
+	rw_enter_read(&ipp->lock);
+	LIST_FOREACH(ip, &ipp->list, i_hash) {
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
 	}
-	/* XXXLOCKING unlock hash list? */
+	rw_exit_read(&ipp->lock);
 	if (ip)
 		return (ITOV(ip));
-	return (NULLVP);
+	return (NULL);
 }
 
 /*
@@ -108,24 +128,15 @@ struct vnode *
 ufs_ihashget(dev_t dev, ufsino_t inum)
 {
 	struct proc *p = curproc;
-	struct ihashhead *ipp;
-	struct inode *ip;
 	struct vnode *vp;
 
-loop:
-	/* XXXLOCKING lock hash list */
-	ipp = INOHASH(dev, inum);
-	LIST_FOREACH(ip, ipp, i_hash) {
-		if (inum == ip->i_number && dev == ip->i_dev) {
-			vp = ITOV(ip);
-			/* XXXLOCKING unlock hash list? */
-			if (vget(vp, LK_EXCLUSIVE, p))
-				goto loop;
-			return (vp);
-		}
-	}
-	/* XXXLOCKING unlock hash list? */
-	return (NULL);
+	do {
+		vp = ufs_ihashlookup(dev, inum);
+		if (vp == NULL)
+			return (NULL);
+	} while (vget(vp, LK_EXCLUSIVE, p));
+
+	return (vp);
 }
 
 /*
@@ -141,19 +152,20 @@ ufs_ihashins(struct inode *ip)
 
 	/* lock the inode, then put it on the appropriate hash list */
 	lockmgr(&ip->i_lock, LK_EXCLUSIVE, NULL);
 
-	/* XXXLOCKING lock hash list */
 	ipp = INOHASH(dev, inum);
-	LIST_FOREACH(curip, ipp, i_hash) {
+
+	rw_enter_write(&ipp->lock);
+	LIST_FOREACH(curip, &ipp->list, i_hash) {
 		if (inum == curip->i_number && dev == curip->i_dev) {
-			/* XXXLOCKING unlock hash list? */
+			rw_exit_write(&ipp->lock);
 			lockmgr(&ip->i_lock, LK_RELEASE, NULL);
 			return (EEXIST);
 		}
 	}
 	SET(ip->i_flag, IN_HASHED);
-	LIST_INSERT_HEAD(ipp, ip, i_hash);
-	/* XXXLOCKING unlock hash list? */
+	LIST_INSERT_HEAD(&ipp->list, ip, i_hash);
+	rw_exit_write(&ipp->lock);
 
 	return (0);
 }
@@ -165,17 +177,14 @@ ufs_ihashins(struct inode *ip)
 void
 ufs_ihashrem(struct inode *ip)
 {
-	/* XXXLOCKING lock hash list */
+	struct ihashhead *ipp;
 
-	if (ip->i_hash.le_prev == NULL)
+	if (!ISSET(ip->i_flag, IN_HASHED))
 		return;
-	if (ISSET(ip->i_flag, IN_HASHED)) {
-		LIST_REMOVE(ip, i_hash);
-		CLR(ip->i_flag, IN_HASHED);
-	}
-#ifdef DIAGNOSTIC
-	ip->i_hash.le_next = NULL;
-	ip->i_hash.le_prev = NULL;
-#endif
-	/* XXXLOCKING unlock hash list? */
+
+	ipp = INOHASH(ip->i_dev, ip->i_number);
+	rw_enter_write(&ipp->lock);
+	LIST_REMOVE(ip, i_hash);
+	CLR(ip->i_flag, IN_HASHED);
+	rw_exit_write(&ipp->lock);
 }