Index: uvm.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm.h,v
retrieving revision 1.57
diff -u -p -r1.57 uvm.h
--- uvm.h	3 Oct 2014 17:41:00 -0000	1.57
+++ uvm.h	22 Apr 2015 04:19:34 -0000
@@ -59,8 +59,6 @@ struct uvm {
 	/* Lock order: pageqlock, then fpageqlock. */
 	struct mutex fpageqlock;	/* lock for free page q + pdaemon */
 	boolean_t page_init_done;	/* TRUE if uvm_page_init() finished */
-	boolean_t page_idle_zero;	/* TRUE if we should try to zero
-					   pages in the idle loop */
 	struct uvm_pmr_control pmr_control;	/* pmemrange data */
 
 	/* page daemon trigger */
Index: uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.137
diff -u -p -r1.137 uvm_page.c
--- uvm_page.c	14 Mar 2015 03:38:53 -0000	1.137
+++ uvm_page.c	22 Apr 2015 04:19:34 -0000
@@ -100,11 +100,6 @@ int vm_nphysseg = 0;	/* XXXCDC: uvm.n
  * of the things necessary to do idle page zero'ing efficiently.
  * We therefore provide a way to disable it from machdep code here.
  */
-/*
- * XXX disabled until we can find a way to do this without causing
- * problems for either cpu caches or DMA latency.
- */
-boolean_t vm_page_zero_enable = FALSE;
 
 /*
  * local variables
@@ -154,7 +149,6 @@ uvm_pageinsert(struct vm_page *pg)
 static __inline void
 uvm_pageremove(struct vm_page *pg)
 {
-	KASSERT(pg->pg_flags & PG_TABLED);
 
 	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
 
@@ -289,9 +283,6 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
 	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
 
-	/* determine if we should zero pages in the idle loop. */
-	uvm.page_idle_zero = vm_page_zero_enable;
-
 	uvm.page_init_done = TRUE;
 }
 
@@ -1065,9 +1056,6 @@ uvm_pagefree(struct vm_page *pg)
 #endif
 
 	uvm_pmr_freepages(pg, 1);
-
-	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
-		uvm.page_idle_zero = vm_page_zero_enable;
 }
 
 /*