Source-Changes-HG archive
[src/trunk]: src/sys/uvm Start trying to reduce cache misses on vm_page durin...
details: https://anonhg.NetBSD.org/src/rev/8abe0efb85c0
branches: trunk
changeset: 1010200:8abe0efb85c0
user: ad <ad%NetBSD.org@localhost>
date: Sun May 17 19:38:16 2020 +0000
description:
Start trying to reduce cache misses on vm_page during fault processing.
- Make PGO_LOCKED getpages imply PGO_NOBUSY and remove the latter. Mark
pages busy only when there's actually I/O to do.
- When doing COW on a uvm_object, don't mess with neighbouring pages. In
all likelihood they're already entered.
- Don't mess with neighbouring VAs that have existing mappings as replacing
those mappings with the same can be quite costly.
- Don't enqueue pages for neighbour faults if they're already enqueued, and
don't activate centre pages unless uvmpdpol says it's useful.
Also:
- Make PGO_LOCKED getpages on UAOs work more like vnodes: do gang lookup in
the radix tree, and don't allocate new pages.
- Fix many assertion failures around faults/loans with tmpfs.
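As a concrete illustration of the first point, here is a minimal sketch (not part of this commit) of the caller-side contract once PGO_LOCKED getpages stops busying resident pages: a read fault can hold just a read lock on the object, and the pages handed back are not PG_BUSY, so error paths no longer need uvm_page_unbusy(). The helper name peek_resident_pages is made up for illustration; PGO_LOCKED, VOP_GETPAGES, vmobjlock and the protection/advice constants are existing NetBSD kernel interfaces.

/*
 * Illustrative sketch only -- not part of the patch.  The function name
 * "peek_resident_pages" is hypothetical; everything it calls is an
 * existing kernel interface.
 */
#include <sys/param.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

static int
peek_resident_pages(struct vnode *vp, voff_t off, struct vm_page **pgs,
    int *npages)
{
	struct uvm_object *uobj = &vp->v_uobj;
	int error;

	/* A read lock on the object is now enough for PGO_LOCKED reads. */
	rw_enter(uobj->vmobjlock, RW_READER);
	error = VOP_GETPAGES(vp, off, pgs, npages, 0 /* centeridx */,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
	/*
	 * Resident pages come back without PG_BUSY set: PGO_LOCKED now
	 * implies the behaviour PGO_NOBUSY used to request, so callers
	 * such as nfs_getpages() no longer uvm_page_unbusy() them on
	 * their error paths.
	 */
	rw_exit(uobj->vmobjlock);
	return error;
}

Similarly, a condensed, illustrative version of the reworked PGO_LOCKED path in uao_get() (see the uvm_aobj.c hunks below): one gang lookup in the radix tree replaces the per-offset uvm_pagelookup() calls, and the path no longer allocates zero-filled pages or marks anything PG_BUSY. The helper name peek_aobj_pages is hypothetical; the uvm_page_array calls match those used in the diff.

/* Additionally needs <uvm/uvm_page_array.h> on top of the includes above. */
#include <uvm/uvm_page_array.h>

static int
peek_aobj_pages(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps, int maxpages)
{
	struct uvm_page_array a;
	struct vm_page *pg;
	int lcv, gotpages = 0;

	/* Caller holds uobj->vmobjlock (read is enough) and has zeroed pps[]. */
	uvm_page_array_init(&a);
	for (lcv = 0; lcv < maxpages; lcv++) {
		/* Gang lookup: one radix tree walk fills a batch of pages. */
		pg = uvm_page_array_fill_and_peek(&a, uobj,
		    offset + (lcv << PAGE_SHIFT), maxpages, 0);
		if (pg == NULL)
			break;
		/* The batch may skip holes; resync lcv to the page we got. */
		lcv = (pg->offset - offset) >> PAGE_SHIFT;
		if (lcv >= maxpages)
			break;
		uvm_page_array_advance(&a);
		if ((pg->flags & PG_BUSY) != 0)
			continue;	/* busy pages are of no use here */
		pps[lcv] = pg;		/* handed back without PG_BUSY */
		gotpages++;
	}
	uvm_page_array_fini(&a);
	return gotpages;
}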
diffstat:
 sys/miscfs/genfs/genfs_io.c     |   17 +-
 sys/nfs/nfs_bio.c               |    9 +-
 sys/uvm/uvm_aobj.c              |   89 ++++-----
 sys/uvm/uvm_fault.c             |  347 +++++++++++++++------------------
 sys/uvm/uvm_loan.c              |   65 ++++---
 sys/uvm/uvm_page.h              |   16 +-
 sys/uvm/uvm_pager.h             |    3 +-
 sys/uvm/uvm_pdpolicy.h          |    3 +-
 sys/uvm/uvm_pdpolicy_clock.c    |   27 ++-
 sys/uvm/uvm_pdpolicy_clockpro.c |   12 +-
10 files changed, 269 insertions(+), 319 deletions(-)
diffs (truncated from 1288 to 300 lines):
diff -r 6813d2717cb3 -r 8abe0efb85c0 sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c Sun May 17 19:37:15 2020 +0000
+++ b/sys/miscfs/genfs/genfs_io.c Sun May 17 19:38:16 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_io.c,v 1.95 2020/03/22 18:32:41 ad Exp $ */
+/* $NetBSD: genfs_io.c,v 1.96 2020/05/17 19:38:16 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.95 2020/03/22 18:32:41 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.96 2020/05/17 19:38:16 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -128,12 +128,12 @@
/*
* the object must be locked. it can only be a read lock when
- * processing a read fault with PGO_LOCKED | PGO_NOBUSY.
+ * processing a read fault with PGO_LOCKED.
*/
KASSERT(rw_lock_held(uobj->vmobjlock));
KASSERT(rw_write_held(uobj->vmobjlock) ||
- ((~flags & (PGO_LOCKED | PGO_NOBUSY)) == 0 && !memwrite));
+ ((flags & PGO_LOCKED) != 0 && !memwrite));
#ifdef DIAGNOSTIC
if ((flags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
@@ -237,9 +237,8 @@
#endif /* defined(DEBUG) */
nfound = uvn_findpages(uobj, origoffset, &npages,
ap->a_m, NULL,
- UFP_NOWAIT | UFP_NOALLOC |
- (memwrite ? UFP_NORDONLY : 0) |
- ((flags & PGO_NOBUSY) != 0 ? UFP_NOBUSY : 0));
+ UFP_NOWAIT | UFP_NOALLOC | UFP_NOBUSY |
+ (memwrite ? UFP_NORDONLY : 0));
KASSERT(npages == *ap->a_count);
if (nfound == 0) {
error = EBUSY;
@@ -250,10 +249,6 @@
* the file behind us.
*/
if (!genfs_node_rdtrylock(vp)) {
- if ((flags & PGO_NOBUSY) == 0) {
- genfs_rel_pages(ap->a_m, npages);
- }
-
/*
* restore the array.
*/
diff -r 6813d2717cb3 -r 8abe0efb85c0 sys/nfs/nfs_bio.c
--- a/sys/nfs/nfs_bio.c Sun May 17 19:37:15 2020 +0000
+++ b/sys/nfs/nfs_bio.c Sun May 17 19:38:16 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: nfs_bio.c,v 1.196 2020/04/23 21:47:08 ad Exp $ */
+/* $NetBSD: nfs_bio.c,v 1.197 2020/05/17 19:38:16 ad Exp $ */
/*
* Copyright (c) 1989, 1993
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.196 2020/04/23 21:47:08 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.197 2020/05/17 19:38:16 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_nfs.h"
@@ -1260,7 +1260,6 @@
bool v3 = NFS_ISV3(vp);
bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
bool locked = (ap->a_flags & PGO_LOCKED) != 0;
- bool nobusy = (ap->a_flags & PGO_NOBUSY);
/*
* XXX NFS wants to modify the pages below and that can't be done
@@ -1348,14 +1347,10 @@
if (!mutex_tryenter(&np->n_commitlock)) {
/*
- * Since PGO_LOCKED is set, we need to unbusy
- * all pages fetched by genfs_getpages() above,
* tell the caller that there are no pages
* available and put back original pgs array.
*/
- if (nobusy == false)
- uvm_page_unbusy(pgs, npages);
*ap->a_count = 0;
memcpy(pgs, opgs,
npages * sizeof(struct vm_pages *));
diff -r 6813d2717cb3 -r 8abe0efb85c0 sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c Sun May 17 19:37:15 2020 +0000
+++ b/sys/uvm/uvm_aobj.c Sun May 17 19:38:16 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.140 2020/05/15 22:27:04 ad Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.141 2020/05/17 19:38:17 ad Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.140 2020/05/15 22:27:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.141 2020/05/17 19:38:17 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
@@ -250,6 +250,8 @@
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
struct uao_swhash_elt *elt;
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
+
/*
* if noswap flag is set, then we never return a slot
*/
@@ -293,6 +295,7 @@
(uintptr_t)aobj, pageidx, slot, 0);
KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
/*
* if noswap flag is set, then we can't set a non-zero slot.
@@ -365,6 +368,7 @@
{
struct uvm_object *uobj = &aobj->u_obj;
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
KASSERT(rw_write_held(uobj->vmobjlock));
uao_dropswap_range(uobj, 0, 0);
rw_exit(uobj->vmobjlock);
@@ -665,6 +669,7 @@
voff_t curoff;
UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
KASSERT(rw_write_held(uobj->vmobjlock));
if (flags & PGO_ALLPAGES) {
@@ -808,13 +813,13 @@
/*
* the object must be locked. it can only be a read lock when
- * processing a read fault with PGO_LOCKED | PGO_NOBUSY.
+ * processing a read fault with PGO_LOCKED.
*/
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
KASSERT(rw_lock_held(uobj->vmobjlock));
KASSERT(rw_write_held(uobj->vmobjlock) ||
- ((~flags & (PGO_LOCKED | PGO_NOBUSY)) == 0 &&
- (access_type & VM_PROT_WRITE) == 0));
+ ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));
/*
* get number of pages
@@ -827,7 +832,7 @@
*/
if (flags & PGO_LOCKED) {
- krw_t lktype = rw_lock_op(uobj->vmobjlock);
+ struct uvm_page_array a;
/*
* step 1a: get pages that are already resident. only do
@@ -835,77 +840,56 @@
* time through).
*/
+ uvm_page_array_init(&a);
done = true; /* be optimistic */
gotpages = 0; /* # of pages we got so far */
- for (lcv = 0, current_offset = offset ; lcv < maxpages ;
- lcv++, current_offset += PAGE_SIZE) {
- /* do we care about this page? if not, skip it */
- if (pps[lcv] == PGO_DONTCARE)
- continue;
- ptmp = uvm_pagelookup(uobj, current_offset);
-
- /*
- * if page is new, attempt to allocate the page,
- * zero-fill'd. we can only do this if the caller
- * holds a write lock.
- */
-
- if (ptmp == NULL && lktype == RW_WRITER &&
- uao_find_swslot(uobj,
- current_offset >> PAGE_SHIFT) == 0) {
- ptmp = uao_pagealloc(uobj, current_offset,
- UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
- if (ptmp) {
- /* new page */
- ptmp->flags &= ~(PG_FAKE);
- uvm_pagemarkdirty(ptmp,
- UVM_PAGE_STATUS_UNKNOWN);
- if ((flags & PGO_NOBUSY) != 0)
- ptmp->flags &= ~PG_BUSY;
- goto gotpage;
- }
+ for (lcv = 0; lcv < maxpages; lcv++) {
+ ptmp = uvm_page_array_fill_and_peek(&a, uobj,
+ offset + (lcv << PAGE_SHIFT), maxpages, 0);
+ if (ptmp == NULL) {
+ break;
}
+ KASSERT(ptmp->offset >= offset);
+ lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
+ if (lcv >= maxpages) {
+ break;
+ }
+ uvm_page_array_advance(&a);
/*
* to be useful must get a non-busy page
*/
- if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
- if (lcv == centeridx ||
- (flags & PGO_ALLPAGES) != 0)
- /* need to do a wait or I/O! */
- done = false;
+ if ((ptmp->flags & PG_BUSY) != 0) {
continue;
}
/*
- * useful page: busy/lock it and plug it in our
- * result array
+ * useful page: plug it in our result array
*/
+
KASSERT(uvm_pagegetdirty(ptmp) !=
UVM_PAGE_STATUS_CLEAN);
-
- if ((flags & PGO_NOBUSY) == 0) {
- /* caller must un-busy this page */
- ptmp->flags |= PG_BUSY;
- UVM_PAGE_OWN(ptmp, "uao_get1");
- }
-gotpage:
pps[lcv] = ptmp;
gotpages++;
}
+ uvm_page_array_fini(&a);
/*
* step 1b: now we've either done everything needed or we
* to unlock and do some waiting or I/O.
*/
+ if ((flags & PGO_ALLPAGES) != 0) {
+ for (int i = 0; i < maxpages; i++) {
+ done &= (pps[i] != NULL);
+ }
+ } else {
+ done = (pps[centeridx] != NULL);
+ }
UVMHIST_LOG(pdhist, "<- done (done=%jd)", done, 0,0,0);
*npagesp = gotpages;
- if (done)
- return 0;
- else
- return EBUSY;
+ return done ? 0 : EBUSY;
}
/*
@@ -1117,6 +1101,8 @@
{
int slot;
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
+
slot = uao_set_swslot(uobj, pageidx, 0);
if (slot) {
uvm_swap_free(slot, 1);
@@ -1340,6 +1326,7 @@
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
int swpgonlydelta = 0;
+ KASSERT(UVM_OBJ_IS_AOBJ(uobj));
KASSERT(rw_write_held(uobj->vmobjlock));
if (end == 0) {
diff -r 6813d2717cb3 -r 8abe0efb85c0 sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c Sun May 17 19:37:15 2020 +0000
+++ b/sys/uvm/uvm_fault.c Sun May 17 19:38:16 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.226 2020/05/15 22:35:05 ad Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.227 2020/05/17 19:38:17 ad Exp $ */