[src/trunk]: src/sys/uvm Add better color matching when selecting free pages. KM ...
details: https://anonhg.NetBSD.org/src/rev/1ddbfa8c3af2
branches: trunk
changeset: 760385:1ddbfa8c3af2
user: matt <matt%NetBSD.org@localhost>
date: Tue Jan 04 08:26:33 2011 +0000
description:
Add better color matching when selecting free pages. KM pages will now be
allocated so that VA and PA have the same color. On a page fault, choose a
physical page that has the same color as the virtual address.
When allocating kernel memory pages, allow the MD code to specify a preferred
VM_FREELIST from which to choose pages. For machines with large amounts
of memory (> 4GB), this lets all kernel memory come from below 4GB, reducing
the amount of bounce buffering needed with 32-bit DMA devices.
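
The computation behind UVM_FLAG_COLORMATCH is taking the low bits of a page
number as its cache color, as the uvm_page.c hunk below does with
atop(off) & uvmexp.colormask. Here is a minimal userland sketch of that idea;
PAGE_SHIFT, NCOLORS and the sample addresses are assumed values for
illustration, not what any particular port uses:

/*
 * Minimal userland sketch of the VA/PA color computation.  PAGE_SHIFT and
 * NCOLORS are assumed here; in the kernel the mask comes from
 * uvmexp.colormask and atop() is the real byte-address-to-page-number macro.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* assumed 4KB pages */
#define NCOLORS		8		/* assumed number of cache colors */
#define COLORMASK	(NCOLORS - 1)

/* Convert a byte address to a page frame number. */
static inline uintmax_t
atop(uintmax_t addr)
{
	return addr >> PAGE_SHIFT;
}

/* Color of an address: the low bits of its page number. */
static inline unsigned
addr_color(uintmax_t addr)
{
	return (unsigned)(atop(addr) & COLORMASK);
}

int
main(void)
{
	uintmax_t va = 0xffff800000123000ULL;	/* hypothetical kernel VA */
	uintmax_t pa = 0x0000000001a2b000ULL;	/* hypothetical PA */

	printf("VA color %u, PA color %u -> %s\n",
	    addr_color(va), addr_color(pa),
	    addr_color(va) == addr_color(pa) ? "match" : "mismatch");
	return 0;
}

With UVM_FLAG_COLORMATCH, uvm_pagealloc() is asked for a physical page whose
color equals that of the virtual address passed in "off", so the VA and PA
index the same bins of a physically-indexed cache.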
diffstat:
sys/uvm/uvm_extern.h | 5 ++++-
sys/uvm/uvm_fault.c | 11 ++++++-----
sys/uvm/uvm_km.c | 19 +++++++++++++++----
sys/uvm/uvm_page.c | 14 ++++++++------
4 files changed, 33 insertions(+), 16 deletions(-)
diffs (166 lines):
diff -r 1eacf04e14be -r 1ddbfa8c3af2 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Tue Jan 04 08:21:18 2011 +0000
+++ b/sys/uvm/uvm_extern.h Tue Jan 04 08:26:33 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.167 2010/12/20 00:25:47 matt Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.168 2011/01/04 08:26:33 matt Exp $ */
/*
*
@@ -148,6 +148,7 @@
#define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */
#define UVM_FLAG_WAITVA 0x1000000 /* wait for va */
#define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */
+#define UVM_FLAG_COLORMATCH 0x4000000 /* match color given in off */
/* macros to extract info */
#define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK)
@@ -466,6 +467,8 @@
#ifdef _KERNEL
/* we need this before including uvm_page.h on some platforms */
extern struct uvmexp uvmexp;
+/* MD code needs this without including <uvm/uvm.h> */
+extern bool vm_page_zero_enable;
#endif
/*
diff -r 1eacf04e14be -r 1ddbfa8c3af2 sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c Tue Jan 04 08:21:18 2011 +0000
+++ b/sys/uvm/uvm_fault.c Tue Jan 04 08:26:33 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.178 2010/12/20 00:25:47 matt Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.179 2011/01/04 08:26:33 matt Exp $ */
/*
*
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.178 2010/12/20 00:25:47 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.179 2011/01/04 08:26:33 matt Exp $");
#include "opt_uvmhist.h"
@@ -361,7 +361,8 @@
* no page, we must try and bring it in.
*/
- pg = uvm_pagealloc(NULL, 0, anon, 0);
+ pg = uvm_pagealloc(NULL, ufi->orig_rvaddr,
+ NULL, UVM_FLAG_COLORMATCH);
if (pg == NULL) { /* out of RAM. */
uvmfault_unlockall(ufi, amap, NULL, anon);
uvmexp.fltnoram++;
@@ -619,8 +620,8 @@
* so have uvm_pagealloc() do that for us.
*/
- pg = uvm_pagealloc(NULL, 0, anon,
- (opg == NULL) ? UVM_PGA_ZERO : 0);
+ pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
+ UVM_FLAG_COLORMATCH | (opg == NULL ? UVM_PGA_ZERO : 0));
} else {
pg = NULL;
}
diff -r 1eacf04e14be -r 1ddbfa8c3af2 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Tue Jan 04 08:21:18 2011 +0000
+++ b/sys/uvm/uvm_km.c Tue Jan 04 08:26:33 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.106 2010/05/14 05:02:06 cegger Exp $ */
+/* $NetBSD: uvm_km.c,v 1.107 2011/01/04 08:26:33 matt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -127,7 +127,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.106 2010/05/14 05:02:06 cegger Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.107 2011/01/04 08:26:33 matt Exp $");
#include "opt_uvmhist.h"
@@ -588,7 +588,7 @@
loopva = kva;
loopsize = size;
- pgaflags = 0;
+ pgaflags = UVM_FLAG_COLORMATCH;
if (flags & UVM_KMF_NOWAIT)
pgaflags |= UVM_PGA_USERESERVE;
if (flags & UVM_KMF_ZERO)
@@ -599,7 +599,13 @@
while (loopsize) {
KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));
- pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);
+ pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
+#ifdef UVM_KM_VMFREELIST
+ UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
+#else
+ UVM_PGA_STRAT_NORMAL, 0
+#endif
+ );
/*
* out of memory?
@@ -725,8 +731,13 @@
struct vm_page *pg;
vaddr_t va;
+
again:
+#ifdef PMAP_ALLOC_POOLPAGE
+ pg = PMAP_ALLOC_POOLPAGE(waitok ? 0 : UVM_PGA_USERESERVE);
+#else
pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
+#endif
if (__predict_false(pg == NULL)) {
if (waitok) {
uvm_wait("plpg");
diff -r 1eacf04e14be -r 1ddbfa8c3af2 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Tue Jan 04 08:21:18 2011 +0000
+++ b/sys/uvm/uvm_page.c Tue Jan 04 08:26:33 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.168 2010/12/11 22:34:03 matt Exp $ */
+/* $NetBSD: uvm_page.c,v 1.169 2011/01/04 08:26:33 matt Exp $ */
/*
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.168 2010/12/11 22:34:03 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.169 2011/01/04 08:26:33 matt Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -1220,7 +1220,7 @@
lwp_t *l;
KASSERT(obj == NULL || anon == NULL);
- KASSERT(anon == NULL || off == 0);
+ KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
KASSERT(off == trunc_page(off));
KASSERT(obj == NULL || mutex_owned(&obj->vmobjlock));
KASSERT(anon == NULL || mutex_owned(&anon->an_lock));
@@ -1230,12 +1230,14 @@
/*
* This implements a global round-robin page coloring
* algorithm.
- *
- * XXXJRT: What about virtually-indexed caches?
*/
ucpu = curcpu()->ci_data.cpu_uvm;
- color = ucpu->page_free_nextcolor;
+ if (flags & UVM_FLAG_COLORMATCH) {
+ color = atop(off) & uvmexp.colormask;
+ } else {
+ color = ucpu->page_free_nextcolor;
+ }
/*
* check to see if we need to generate some free pages waking
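
As a closing illustration of the UVM_KM_VMFREELIST part of the change: when
an MD header defines that macro, the uvm_km.c hunk above calls
uvm_pagealloc_strat() with UVM_PGA_STRAT_ONLY, so kernel pages may come only
from the preferred freelist. The sketch below is a rough, self-contained
userland model of that strategy; the freelist names, counts and helpers are
mock-ups for illustration, not the kernel's data structures.

/*
 * Simplified userland model of freelist-strategy page allocation.  Only the
 * STRAT_ONLY vs. STRAT_NORMAL distinction mirrors UVM; everything else is a
 * mock-up.
 */
#include <stdio.h>
#include <stdbool.h>

enum { STRAT_NORMAL, STRAT_ONLY };
enum { FREELIST_BELOW4G, FREELIST_ABOVE4G, NFREELISTS };

static int free_pages[NFREELISTS] = { 4, 8 };	/* pages left per list */

/* Try to take one page, honouring the requested strategy. */
static bool
pagealloc_strat(int strat, int preferred)
{
	if (free_pages[preferred] > 0) {
		free_pages[preferred]--;
		return true;
	}
	if (strat == STRAT_ONLY)
		return false;		/* never fall back to other lists */
	for (int fl = 0; fl < NFREELISTS; fl++) {
		if (free_pages[fl] > 0) {
			free_pages[fl]--;
			return true;
		}
	}
	return false;
}

int
main(void)
{
	/* Kernel-memory style allocation: below-4GB pages only. */
	for (int i = 0; i < 6; i++)
		printf("alloc %d: %s\n", i,
		    pagealloc_strat(STRAT_ONLY, FREELIST_BELOW4G) ?
		    "ok" : "failed (caller would wait for memory)");
	return 0;
}

In the kernel, a STRAT_ONLY failure makes the caller wait or fail rather than
hand a page above 4GB to kernel mappings that 32-bit DMA devices would then
have to reach through bounce buffers.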