Source-Changes-HG archive
[src/trunk]: src/sys/uvm Split uvm_fault() into 2 more functions, uvm_fault_c...
details: https://anonhg.NetBSD.org/src/rev/20f75ea774cb
branches: trunk
changeset: 751342:20f75ea774cb
user: uebayasi <uebayasi@NetBSD.org>
date: Mon Feb 01 08:16:32 2010 +0000
description:
Split uvm_fault() into two more functions, uvm_fault_check() and
uvm_fault_upper_lookup(), and drop the unnecessary arguments that were
being passed around.
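In effect, this turns the goto-label stages of uvm_fault() into ordinary
functions that return 0 on success, ERESTART when the whole fault must be
retried, or an errno on failure, with uvm_fault_internal() reduced to a
driver loop. A minimal, compilable sketch of that pattern follows; the
names and the trimmed-down context struct are illustrative stand-ins, not
the committed NetBSD code.

	#include <errno.h>

	#ifndef ERESTART
	#define ERESTART (-3)	/* kernel-internal on NetBSD; defined for the sketch */
	#endif

	/* Illustrative stand-in for struct uvm_faultctx; fields trimmed. */
	struct faultctx {
		int shadowed;	/* does the upper (anon) layer cover the fault? */
	};

	/*
	 * Each stage returns 0 on success, ERESTART to redo the whole fault
	 * (e.g. after an amap copy dropped the locks), or an errno to fail.
	 * The bodies are elided stubs standing in for uvm_fault_check()
	 * and friends.
	 */
	static int fault_check(struct faultctx *flt)        { (void)flt; return 0; }
	static int fault_upper_lookup(struct faultctx *flt) { (void)flt; return 0; }
	static int fault_upper(struct faultctx *flt)        { (void)flt; return 0; }
	static int fault_lower(struct faultctx *flt)        { (void)flt; return 0; }

	static int
	fault_internal(struct faultctx *flt)
	{
		int error;

	restart:
		error = fault_check(flt);
		if (error == ERESTART)
			goto restart;
		else if (error)
			return error;

		error = fault_upper_lookup(flt);
		if (error == ERESTART)
			goto restart;
		else if (error)
			return error;

		error = flt->shadowed ? fault_upper(flt) : fault_lower(flt);
		if (error == ERESTART)
			goto restart;
		return error;
	}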
diffstat:
sys/uvm/uvm_fault.c | 322 +++++++++++++++++++++++++--------------------------
1 file changed, 155 insertions(+), 167 deletions(-)
diffs (truncated from 655 to 300 lines):
diff -r a63a06ea6708 -r 20f75ea774cb sys/uvm/uvm_fault.c
--- a/sys/uvm/uvm_fault.c Mon Feb 01 07:01:40 2010 +0000
+++ b/sys/uvm/uvm_fault.c Mon Feb 01 08:16:32 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_fault.c,v 1.140 2010/02/01 06:56:22 uebayasi Exp $ */
+/* $NetBSD: uvm_fault.c,v 1.141 2010/02/01 08:16:32 uebayasi Exp $ */
/*
*
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.140 2010/02/01 06:56:22 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.141 2010/02/01 08:16:32 uebayasi Exp $");
#include "opt_uvmhist.h"
@@ -707,41 +707,35 @@
struct vm_anon *anon_spare;
};
+static int
+uvm_fault_check(
+ struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+ struct vm_anon ***ranons, struct vm_page ***rpages);
typedef int
uvm_fault_subfunc_t(
- struct uvm_faultinfo *ufi,
- struct uvm_faultctx *flt,
- struct vm_amap *amap, struct uvm_object *uobj,
- struct vm_anon **anons_store, struct vm_anon **anons,
- struct vm_page **pages, struct vm_page *uobjpage);
+ struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+ struct vm_anon **anons, struct vm_page **pages);
+static uvm_fault_subfunc_t uvm_fault_upper_lookup;
+static uvm_fault_subfunc_t uvm_fault_upper;
static uvm_fault_subfunc_t uvm_fault_lower;
static uvm_fault_subfunc_t uvm_fault_lower_special;
+static uvm_fault_subfunc_t uvm_fault_lower_generic_lookup;
static uvm_fault_subfunc_t uvm_fault_lower_generic;
static uvm_fault_subfunc_t uvm_fault_lower_generic1;
-static uvm_fault_subfunc_t uvm_fault_upper;
static uvm_fault_subfunc_t uvm_fault_lower_generic2;
-static void
-uvm_fault_lower_generic_lookup(
- struct uvm_faultinfo *ufi,
- struct uvm_faultctx *flt,
- struct vm_amap *amap, struct uvm_object *uobj,
- struct vm_anon **anons_store, struct vm_anon **anons,
- struct vm_page **pages, struct vm_page **ruobjpage);
int
uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
vm_prot_t access_type, int fault_flag)
{
struct uvm_faultinfo ufi;
- struct vm_amap *amap;
- struct uvm_object *uobj;
struct uvm_faultctx flt = {
.access_type = access_type,
.wire_fault = (fault_flag & UVM_FAULT_WIRE) != 0,
.maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0,
};
struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
- struct vm_page *pages[UVM_MAXRANGE], *uobjpage = NULL;
+ struct vm_page *pages_store[UVM_MAXRANGE], **pages;
int error;
UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);
@@ -768,22 +762,25 @@
*/
ReFault:
- goto uvm_fault_prepare;
-uvm_fault_prepare_done:
+ anons = anons_store;
+ pages = pages_store;
- goto uvm_fault_upper_lookup;
-uvm_fault_upper_lookup_done:
+ error = uvm_fault_check(&ufi, &flt, &anons, &pages);
+ if (error == ERESTART)
+ goto ReFault;
+ else if (error)
+ goto done;
+
+ error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
+ if (error == ERESTART)
+ goto ReFault;
+ else if (error)
+ goto done;
if (flt.shadowed == true)
- error = uvm_fault_upper(
- &ufi, &flt,
- amap, uobj, anons_store, anons,
- pages, uobjpage);
+ error = uvm_fault_upper(&ufi, &flt, anons, pages);
else
- error = uvm_fault_lower(
- &ufi, &flt,
- amap, uobj, anons_store, anons,
- pages, uobjpage);
+ error = uvm_fault_lower(&ufi, &flt, anons, pages);
if (error == ERESTART)
goto ReFault;
@@ -794,9 +791,15 @@
uvm_anfree(flt.anon_spare);
}
return error;
+}
-uvm_fault_prepare:
- {
+int
+uvm_fault_check(
+ struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
+ struct vm_anon ***ranons, struct vm_page ***rpages)
+{
+ struct vm_amap *amap;
+ struct uvm_object *uobj;
vm_prot_t check_prot;
int nback, nforw;
@@ -804,20 +807,19 @@
* lookup and lock the maps
*/
- if (uvmfault_lookup(&ufi, false) == false) {
+ if (uvmfault_lookup(ufi, false) == false) {
UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", vaddr, 0,0,0);
- error = EFAULT;
- goto done;
+ return EFAULT;
}
/* locked: maps(read) */
#ifdef DIAGNOSTIC
- if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
+ if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) {
printf("Page fault on non-pageable map:\n");
- printf("ufi.map = %p\n", ufi.map);
- printf("ufi.orig_map = %p\n", ufi.orig_map);
- printf("ufi.orig_rvaddr = 0x%lx\n", (u_long) ufi.orig_rvaddr);
- panic("uvm_fault: (ufi.map->flags & VM_MAP_PAGEABLE) == 0");
+ printf("ufi->map = %p\n", ufi->map);
+ printf("ufi->orig_map = %p\n", ufi->orig_map);
+ printf("ufi->orig_rvaddr = 0x%lx\n", (u_long) ufi->orig_rvaddr);
+ panic("uvm_fault: (ufi->map->flags & VM_MAP_PAGEABLE) == 0");
}
#endif
@@ -825,31 +827,30 @@
* check protection
*/
- check_prot = (fault_flag & UVM_FAULT_MAXPROT) ?
- ufi.entry->max_protection : ufi.entry->protection;
- if ((check_prot & flt.access_type) != flt.access_type) {
+ check_prot = flt->maxprot ?
+ ufi->entry->max_protection : ufi->entry->protection;
+ if ((check_prot & flt->access_type) != flt->access_type) {
UVMHIST_LOG(maphist,
"<- protection failure (prot=0x%x, access=0x%x)",
- ufi.entry->protection, flt.access_type, 0, 0);
- uvmfault_unlockmaps(&ufi, false);
- error = EACCES;
- goto done;
+ ufi->entry->protection, flt->access_type, 0, 0);
+ uvmfault_unlockmaps(ufi, false);
+ return EACCES;
}
/*
* "enter_prot" is the protection we want to enter the page in at.
* for certain pages (e.g. copy-on-write pages) this protection can
- * be more strict than ufi.entry->protection. "wired" means either
+ * be more strict than ufi->entry->protection. "wired" means either
* the entry is wired or we are fault-wiring the pg.
*/
- flt.enter_prot = ufi.entry->protection;
- flt.wired = VM_MAPENT_ISWIRED(ufi.entry) || flt.wire_fault;
- if (flt.wired) {
- flt.access_type = flt.enter_prot; /* full access for wired */
- flt.cow_now = (check_prot & VM_PROT_WRITE) != 0;
+ flt->enter_prot = ufi->entry->protection;
+ flt->wired = VM_MAPENT_ISWIRED(ufi->entry) || flt->wire_fault;
+ if (flt->wired) {
+ flt->access_type = flt->enter_prot; /* full access for wired */
+ flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
} else {
- flt.cow_now = (flt.access_type & VM_PROT_WRITE) != 0;
+ flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
}
/*
@@ -859,16 +860,16 @@
* needs_copy]).
*/
- if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
- if (flt.cow_now || (ufi.entry->object.uvm_obj == NULL)) {
- KASSERT((fault_flag & UVM_FAULT_MAXPROT) == 0);
+ if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
+ if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
+ KASSERT(!flt->maxprot);
/* need to clear */
UVMHIST_LOG(maphist,
" need to clear needs_copy and refault",0,0,0,0);
- uvmfault_unlockmaps(&ufi, false);
- uvmfault_amapcopy(&ufi);
+ uvmfault_unlockmaps(ufi, false);
+ uvmfault_amapcopy(ufi);
uvmexp.fltamcopy++;
- goto ReFault;
+ return ERESTART;
} else {
@@ -877,7 +878,7 @@
* needs_copy is still true
*/
- flt.enter_prot &= ~VM_PROT_WRITE;
+ flt->enter_prot &= ~VM_PROT_WRITE;
}
}
@@ -885,8 +886,8 @@
* identify the players
*/
- amap = ufi.entry->aref.ar_amap; /* upper layer */
- uobj = ufi.entry->object.uvm_obj; /* lower layer */
+ amap = ufi->entry->aref.ar_amap; /* upper layer */
+ uobj = ufi->entry->object.uvm_obj; /* lower layer */
/*
* check for a case 0 fault. if nothing backing the entry then
@@ -894,10 +895,9 @@
*/
if (amap == NULL && uobj == NULL) {
- uvmfault_unlockmaps(&ufi, false);
+ uvmfault_unlockmaps(ufi, false);
UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
- error = EFAULT;
- goto done;
+ return EFAULT;
}
/*
@@ -907,42 +907,42 @@
* ReFault we will disable this by setting "narrow" to true.
*/
- if (flt.narrow == false) {
+ if (flt->narrow == false) {
/* wide fault (!narrow) */
- KASSERT(uvmadvice[ufi.entry->advice].advice ==
- ufi.entry->advice);
- nback = MIN(uvmadvice[ufi.entry->advice].nback,
- (ufi.orig_rvaddr - ufi.entry->start) >> PAGE_SHIFT);
- flt.startva = ufi.orig_rvaddr - (nback << PAGE_SHIFT);
- nforw = MIN(uvmadvice[ufi.entry->advice].nforw,
- ((ufi.entry->end - ufi.orig_rvaddr) >>
+ KASSERT(uvmadvice[ufi->entry->advice].advice ==
+ ufi->entry->advice);
+ nback = MIN(uvmadvice[ufi->entry->advice].nback,
+ (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
+ flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
+ nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
+ ((ufi->entry->end - ufi->orig_rvaddr) >>
PAGE_SHIFT) - 1);
/*
* note: "-1" because we don't want to count the
* faulting page as forw
*/
- flt.npages = nback + nforw + 1;
- flt.centeridx = nback;
+ flt->npages = nback + nforw + 1;
+ flt->centeridx = nback;
- flt.narrow = true; /* ensure only once per-fault */
+ flt->narrow = true; /* ensure only once per-fault */
} else {
/* narrow fault! */
nback = nforw = 0;
- flt.startva = ufi.orig_rvaddr;
- flt.npages = 1;
- flt.centeridx = 0;
+ flt->startva = ufi->orig_rvaddr;
+ flt->npages = 1;
+ flt->centeridx = 0;
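To make the wide-fault window arithmetic in the last hunk concrete, here
is a small hedged example that replays it in userland. The advice values
(1 page of backward and 3 pages of forward readahead) and the addresses
are made up for illustration; in the kernel they come from the
uvmadvice[] table and the faulting map entry.

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assume 4 KiB pages */
	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		unsigned long entry_start = 0x10000;	/* hypothetical map entry */
		unsigned long entry_end   = 0x20000;
		unsigned long orig_rvaddr = 0x13000;	/* faulting address */
		int adv_nback = 1, adv_nforw = 3;	/* assumed advice values */

		/* clamp the back window so startva never precedes the entry */
		int nback = MIN(adv_nback,
		    (int)((orig_rvaddr - entry_start) >> PAGE_SHIFT));
		unsigned long startva = orig_rvaddr -
		    ((unsigned long)nback << PAGE_SHIFT);

		/* "- 1" because the faulting page itself is not counted as forw */
		int nforw = MIN(adv_nforw,
		    (int)(((entry_end - orig_rvaddr) >> PAGE_SHIFT) - 1));

		int npages = nback + nforw + 1;		/* window plus center page */
		int centeridx = nback;			/* faulting page's index */

		printf("startva=0x%lx npages=%d centeridx=%d\n",
		    startva, npages, centeridx);
		/* prints: startva=0x12000 npages=5 centeridx=1 */
		return 0;
	}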