Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm32 Take advantage of the new `access_type' for p...
details: https://anonhg.NetBSD.org/src/rev/aa666e749914
branches: trunk
changeset: 467638:aa666e749914
user: mycroft <mycroft%NetBSD.org@localhost>
date: Fri Mar 26 22:00:24 1999 +0000
description:
Take advantage of the new `access_type' for pmap_enter(), and always do R/M
emulation of managed pages. This required the following `interesting' changes:
* File system buffers must be entered with an access type of
VM_PROT_READ|VM_PROT_WRITE, so that the pages will be accessible immediately.
Otherwise we would have to teach pagemove() to update the R/M information.
Since they're never eligible for paging, the latter is overkill (the new pmap_enter() call shape is sketched after this description).
* We must ensure that pages allocated before the pmap is completely set up
(that is, pages allocated early by the VM system) are not eligible for R/M
emulation, since the memory needed for this isn't available. We do this by
allocating the pmap's internal memory with uvm_pageboot_alloc(). This also
fixes an absolutely horrible hack where the pmap only worked because page 0
happened to be mapped.
Also:
* Push the wired page counting into the p->v list maintenance functions. This
avoids code duplication, and fixes some cases where we were confused about
which pages to do it with.
* Fix lots of problems associated with pmap_nightmare() (and rename it to
pmap_vac_me_harder()).
* Since the early pages are no longer considered `managed', just make
pmap_*_pv() panic if !pmap_initialized.
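For reference, here is a hedged sketch of the changed pmap_enter() interface as it is used in the hunks below. The prototype is reconstructed from the call sites (only the calls appear in this diff), so treat it as an assumption rather than a quote of the header:

    void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
        vm_prot_t prot, boolean_t wired, vm_prot_t access_type);

    /* Buffer-cache pages: pass the access type up front so the mapping is
     * usable immediately and no R/M-emulation fault is ever taken on it. */
    pmap_enter(kernel_map->pmap, curbuf, VM_PAGE_TO_PHYS(pg),
        VM_PROT_READ|VM_PROT_WRITE, TRUE, VM_PROT_READ|VM_PROT_WRITE);

    /* Ordinary managed mappings: an access type of 0 leaves the page to R/M
     * emulation, which records the reference/modification on the first fault. */
    pmap_enter(pmap_kernel(), va, addr,
        VM_PROT_READ | VM_PROT_WRITE, TRUE, 0);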
diffstat:
sys/arch/arm32/arm32/bus_dma.c | 4 +-
sys/arch/arm32/arm32/fault.c | 28 +-
sys/arch/arm32/arm32/machdep.c | 16 +-
sys/arch/arm32/arm32/mem.c | 4 +-
sys/arch/arm32/arm32/pmap.c | 578 +++++++++++++++++--------------------
sys/arch/arm32/arm32/vm_machdep.c | 4 +-
sys/arch/arm32/ofw/ofrom.c | 4 +-
7 files changed, 303 insertions(+), 335 deletions(-)
diffs (truncated from 1118 to 300 lines):
diff -r 2c4b72069613 -r aa666e749914 sys/arch/arm32/arm32/bus_dma.c
--- a/sys/arch/arm32/arm32/bus_dma.c Fri Mar 26 21:58:39 1999 +0000
+++ b/sys/arch/arm32/arm32/bus_dma.c Fri Mar 26 22:00:24 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bus_dma.c,v 1.12 1999/03/24 05:50:53 mrg Exp $ */
+/* $NetBSD: bus_dma.c,v 1.13 1999/03/26 22:00:24 mycroft Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -515,7 +515,7 @@
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, TRUE);
+ VM_PROT_READ | VM_PROT_WRITE, TRUE, 0);
/*
* If the memory must remain coherent with the
* cache then we must make the memory uncacheable
diff -r 2c4b72069613 -r aa666e749914 sys/arch/arm32/arm32/fault.c
--- a/sys/arch/arm32/arm32/fault.c Fri Mar 26 21:58:39 1999 +0000
+++ b/sys/arch/arm32/arm32/fault.c Fri Mar 26 22:00:24 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: fault.c,v 1.40 1999/03/24 05:50:54 mrg Exp $ */
+/* $NetBSD: fault.c,v 1.41 1999/03/26 22:00:24 mycroft Exp $ */
/*
* Copyright (c) 1994-1997 Mark Brinicombe.
@@ -193,19 +193,6 @@
return;
}
-#ifdef DIAGNOSTIC
- if (current_intr_depth > 0) {
-#ifdef DDB
- printf("Fault with intr_depth > 0\n");
- report_abort(NULL, fault_status, fault_address, fault_pc);
- kdb_trap(-1, frame);
- return;
-#else
- panic("Fault with intr_depth > 0");
-#endif /* DDB */
- }
-#endif /* DIAGNOSTIC */
-
/* More debug stuff */
fault_instruction = ReadWord(fault_pc);
@@ -414,6 +401,19 @@
pmap_handled_emulation(map->pmap, va))
goto out;
+#ifdef DIAGNOSTIC
+ if (current_intr_depth > 0) {
+#ifdef DDB
+ printf("Non-emulated page fault with intr_depth > 0\n");
+ report_abort(NULL, fault_status, fault_address, fault_pc);
+ kdb_trap(-1, frame);
+ return;
+#else
+ panic("Fault with intr_depth > 0");
+#endif /* DDB */
+ }
+#endif /* DIAGNOSTIC */
+
rv = uvm_fault(map, va, 0, ftype);
if (rv == KERN_SUCCESS)
goto out;
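The two fault.c hunks above relocate the DIAGNOSTIC check rather than delete it: because managed pages now always go through R/M emulation, an emulation fault at interrupt depth > 0 is expected and is resolved entirely by the pmap, so only a fault that has to fall through to uvm_fault() should trip the diagnostic. A condensed sketch of the resulting order (surrounding code beyond the context lines shown is an assumption):

    /* R/M emulation first: these faults are legal even at interrupt time. */
    if (pmap_handled_emulation(map->pmap, va))
        goto out;

    #ifdef DIAGNOSTIC
    /* Only a non-emulated fault at interrupt depth > 0 is suspicious. */
    if (current_intr_depth > 0)
        panic("Fault with intr_depth > 0");
    #endif /* DIAGNOSTIC */

    rv = uvm_fault(map, va, 0, ftype);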
diff -r 2c4b72069613 -r aa666e749914 sys/arch/arm32/arm32/machdep.c
--- a/sys/arch/arm32/arm32/machdep.c Fri Mar 26 21:58:39 1999 +0000
+++ b/sys/arch/arm32/arm32/machdep.c Fri Mar 26 22:00:24 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.63 1999/03/24 05:50:55 mrg Exp $ */
+/* $NetBSD: machdep.c,v 1.64 1999/03/26 22:00:25 mycroft Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@@ -402,7 +402,8 @@
for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
pmap_enter(pmap_kernel(),
(vm_offset_t)((caddr_t)msgbufaddr + loop * NBPG),
- msgbufphys + loop * NBPG, VM_PROT_ALL, TRUE);
+ msgbufphys + loop * NBPG, VM_PROT_READ|VM_PROT_WRITE, TRUE,
+ VM_PROT_READ|VM_PROT_WRITE);
initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
/*
@@ -450,10 +451,12 @@
curbufsize = CLBYTES * ((loop < residual) ? (base+1) : base);
while (curbufsize) {
- if ((pg = uvm_pagealloc(NULL, 0, NULL)) == NULL)
- panic("cpu_startup: More RAM needed for buffer cache");
+ pg = uvm_pagealloc(NULL, 0, NULL);
+ if (pg == NULL)
+ panic("cpu_startup: not enough memory for buffer cache");
pmap_enter(kernel_map->pmap, curbuf,
- VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
+ VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE,
+ TRUE, VM_PROT_READ|VM_PROT_WRITE);
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
@@ -482,9 +485,9 @@
* Initialise callouts
*/
callfree = callout;
-
for (loop = 1; loop < ncallout; ++loop)
callout[loop - 1].c_next = &callout[loop];
+ callout[loop - 1].c_next = NULL;
printf("avail mem = %ld\n", ptoa(uvmexp.free));
printf("using %d buffers containing %d bytes of memory\n",
@@ -493,7 +496,6 @@
/*
* Set up buffers, so they can be used to read disk labels.
*/
-
bufinit();
curpcb = &proc0.p_addr->u_pcb;
diff -r 2c4b72069613 -r aa666e749914 sys/arch/arm32/arm32/mem.c
--- a/sys/arch/arm32/arm32/mem.c Fri Mar 26 21:58:39 1999 +0000
+++ b/sys/arch/arm32/arm32/mem.c Fri Mar 26 22:00:24 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: mem.c,v 1.9 1999/03/24 05:50:55 mrg Exp $ */
+/* $NetBSD: mem.c,v 1.10 1999/03/26 22:00:25 mycroft Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -123,7 +123,7 @@
v = uio->uio_offset;
pmap_enter(pmap_kernel(), (vm_offset_t)memhook,
trunc_page(v), uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE, TRUE);
+ VM_PROT_READ : VM_PROT_WRITE, TRUE, 0);
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
error = uiomove((caddr_t)memhook + o, c, uio);
diff -r 2c4b72069613 -r aa666e749914 sys/arch/arm32/arm32/pmap.c
--- a/sys/arch/arm32/arm32/pmap.c Fri Mar 26 21:58:39 1999 +0000
+++ b/sys/arch/arm32/arm32/pmap.c Fri Mar 26 22:00:24 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.48 1999/03/24 05:50:55 mrg Exp $ */
+/* $NetBSD: pmap.c,v 1.49 1999/03/26 22:00:25 mycroft Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@@ -58,6 +58,7 @@
/* Include header files */
#include "opt_pmap_debug.h"
+#include "opt_ddb.h"
#include <sys/types.h>
#include <sys/param.h>
@@ -162,6 +163,8 @@
static __inline void pmap_map_in_l1 __P((pmap_t pmap, vm_offset_t va,
vm_offset_t l2pa));
+int mycroft_hack = 0;
+
/* Function to set the debug level of the pmap code */
#ifdef PMAP_DEBUG
@@ -377,7 +380,7 @@
* Enter a new physical-virtual mapping into the pv table
*/
-/*__inline*/ int
+/*__inline*/ void
pmap_enter_pv(pmap, va, pv, flags)
pmap_t pmap;
vm_offset_t va;
@@ -387,8 +390,10 @@
struct pv_entry *npv;
u_int s;
+#ifdef DIAGNOSTIC
if (!pmap_initialized)
- return(1);
+ panic("pmap_enter_pv: !pmap_initialized");
+#endif
s = splimp();
@@ -403,17 +408,17 @@
pv->pv_pmap = pmap;
pv->pv_next = NULL;
pv->pv_flags = flags;
- (void)splx(s);
- return(1);
} else {
/*
* There is at least one other VA mapping this page.
* Place this entry after the header.
*/
+#ifdef DIAGNOSTIC
for (npv = pv; npv; npv = npv->pv_next)
if (pmap == npv->pv_pmap && va == npv->pv_va)
panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
+#endif
npv = pmap_alloc_pv();
npv->pv_va = va;
npv->pv_pmap = pmap;
@@ -421,8 +426,11 @@
npv->pv_next = pv->pv_next;
pv->pv_next = npv;
}
+
+ if (flags & PT_W)
+ ++pmap->pm_stats.wired_count;
+
(void)splx(s);
- return(0);
}
@@ -430,7 +438,7 @@
* Remove a physical-virtual mapping from the pv table
*/
-/* __inline*/ u_int
+/*__inline*/ void
pmap_remove_pv(pmap, va, pv)
pmap_t pmap;
vm_offset_t va;
@@ -440,13 +448,10 @@
u_int s;
u_int flags = 0;
+#ifdef DIAGNOSTIC
if (!pmap_initialized)
- return(0);
-
- /*
- * Remove from the PV table (raise IPL since we
- * may be called at interrupt time).
- */
+ panic("pmap_remove_pv: !pmap_initialized");
+#endif
s = splimp();
@@ -478,8 +483,11 @@
pmap_free_pv(npv);
}
}
+
+ if (flags & PT_W)
+ --pmap->pm_stats.wired_count;
+
(void)splx(s);
- return(flags);
}
/*
@@ -496,13 +504,15 @@
{
struct pv_entry *npv;
u_int s;
- u_int flags;
+ u_int flags, oflags;
PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
pmap, va, pv, bic_mask, eor_mask));
+#ifdef DIAGNOSTIC
if (!pmap_initialized)
- return(0);
+ panic("pmap_modify_pv: !pmap_initialized");
+#endif
s = splimp();
@@ -515,17 +525,24 @@
for (npv = pv; npv; npv = npv->pv_next) {
if (pmap == npv->pv_pmap && va == npv->pv_va) {
- flags = npv->pv_flags;
- npv->pv_flags = ((flags & ~bic_mask) ^ eor_mask);
+ oflags = npv->pv_flags;
+ npv->pv_flags = flags =
+ ((oflags & ~bic_mask) ^ eor_mask);
+ if ((flags ^ oflags) & PT_W) {
+ if (flags & PT_W)
+ ++pmap->pm_stats.wired_count;
+ else
+ --pmap->pm_stats.wired_count;
+ }
PDEBUG(0, printf("done flags=%08x\n", flags));
(void)splx(s);
- return(flags);
+ return (oflags);
}
}
PDEBUG(0, printf("done.\n"));
(void)splx(s);