Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch allocate and free page table pages explicitly instead of abusing uvm_fault_wire()
details: https://anonhg.NetBSD.org/src/rev/3407d5056cdc
branches: trunk
changeset: 517791:3407d5056cdc
user: chs <chs%NetBSD.org@localhost>
date: Mon Nov 19 06:40:11 2001 +0000
description:
allocate and free page table pages explicitly instead of abusing
uvm_fault_wire(). this allows us to make pt_map non-pageable,
but we need to be careful in pmap_remove() not to attempt to
reference PTEs after the PTP has been freed.
diffstat:
sys/arch/amiga/amiga/pmap.c | 100 +++++++++++++++-----------------
sys/arch/atari/atari/pmap.c | 94 +++++++++++++++---------------
sys/arch/cesfic/cesfic/pmap.c | 88 +++++++++++++++-------------
sys/arch/hp300/hp300/pmap.c | 119 +++++++++++++++++++++------------------
sys/arch/luna68k/luna68k/pmap.c | 112 +++++++++++++++++++-----------------
sys/arch/mac68k/mac68k/pmap.c | 114 +++++++++++++++++++------------------
sys/arch/mvme68k/mvme68k/pmap.c | 121 +++++++++++++++++++++------------------
sys/arch/news68k/news68k/pmap.c | 110 ++++++++++++++++++-----------------
sys/arch/next68k/next68k/pmap.c | 112 +++++++++++++++++++-----------------
sys/arch/x68k/x68k/pmap.c | 112 +++++++++++++++++++-----------------
10 files changed, 559 insertions(+), 523 deletions(-)
diffs (truncated from 2213 to 300 lines):
diff -r fe6775aec7b7 -r 3407d5056cdc sys/arch/amiga/amiga/pmap.c
--- a/sys/arch/amiga/amiga/pmap.c Mon Nov 19 06:08:01 2001 +0000
+++ b/sys/arch/amiga/amiga/pmap.c Mon Nov 19 06:40:11 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.92 2001/09/29 22:00:29 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.93 2001/11/19 06:40:11 chs Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -670,7 +670,7 @@
} else
s = maxproc * AMIGA_UPTSIZE;
- pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
TRUE, &pt_map_store);
#if defined(M68040) || defined(M68060)
@@ -897,12 +897,15 @@
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
- if (pmap->pm_ptab)
+ if (pmap->pm_ptab) {
+ pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
+ (vaddr_t)pmap->pm_ptab + AMIGA_UPTSIZE);
+ uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
+ (vaddr_t)pmap->pm_ptab + AMIGA_UPTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
AMIGA_UPTSIZE);
- if (pmap->pm_stab != Segtabzero)
- uvm_km_free_wakeup(kernel_map, (vaddr_t)pmap->pm_stab,
- AMIGA_STSIZE);
+ }
+ KASSERT(pmap->pm_stab == Segtabzero);
}
/*
@@ -2082,23 +2085,18 @@
/*
* If reference count drops to 1, and we're not instructed
* to keep it around, free the PT page.
- *
- * Note: refcnt == 1 comes from the fact that we allocate
- * the page with uvm_fault_wire(), which initially wires
- * the page. The first reference we actually add causes
- * the refcnt to be 2.
*/
if (refs == 1 && (flags & PRM_KEEPPTPAGE) == 0) {
+#ifdef DIAGNOSTIC
struct pv_entry *pv;
+#endif
paddr_t pa;
pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
#ifdef DIAGNOSTIC
if (PAGE_IS_MANAGED(pa) == 0)
panic("pmap_remove_mapping: unmanaged PT page");
-#endif
pv = pa_to_pvh(pa);
-#ifdef DIAGNOSTIC
if (pv->pv_ptste == NULL)
panic("pmap_remove_mapping: ptste == NULL");
if (pv->pv_pmap != pmap_kernel() ||
@@ -2108,7 +2106,7 @@
"bad PT page pmap %p, va 0x%lx, next %p",
pv->pv_pmap, pv->pv_va, pv->pv_next);
#endif
- pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ pmap_remove_mapping(pmap_kernel(), ptpva,
NULL, PRM_TFLUSH|PRM_CFLUSH);
uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#ifdef DEBUG
@@ -2277,11 +2275,11 @@
pmap_ptpage_addref(ptpva)
vaddr_t ptpva;
{
- struct vm_page *m;
+ struct vm_page *pg;
simple_lock(&uvm.kernel_object->vmobjlock);
- m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
- m->wire_count++;
+ pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ pg->wire_count++;
simple_unlock(&uvm.kernel_object->vmobjlock);
}
@@ -2294,12 +2292,12 @@
pmap_ptpage_delref(ptpva)
vaddr_t ptpva;
{
- struct vm_page *m;
+ struct vm_page *pg;
int rv;
simple_lock(&uvm.kernel_object->vmobjlock);
- m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
- rv = --m->wire_count;
+ pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ rv = --pg->wire_count;
simple_unlock(&uvm.kernel_object->vmobjlock);
return (rv);
}
@@ -2447,6 +2445,7 @@
vaddr_t va;
{
paddr_t ptpa;
+ struct vm_page *pg;
pv_entry_t pv;
#ifdef M68060
u_int stpa;
@@ -2594,42 +2593,35 @@
kpt->kpt_va);
#endif
splx(s);
- }
- /*
- * For user processes we just simulate a fault on that location
- * letting the VM system allocate a zero-filled page.
- *
- * Note we use a wire-fault to keep the page off the paging
- * queues. This sets our PT page's reference (wire) count to
- * 1, which is what we use to check if the page can be freed.
- * See pmap_remove_mapping().
- */
- else {
+ } else {
+
/*
- * Count the segment table reference now so that we won't
+ * For user processes we just allocate a page from the
+ * VM system. Note that we set the page "wired" count to 1,
+ * which is what we use to check if the page can be freed.
+ * See pmap_remove_mapping().
+ *
+ * Count the segment table reference first so that we won't
* lose the segment table when low on memory.
*/
+
pmap->pm_sref++;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
- printf("enter_pt: about to fault UPT pg at %lx\n", va);
+ printf("enter_pt: about to alloc UPT pg at %lx\n", va);
#endif
- s = uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
- VM_PROT_READ|VM_PROT_WRITE);
- if (s != 0) {
- printf("uvm_fault_wire(pt_map, 0x%lx, 0%lx, RW) "
- "-> %d\n", va, va + PAGE_SIZE, s);
- panic("pmap_enter: uvm_fault_wire failed");
+ while ((pg = uvm_pagealloc(uvm.kernel_object, va, NULL,
+ UVM_PGA_ZERO)) == NULL) {
+ uvm_wait("ptpage");
}
- ptpa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
-#if 0 /* XXXX what is this? XXXX */
- /*
- * Mark the page clean now to avoid its pageout (and
- * hence creation of a pager) between now and when it
- * is wired; i.e. while it is on a paging queue.
- */
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
-#endif
+ pg->wire_count = 1;
+ pg->flags &= ~(PG_BUSY|PG_FAKE);
+ UVM_PAGE_OWN(pg, NULL);
+ ptpa = VM_PAGE_TO_PHYS(pg);
+ pmap_enter(pmap_kernel(), va, ptpa,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ pmap_update(pmap_kernel());
}
#ifdef M68060
@@ -2733,7 +2725,7 @@
{
pt_entry_t *pte;
paddr_t pa;
- struct vm_page *m;
+ struct vm_page *pg;
int count;
if (!pmap_ste_v(pmap_kernel(), va) ||
@@ -2741,9 +2733,9 @@
return;
pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
- m = PHYS_TO_VM_PAGE(pa);
- if (m->wire_count < 1) {
- printf("*%s*: 0x%lx: wire count %d\n", str, va, m->wire_count);
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg->wire_count < 1) {
+ printf("*%s*: 0x%lx: wire count %d\n", str, va, pg->wire_count);
return;
}
@@ -2751,9 +2743,9 @@
for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
if (*pte)
count++;
- if ((m->wire_count - 1) != count)
+ if ((pg->wire_count - 1) != count)
printf("*%s*: 0x%lx: w%d/a%d\n",
- str, va, (m->wire_count-1), count);
+ str, va, (pg->wire_count-1), count);
}
#endif
diff -r fe6775aec7b7 -r 3407d5056cdc sys/arch/atari/atari/pmap.c
--- a/sys/arch/atari/atari/pmap.c Mon Nov 19 06:08:01 2001 +0000
+++ b/sys/arch/atari/atari/pmap.c Mon Nov 19 06:40:11 2001 +0000
@@ -628,7 +628,7 @@
}
else s = maxproc * ATARI_UPTSIZE;
- pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
TRUE, &pt_map_store);
#ifdef DEBUG
@@ -858,12 +858,15 @@
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
- if (pmap->pm_ptab)
+ if (pmap->pm_ptab) {
+ pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
+ (vaddr_t)pmap->pm_ptab + ATARI_UPTSIZE);
+ uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
+ (vaddr_t)pmap->pm_ptab + ATARI_UPTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
ATARI_UPTSIZE);
- if (pmap->pm_stab != Segtabzero)
- uvm_km_free_wakeup(kernel_map, (vaddr_t)pmap->pm_stab,
- ATARI_STSIZE);
+ }
+ KASSERT(pmap->pm_stab == Segtabzero);
}
/*
@@ -2035,23 +2038,18 @@
/*
* If reference count drops to 1, and we're not instructed
* to keep it around, free the PT page.
- *
- * Note: refcnt == 1 comes from the fact that we allocate
- * the page with uvm_fault_wire(), which initially wires
- * the page. The first reference we actually add causes
- * the refcnt to be 2.
*/
if (refs == 1 && (flags & PRM_KEEPPTPAGE) == 0) {
+#ifdef DIAGNOSTIC
struct pv_entry *pv;
+#endif
paddr_t pa;
pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
#ifdef DIAGNOSTIC
if (PAGE_IS_MANAGED(pa) == 0)
panic("pmap_remove_mapping: unmanaged PT page");
-#endif
pv = pa_to_pvh(pa);
-#ifdef DIAGNOSTIC
if (pv->pv_ptste == NULL)
panic("pmap_remove_mapping: ptste == NULL");
if (pv->pv_pmap != pmap_kernel() ||
@@ -2061,7 +2059,7 @@
"bad PT page pmap %p, va 0x%lx, next %p",
pv->pv_pmap, pv->pv_va, pv->pv_next);
#endif
- pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
+ pmap_remove_mapping(pmap_kernel(), ptpva,
NULL, PRM_TFLUSH|PRM_CFLUSH);
uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#ifdef DEBUG
@@ -2231,11 +2229,11 @@
pmap_ptpage_addref(ptpva)
vaddr_t ptpva;
{
- struct vm_page *m;
+ struct vm_page *pg;
simple_lock(&uvm.kernel_object->vmobjlock);
- m = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
- m->wire_count++;
+ pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
+ pg->wire_count++;
simple_unlock(&uvm.kernel_object->vmobjlock);
}
@@ -2248,12 +2246,12 @@
pmap_ptpage_delref(ptpva)
vaddr_t ptpva;
{
- struct vm_page *m;
+ struct vm_page *pg;
int rv;
Home |
Main Index |
Thread Index |
Old Index