Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/i386/i386 several fixes:
details: https://anonhg.NetBSD.org/src/rev/d5c1ac0463b9
branches: trunk
changeset: 482749:d5c1ac0463b9
user: chs <chs%NetBSD.org@localhost>
date: Mon Feb 21 02:01:24 2000 +0000
description:
several fixes:
1. In pmap_steal_ptp(), don't trylock the pmap that wants a PTP;
the caller already has it locked.
2. Do not panic in pmap_enter() due to memory allocation failures when
the PMAP_CANFAIL flag is given. To this end, move all such panics
from the lower-level routines to pmap_enter() and pmap_growkernel(),
where we can check for this flag.
3. Add #ifdef DIAGNOSTIC around all panics which are only reachable
if there's a bug or a hardware error.
diffstat:
sys/arch/i386/i386/pmap.c | 114 +++++++++++++++++++++++++++++++++------------
1 files changed, 82 insertions(+), 32 deletions(-)
diffs (truncated from 364 to 300 lines):
diff -r c7205bac767c -r d5c1ac0463b9 sys/arch/i386/i386/pmap.c
--- a/sys/arch/i386/i386/pmap.c Mon Feb 21 01:51:37 2000 +0000
+++ b/sys/arch/i386/i386/pmap.c Mon Feb 21 02:01:24 2000 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.83 1999/12/11 19:39:31 sommerfeld Exp $ */
+/* $NetBSD: pmap.c,v 1.84 2000/02/21 02:01:24 chs Exp $ */
/*
*
@@ -1200,13 +1200,6 @@
}
}
- /*
- * done! if we didn't get a pv then we panic :(
- */
-
- if (pv == NULL)
- panic("pmap_alloc_pvpage");
-
return(pv);
}
@@ -1552,6 +1545,9 @@
if (just_try)
return(NULL);
ptp = pmap_steal_ptp(&pmap->pm_obj, ptp_i2o(pde_index));
+ if (ptp == NULL) {
+ return (NULL);
+ }
}
/* got one! */
@@ -1583,21 +1579,26 @@
{
struct vm_page *ptp = NULL;
struct pmap *firstpmap;
+ struct uvm_object *curobj;
+ pt_entry_t *ptes;
int idx, lcv;
- pt_entry_t *ptes;
+ boolean_t caller_locked, we_locked;
simple_lock(&pmaps_lock);
-
if (pmaps_hand == NULL)
- pmaps_hand = pmaps.lh_first;
-
+ pmaps_hand = LIST_FIRST(&pmaps);
firstpmap = pmaps_hand;
- if (firstpmap == NULL)
- panic("pmap_steal_ptp: no pmaps to steal from!");
do { /* while we haven't looped back around to firstpmap */
- if (simple_lock_try(&pmaps_hand->pm_obj.vmobjlock)) {
- ptp = pmaps_hand->pm_obj.memq.tqh_first;
+
+ curobj = &pmaps_hand->pm_obj;
+ we_locked = FALSE;
+ caller_locked = (curobj == obj);
+ if (!caller_locked) {
+ we_locked = simple_lock_try(&curobj->vmobjlock);
+ }
+ if (caller_locked || we_locked) {
+ ptp = curobj->memq.tqh_first;
for (/*null*/; ptp != NULL; ptp = ptp->listq.tqe_next) {
/*
@@ -1606,9 +1607,11 @@
*/
idx = ptp_o2i(ptp->offset);
+#ifdef DIAGNOSTIC
if (VM_PAGE_TO_PHYS(ptp) !=
(pmaps_hand->pm_pdir[idx] & PG_FRAME))
panic("pmap_steal_ptp: PTP mismatch!");
+#endif
ptes = (pt_entry_t *)
pmap_tmpmap_pa(VM_PAGE_TO_PHYS(ptp));
@@ -1646,20 +1649,20 @@
uvm_pagerealloc(ptp, obj, offset);
break; /* break out of "for" loop */
}
- simple_unlock(&pmaps_hand->pm_obj.vmobjlock);
+ if (we_locked) {
+ simple_unlock(&curobj->vmobjlock);
+ }
}
/* advance the pmaps_hand */
- pmaps_hand = pmaps_hand->pm_list.le_next;
- if (pmaps_hand == NULL)
- pmaps_hand = pmaps.lh_first;
+ pmaps_hand = LIST_NEXT(pmaps_hand, pm_list);
+ if (pmaps_hand == NULL) {
+ pmaps_hand = LIST_FIRST(&pmaps);
+ }
} while (ptp == NULL && pmaps_hand != firstpmap);
simple_unlock(&pmaps_lock);
- if (ptp == NULL)
- panic("pmap_steal_ptp: failed to steal a PTP!");
-
return(ptp);
}
@@ -1687,8 +1690,10 @@
return(pmap->pm_ptphint);
ptp = uvm_pagelookup(&pmap->pm_obj, ptp_i2o(pde_index));
+#ifdef DIAGNOSTIC
if (ptp == NULL)
panic("pmap_get_ptp: unmanaged user PTP");
+#endif
pmap->pm_ptphint = ptp;
return(ptp);
}
@@ -1769,8 +1774,6 @@
memset(&pmap->pm_pdir[PDSLOT_KERN + nkpde], 0,
NBPG - ((PDSLOT_KERN + nkpde) * sizeof(pd_entry_t)));
LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
- if (pmaps_hand == NULL)
- pmaps_hand = pmap;
simple_unlock(&pmaps_lock);
}
@@ -1825,7 +1828,7 @@
simple_lock(&pmaps_lock);
if (pmap == pmaps_hand)
- pmaps_hand = pmaps_hand->pm_list.le_next;
+ pmaps_hand = LIST_NEXT(pmaps_hand, pm_list);
LIST_REMOVE(pmap, pm_list);
simple_unlock(&pmaps_lock);
@@ -1835,8 +1838,10 @@
while (pmap->pm_obj.memq.tqh_first != NULL) {
pg = pmap->pm_obj.memq.tqh_first;
+#ifdef DIAGNOSTIC
if (pg->flags & PG_BUSY)
panic("pmap_release: busy page table page");
+#endif
/* pmap_page_protect? currently no need for it. */
pg->wire_count = 0;
@@ -2171,9 +2176,11 @@
}
bank = vm_physseg_find(i386_btop(opte & PG_FRAME), &off);
+#ifdef DIAGNOSTIC
if (bank == -1)
panic("pmap_remove_ptes: unmanaged page marked "
"PG_PVLIST");
+#endif
/* sync R/M bits */
simple_lock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);
@@ -2245,8 +2252,10 @@
}
bank = vm_physseg_find(i386_btop(opte & PG_FRAME), &off);
+#ifdef DIAGNOSTIC
if (bank == -1)
panic("pmap_remove_pte: unmanaged page marked PG_PVLIST");
+#endif
/* sync R/M bits */
simple_lock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);
@@ -2307,9 +2316,11 @@
ptp = pmap->pm_ptphint;
} else {
ptp = PHYS_TO_VM_PAGE(ptppa);
+#ifdef DIAGNOSTIC
if (ptp == NULL)
panic("pmap_remove: unmanaged "
"PTP detected");
+#endif
}
}
@@ -2402,9 +2413,11 @@
ptp = pmap->pm_ptphint;
} else {
ptp = PHYS_TO_VM_PAGE(ptppa);
+#ifdef DIAGNOSTIC
if (ptp == NULL)
panic("pmap_remove: unmanaged PTP "
"detected");
+#endif
}
}
pmap_remove_ptes(pmap, prr, ptp,
@@ -2864,8 +2877,10 @@
if (pmap_valid_entry(pmap->pm_pdir[pdei(va)])) {
ptes = pmap_map_ptes(pmap); /* locks pmap */
+#ifdef DIAGNOSTIC
if (!pmap_valid_entry(ptes[i386_btop(va)]))
panic("pmap_unwire: invalid (unmapped) va");
+#endif
if ((ptes[i386_btop(va)] & PG_W) != 0) {
ptes[i386_btop(va)] &= ~PG_W;
pmap->pm_stats.wired_count--;
@@ -3046,9 +3061,11 @@
if (dstvalid == 0) {
if (!pmap_valid_entry(dstpmap->
pm_pdir[pdei(dstl.addr)])) {
+#ifdef DIAGNOSTIC
if (dstl.addr >= VM_MIN_KERNEL_ADDRESS)
panic("pmap_transfer: missing kernel "
"PTP at 0x%lx", dstl.addr);
+#endif
dstl.ptp = pmap_get_ptp(dstpmap,
pdei(dstl.addr), TRUE);
if (dstl.ptp == NULL) /* out of RAM? punt. */
@@ -3120,9 +3137,11 @@
* get new dst PTP
*/
if (!pmap_valid_entry(dstpmap->pm_pdir[pdei(dstl.addr)])) {
+#ifdef DIAGNOSTIC
if (dstl.addr >= VM_MIN_KERNEL_ADDRESS)
panic("pmap_transfer: missing kernel PTP at "
"0x%lx", dstl.addr);
+#endif
dstl.ptp = pmap_get_ptp(dstpmap, pdei(dstl.addr), TRUE);
if (dstl.ptp == NULL) /* out of free RAM? punt. */
break;
@@ -3222,9 +3241,11 @@
if (!pmap_valid_entry(*srcl->pte)) /* skip invalid entrys */
continue;
+#ifdef DIAGNOSTIC
if (pmap_valid_entry(*dstl->pte))
panic("pmap_transfer_ptes: attempt to overwrite "
"active entry");
+#endif
/*
* let's not worry about non-pvlist mappings (typically device
@@ -3256,9 +3277,11 @@
*/
bank = vm_physseg_find(atop(opte & PG_FRAME), &off);
+#ifdef DIAGNOSTIC
if (bank == -1)
panic("pmap_transfer_ptes: PG_PVLIST PTE and "
"no pv_head!");
+#endif
pvh = &vm_physmem[bank].pmseg.pvhead[off];
/*
@@ -3271,9 +3294,11 @@
if (lpve->pv_pmap == srcpmap &&
lpve->pv_va == srcl->addr)
break;
+#ifdef DIAGNOSTIC
if (lpve == NULL)
panic("pmap_transfer_ptes: PG_PVLIST PTE, but "
"entry not found");
+#endif
/*
* update src ptp. if the ptp is null in the pventry, then
@@ -3378,7 +3403,7 @@
struct vm_page *ptp;
struct pv_head *pvh;
struct pv_entry *pve;
- int bank, off;
+ int bank, off, error;
boolean_t wired = (flags & PMAP_WIRED) != 0;
#ifdef DIAGNOSTIC
@@ -3403,10 +3428,17 @@
*/
ptes = pmap_map_ptes(pmap); /* locks pmap */
- if (pmap == pmap_kernel())
+ if (pmap == pmap_kernel()) {
ptp = NULL;
- else
+ } else {
ptp = pmap_get_ptp(pmap, pdei(va), FALSE);
+ if (ptp == NULL) {
+ if (flags & PMAP_CANFAIL) {
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ panic("pmap_enter: get ptp failed");
+ }
+ }
opte = ptes[i386_btop(va)]; /* old PTE */
/*
@@ -3436,9 +3468,11 @@
/* if this is on the PVLIST, sync R/M bit */
if (opte & PG_PVLIST) {
bank = vm_physseg_find(atop(pa), &off);
+#ifdef DIAGNOSTIC
Home |
Main Index |
Thread Index |
Old Index