Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/cesfic/cesfic incorporate patch from Chuck Silvers ...
details: https://anonhg.NetBSD.org/src/rev/1e1cd29b4cb2
branches: trunk
changeset: 513222:1e1cd29b4cb2
user: drochner <drochner%NetBSD.org@localhost>
date: Thu Jul 26 15:41:14 2001 +0000
description:
incorporate patch from Chuck Silvers which basically reduces
pmap_kenter_pa() / pmap_kremove() to the basics - i.e., removes pv tracking
diffstat:
sys/arch/cesfic/cesfic/pmap.c | 219 +++++++++++++++++++++++++++--------------
1 files changed, 143 insertions(+), 76 deletions(-)
diffs (truncated from 431 to 300 lines):
diff -r 5edcc889dc06 -r 1e1cd29b4cb2 sys/arch/cesfic/cesfic/pmap.c
--- a/sys/arch/cesfic/cesfic/pmap.c Thu Jul 26 15:35:20 2001 +0000
+++ b/sys/arch/cesfic/cesfic/pmap.c Thu Jul 26 15:41:14 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.3 2001/06/15 17:48:13 drochner Exp $ */
+/* $NetBSD: pmap.c,v 1.4 2001/07/26 15:41:14 drochner Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -464,7 +464,7 @@
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
- kpt_free_list = (struct kpt_page *) 0;
+ kpt_free_list = NULL;
do {
addr2 -= NBPG;
(--kpt_pages)->kpt_next = kpt_free_list;
@@ -491,7 +491,7 @@
* XXX We don't want to hang when we run out of
* page tables, so we lower maxproc so that fork()
* will fail instead. Note that root could still raise
- * this value via sysctl(2).
+ * this value via sysctl(3).
*/
maxproc = (HP_PTMAXSIZE / HP_MAX_PTSIZE);
} else
@@ -744,9 +744,6 @@
{
int count;
- if (pmap == NULL)
- return;
-
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
simple_lock(&pmap->pm_lock);
@@ -796,10 +793,6 @@
pmap_reference(pmap)
pmap_t pmap;
{
-
- if (pmap == NULL)
- return;
-
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
simple_lock(&pmap->pm_lock);
@@ -867,9 +860,6 @@
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
- if (pmap == NULL)
- return;
-
firstpage = TRUE;
needcflush = FALSE;
flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
@@ -964,9 +954,6 @@
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
printf("pmap_page_protect(%lx, %x)\n", pa, prot);
#endif
- if (PAGE_IS_MANAGED(pa) == 0)
- return;
-
switch (prot) {
case VM_PROT_READ|VM_PROT_WRITE:
case VM_PROT_ALL:
@@ -1029,16 +1016,10 @@
("pmap_protect(%p, %lx, %lx, %x)\n",
pmap, sva, eva, prot));
- if (pmap == NULL)
- return;
-
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
}
- if (prot & VM_PROT_WRITE)
- return;
-
isro = pte_prot(pmap, prot);
needtflush = active_pmap(pmap);
firstpage = TRUE;
@@ -1290,20 +1271,6 @@
}
#endif
}
-
- /*
- * Speed pmap_is_referenced() or pmap_is_modified() based
- * on the hint provided in access_type.
- */
-#ifdef DIAGNOSTIC
- if ((flags & VM_PROT_ALL) & ~prot)
- panic("pmap_enter: access_type exceeds prot");
-#endif
- if (flags & VM_PROT_WRITE)
- *pa_to_attribute(pa) |= (PG_U|PG_M);
- else if (flags & VM_PROT_ALL)
- *pa_to_attribute(pa) |= PG_U;
-
splx(s);
}
/*
@@ -1394,7 +1361,54 @@
paddr_t pa;
vm_prot_t prot;
{
- pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
+ struct pmap *pmap = pmap_kernel();
+ pt_entry_t *pte;
+ int s, npte;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
+
+ /*
+ * Segment table entry not valid, we need a new PT page
+ */
+
+ if (!pmap_ste_v(pmap, va)) {
+ s = splvm();
+ pmap_enter_ptpage(pmap, va);
+ splx(s);
+ }
+
+ pa = m68k_trunc_page(pa);
+ pte = pmap_pte(pmap, va);
+
+ PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
+ KASSERT(!pmap_pte_v(pte));
+
+ /*
+ * Increment counters
+ */
+
+ pmap->pm_stats.resident_count++;
+ pmap->pm_stats.wired_count++;
+
+ /*
+ * Build the new PTE.
+ */
+
+ npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
+#if defined(M68040)
+ if (mmutype == MMU_68040 && (npte & (PG_PROT)) == PG_RW)
+ npte |= PG_CCB;
+#endif
+
+ PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
+#if defined(M68040)
+ if (mmutype == MMU_68040) {
+ DCFP(pa);
+ ICPP(pa);
+ }
+#endif
+ *pte = npte;
}
void
@@ -1402,8 +1416,70 @@
vaddr_t va;
vsize_t len;
{
- for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
- pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
+ struct pmap *pmap = pmap_kernel();
+ vaddr_t sva, eva, nssva;
+ pt_entry_t *pte;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
+ ("pmap_kremove(%lx, %lx)\n", va, len));
+
+ sva = va;
+ eva = va + len;
+ while (sva < eva) {
+ nssva = m68k_trunc_seg(sva) + NBSEG;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+
+ /*
+ * If VA belongs to an unallocated segment,
+ * skip to the next segment boundary.
+ */
+
+ if (!pmap_ste_v(pmap, sva)) {
+ sva = nssva;
+ continue;
+ }
+
+ /*
+ * Invalidate every valid mapping within this segment.
+ */
+
+ pte = pmap_pte(pmap, sva);
+ while (sva < nssva) {
+ if (pmap_pte_v(pte)) {
+#ifdef DEBUG
+ struct pv_entry *pv;
+ int s;
+
+ pv = pa_to_pvh(pmap_pte_pa(pte));
+ s = splvm();
+ while (pv->pv_pmap != NULL) {
+ KASSERT(pv->pv_pmap != pmap_kernel() ||
+ pv->pv_va != sva);
+ pv = pv->pv_next;
+ if (pv == NULL) {
+ break;
+ }
+ }
+ splx(s);
+#endif
+ /*
+ * Update statistics
+ */
+
+ pmap->pm_stats.wired_count--;
+ pmap->pm_stats.resident_count--;
+
+ /*
+ * Invalidate the PTE.
+ */
+
+ *pte = PG_NV;
+ TBIS(va);
+ }
+ pte++;
+ sva += NBPG;
+ }
}
}
@@ -1424,9 +1500,6 @@
PMAP_DPRINTF(PDB_FOLLOW,
("pmap_unwire(%p, %lx)\n", pmap, va));
- if (pmap == NULL)
- return;
-
pte = pmap_pte(pmap, va);
#ifdef DEBUG
/*
@@ -1484,7 +1557,7 @@
PMAP_DPRINTF(PDB_FOLLOW,
("pmap_extract(%p, %lx) -> ", pmap, va));
- if (pmap && pmap_ste_v(pmap, va)) {
+ if (pmap_ste_v(pmap, va)) {
pte = *(u_int *)pmap_pte(pmap, va);
if (pte) {
pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
@@ -1654,12 +1727,12 @@
* that page back on the free list.
*/
for (pkpt = &kpt_used_list, kpt = *pkpt;
- kpt != (struct kpt_page *)0;
+ kpt != NULL;
pkpt = &kpt->kpt_next, kpt = *pkpt)
if (kpt->kpt_pa == kpa)
break;
#ifdef DEBUG
- if (kpt == (struct kpt_page *)0)
+ if (kpt == NULL)
panic("pmap_collect: lost a KPT page");
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
printf("collect: %lx (%lx) to free list\n",
@@ -2172,6 +2245,11 @@
PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
("remove: free stab %p\n",
ptpmap->pm_stab));
+ pmap_remove(pmap_kernel(),
+ (vaddr_t)ptpmap->pm_stab,
+ (vaddr_t)ptpmap->pm_stab + HP_STSIZE);
+ uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t)
+ ptpmap->pm_stpa));
uvm_km_free_wakeup(st_map,
(vaddr_t)ptpmap->pm_stab,
HP_STSIZE);
@@ -2229,14 +2307,13 @@
pt_entry_t *pte;
int s;
- if (PAGE_IS_MANAGED(pa) == 0)
- return(FALSE);
-
pv = pa_to_pvh(pa);
s = splvm();
+
/*
* Check saved info first
*/
+
if (*pa_to_attribute(pa) & bit) {
splx(s);
return(TRUE);
@@ -2289,21 +2366,20 @@
PMAP_DPRINTF(PDB_BITS,
Home |
Main Index |
Thread Index |
Old Index