Source-Changes-HG archive
[src/trunk]: src/sys/arch/x68k/x68k First attempt to implement PMAP_NEW inter...
details: https://anonhg.NetBSD.org/src/rev/324b4098307a
branches: trunk
changeset: 473377:324b4098307a
user: minoura <minoura%NetBSD.org@localhost>
date: Mon May 31 14:39:13 1999 +0000
description:
First attempt to implement the PMAP_NEW interface.
The code was mostly written before the pmap_k* inconsistency fixes,
so workarounds may still remain; they should be removed.
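For context, a hedged sketch (not part of the commit) of the caller-visible signature changes this diff makes under PMAP_NEW: pmap_create() loses its size argument, pmap_page_protect() is keyed by a struct vm_page pointer rather than a paddr_t, and pmap_clear_modify()/pmap_clear_reference() gain a return value. The function example_caller, its argument pg, and the exact headers are assumptions, not code from this change:

#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>

/*
 * Hypothetical caller, for illustration only; assumes a managed
 * page "pg" and the UVM/pmap interfaces of this era.
 */
void
example_caller(pg)
	struct vm_page *pg;
{
#if defined(PMAP_NEW)
	pmap_t pm = pmap_create();		/* size argument is gone */

	pmap_page_protect(pg, VM_PROT_READ);	/* takes a vm_page, not a paddr_t */
	if (pmap_clear_modify(pg))		/* now returns a value, matching the
						 * boolean_t pmap_changebit prototype */
		printf("modify bit was set\n");
#else
	pmap_t pm = pmap_create(0);		/* a non-zero size returns NULL */

	pmap_page_protect(VM_PAGE_TO_PHYS(pg), VM_PROT_READ);
	pmap_clear_modify(VM_PAGE_TO_PHYS(pg));	/* returns void */
#endif
	pmap_destroy(pm);
}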
diffstat:
sys/arch/x68k/x68k/pmap.c | 264 ++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 255 insertions(+), 9 deletions(-)
diffs (truncated from 455 to 300 lines):
diff -r 3455be11fed7 -r 324b4098307a sys/arch/x68k/x68k/pmap.c
--- a/sys/arch/x68k/x68k/pmap.c Mon May 31 14:37:20 1999 +0000
+++ b/sys/arch/x68k/x68k/pmap.c Mon May 31 14:39:13 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.35 1999/05/26 19:16:36 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.36 1999/05/31 14:39:13 minoura Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -94,6 +94,7 @@
*/
#include "opt_compat_hpux.h"
+#include "opt_pmap_new.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -283,7 +284,11 @@
*/
void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
boolean_t pmap_testbit __P((paddr_t, int));
+#if defined(PMAP_NEW)
+boolean_t pmap_changebit __P((paddr_t, int, int));
+#else
void pmap_changebit __P((paddr_t, int, int));
+#endif
void pmap_enter_ptpage __P((pmap_t, vaddr_t));
void pmap_collect1 __P((pmap_t, paddr_t, paddr_t));
void pmap_pinit __P((pmap_t));
@@ -717,12 +722,21 @@
*
* Note: no locking is necessary in this function.
*/
+#if defined(PMAP_NEW)
+pmap_t
+pmap_create()
+#else
pmap_t
pmap_create(size)
vsize_t size;
+#endif
{
pmap_t pmap;
+#if defined(PMAP_NEW)
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
+ ("pmap_create()\n"));
+#else
PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
("pmap_create(%lx)\n", size));
@@ -731,6 +745,7 @@
*/
if (size)
return (NULL);
+#endif
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
@@ -987,21 +1002,40 @@
* Lower the permission for all mappings to a given page to
* the permissions specified.
*/
+#if defined(PMAP_NEW)
+void
+pmap_page_protect(pg, prot)
+ struct vm_page *pg;
+ vm_prot_t prot;
+#else
void
pmap_page_protect(pa, prot)
paddr_t pa;
vm_prot_t prot;
+#endif
{
struct pv_entry *pv;
int s;
+#if defined(PMAP_NEW)
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+#endif
#ifdef DEBUG
+#if defined(PMAP_NEW)
+ if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
+ (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
+ printf("pmap_page_protect(%p, %x)\n", pg, prot);
+
+#else
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
printf("pmap_page_protect(%lx, %x)\n", pa, prot);
#endif
+#endif
+#if !defined(PMAP_NEW)
if (!PAGE_IS_MANAGED(pa))
return;
+#endif
switch (prot) {
case VM_PROT_READ|VM_PROT_WRITE:
@@ -1433,6 +1467,143 @@
#endif
}
+#if defined(PMAP_NEW)
+/*
+ * The pmap_k* parts for PMAP_NEW are original to x68k.
+ * We do not support VAC machines, i.e. M68K_MMU_HP. (mi)
+ */
+
+/*
+ * pmap_kenter_pa: [ INTERFACE ]
+ *
+ * Enter a va -> pa mapping into the kernel pmap without any
+ * physical->virtual tracking.
+ *
+ * Note: no locking is necessary in this function.
+ */
+void
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+{
+ pmap_t pmap = pmap_kernel();
+ pt_entry_t *pte;
+ int npte;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
+
+#ifdef DIAGNOSTIC
+ /*
+ * pmap_kenter() should never be used for CADDR1 and CADDR2.
+ */
+ if (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2)
+ panic("pmap_kenter_pa: used for CADDR1 or CADDR2");
+#endif
+
+ /*
+ * Segment table entry not valid, we need a new PT page
+ */
+ if (!pmap_ste_v(pmap, va))
+ pmap_enter_ptpage(pmap, va);
+
+ /*
+ * Build the new PTE.
+ */
+ pte = pmap_pte(pmap, va);
+ npte = pa | pte_prot(pmap, prot) | PG_V;
+#if defined(M68040) || defined(M68060)
+#if defined(M68020) || defined(M68030)
+ if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
+#else
+ if ((npte & PG_PROT) == PG_RW)
+#endif
+#ifdef DEBUG
+ if (dowriteback && dokwriteback)
+#endif
+ npte |= PG_CCB;
+#endif
+
+ *pte = npte;
+ pmap_update();
+}
+
+
+/*
+ * pmap_kenter_pgs: [ INTERFACE ]
+ *
+ * Enter a va -> pa mapping for the array of vm_page's into the
+ * kernel pmap without any physical->virtual tracking, starting
+ * at address va, for npgs pages.
+ *
+ * Note: no locking is necessary in this function.
+ */
+void
+pmap_kenter_pgs(va, pgs, npgs)
+ vaddr_t va;
+ vm_page_t *pgs;
+ int npgs;
+{
+ int i;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
+ ("pmap_kenter_pgs(%lx, %p, %d)\n", va, pgs, npgs));
+
+ for (i = 0; i < npgs; i++)
+ pmap_kenter_pa(va + (NBPG * i),
+ VM_PAGE_TO_PHYS(pgs[i]),
+ VM_PROT_READ|VM_PROT_WRITE);
+}
+
+/*
+ * pmap_kremove: [ INTERFACE ]
+ *
+ * Remove a mapping entered with pmap_kenter_pa() or pmap_kenter_pgs()
+ * starting at va, for size bytes (assumed to be page-rounded).
+ *
+ */
+void
+pmap_kremove(sva, size)
+ vaddr_t sva;
+ vsize_t size;
+{
+ pmap_t pmap = pmap_kernel();
+ vaddr_t nssva;
+ pt_entry_t *pte;
+ vaddr_t eva = sva + size;
+
+ PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
+ ("pmap_kremove(%lx, %lx)\n", sva, size));
+
+ while (sva < eva) {
+ nssva = x68k_trunc_seg(sva) + X68K_SEG_SIZE;
+ if (nssva == 0 || nssva > eva)
+ nssva = eva;
+
+ /*
+ * Invalidate every valid mapping within this segment.
+ */
+ pte = pmap_pte(pmap, sva);
+ while (sva < nssva) {
+ if (!pmap_pte_v(pte)) {
+ printf("pmap_kremove: attempt to remove invalid mapping.\n");
+ pte++;
+ sva += NBPG;
+ continue;
+ }
+ if (pmap_pte_w(pte)) {
+ printf("pmap_kremove: attempt to remove wired mapping.\n");
+ pmap->pm_stats.wired_count--;
+ }
+ pmap->pm_stats.resident_count--;
+ *pte = PG_NV;
+ TBIS(sva);
+ pte++;
+ sva += NBPG;
+ }
+ }
+}
+#endif /* PMAP_NEW */
+
/*
* pmap_change_wiring: [ INTERFACE ]
*
@@ -1913,14 +2084,27 @@
*
* Clear the modify bits on the specified physical page.
*/
+#if defined(PMAP_NEW)
+int
+pmap_clear_modify(pg)
+ struct vm_page *pg;
+#else
void
pmap_clear_modify(pa)
paddr_t pa;
+#endif
{
+#if defined(PMAP_NEW)
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
+
+ return pmap_changebit(pa, 0, ~PG_M);
+#else
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
pmap_changebit(pa, 0, ~PG_M);
+#endif
}
/*
@@ -1928,14 +2112,27 @@
*
* Clear the reference bit on the specified physical page.
*/
+#if defined(PMAP_NEW)
+int
+pmap_clear_reference(pg)
+ struct vm_page *pg;
+#else
void
pmap_clear_reference(pa)
paddr_t pa;
+#endif
{
+#if defined(PMAP_NEW)
+ paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
+
+ return pmap_changebit(pa, 0, ~PG_U);
+#else
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%lx)\n", pa));
pmap_changebit(pa, 0, ~PG_U);
+#endif
}
/*
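As a usage illustration of the unmanaged kernel-mapping entry points added above (pmap_kenter_pa, pmap_kenter_pgs, pmap_kremove), here is a minimal hedged sketch, not from the commit; example_kenter and its arguments are hypothetical, with va assumed to be a page-aligned kernel virtual address:

#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <machine/pmap.h>

/*
 * Hypothetical illustration only: enter a one-page unmanaged
 * kernel mapping and tear it down again.  No physical->virtual
 * tracking is done for such mappings, so the page will not be
 * found by pmap_page_protect() and friends.
 */
void
example_kenter(va, pa)
	vaddr_t va;	/* page-aligned kernel VA (assumed) */
	paddr_t pa;	/* physical address of some page (assumed) */
{
	/* pmap_kenter_pa() allocates the PT page itself if the
	 * segment is not yet valid. */
	pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/* ... use the mapping at va ... */

	/* The size must be page-rounded; here, exactly one page. */
	pmap_kremove(va, NBPG);
}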