Source-Changes-HG archive
[src/trunk]: src/sys/arch/mac68k/mac68k Pull in the many and various hp300 pm...
details: https://anonhg.NetBSD.org/src/rev/18519ce464ad
branches: trunk
changeset: 471593:18519ce464ad
user: scottr <scottr%NetBSD.org@localhost>
date: Mon Apr 05 06:34:01 1999 +0000
description:
Pull in the many and various hp300 pmap changes that Jason's done
so far this year. There are very few appreciable differences left
between this code and the hp300 version.
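One change visible in the diff below replaces the open-coded "#ifdef DEBUG ... if (pmapdebug & PDB_xxx) printf ..." blocks with a single PMAP_DPRINTF() macro keyed on a debug bit mask. The standalone C sketch that follows shows how that pattern behaves; the macro and the PDB_* flag names mirror the diff, but everything else (the main() driver and the sample messages) is illustration only, not code from this commit.

#include <stdio.h>

#define PDB_FOLLOW      0x0001
#define PDB_INIT        0x0002
#define PDB_PARANOIA    0x2000

#ifdef DEBUG
int pmapdebug = PDB_PARANOIA;
/* Print only when one of the requested level bits is set in pmapdebug. */
#define PMAP_DPRINTF(l, x)      if (pmapdebug & (l)) printf x
#else
#define PMAP_DPRINTF(l, x)      /* nothing */
#endif

int
main(void)
{
#ifdef DEBUG
        pmapdebug |= PDB_FOLLOW | PDB_INIT;
#endif
        /* The doubled parentheses pass a full printf argument list. */
        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
        PMAP_DPRINTF(PDB_INIT, ("pmap_init: %d pages\n", 42));
        return 0;
}

Built with -DDEBUG, the messages appear according to the bits set in pmapdebug; built without it, the macro expands to nothing and the calls vanish entirely, which is why the diff can drop the scattered #ifdef DEBUG blocks.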
diffstat:
sys/arch/mac68k/mac68k/pmap.c | 1120 +++++++++++++++++++---------------------
1 files changed, 532 insertions(+), 588 deletions(-)
diffs (truncated from 2022 to 300 lines):
diff -r 2e30ae280543 -r 18519ce464ad sys/arch/mac68k/mac68k/pmap.c
--- a/sys/arch/mac68k/mac68k/pmap.c Mon Apr 05 06:24:17 1999 +0000
+++ b/sys/arch/mac68k/mac68k/pmap.c Mon Apr 05 06:34:01 1999 +0000
@@ -1,6 +1,6 @@
-/* $NetBSD: pmap.c,v 1.52 1999/03/27 05:57:04 mycroft Exp $ */
+/* $NetBSD: pmap.c,v 1.53 1999/04/05 06:34:01 scottr Exp $ */
-/*
+/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
@@ -98,6 +98,7 @@
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
+#include <sys/pool.h>
#include <machine/pte.h>
@@ -109,58 +110,7 @@
#include <machine/cpu.h>
-#ifdef PMAPSTATS
-struct {
- int collectscans;
- int collectpages;
- int kpttotal;
- int kptinuse;
- int kptmaxuse;
-} kpt_stats;
-struct {
- int kernel; /* entering kernel mapping */
- int user; /* entering user mapping */
- int ptpneeded; /* needed to allocate a PT page */
- int nochange; /* no change at all */
- int pwchange; /* no mapping change, just wiring or protection */
- int wchange; /* no mapping change, just wiring */
- int pchange; /* no mapping change, just protection */
- int mchange; /* was mapped but mapping to different page */
- int managed; /* a managed page */
- int firstpv; /* first mapping for this PA */
- int secondpv; /* second mapping for this PA */
- int ci; /* cache inhibited */
- int unmanaged; /* not a managed page */
- int flushes; /* cache flushes */
-} enter_stats;
-struct {
- int calls;
- int removes;
- int pvfirst;
- int pvsearch;
- int ptinvalid;
- int uflushes;
- int sflushes;
-} remove_stats;
-struct {
- int calls;
- int changed;
- int alreadyro;
- int alreadyrw;
-} protect_stats;
-struct chgstats {
- int setcalls;
- int sethits;
- int setmiss;
- int clrcalls;
- int clrhits;
- int clrmiss;
-} changebit_stats[16];
-#endif
-
#ifdef DEBUG
-int debugmap = 0;
-int pmapdebug = 0x2000;
#define PDB_FOLLOW 0x0001
#define PDB_INIT 0x0002
#define PDB_ENTER 0x0004
@@ -177,13 +127,20 @@
#define PDB_WIRING 0x4000
#define PDB_PVDUMP 0x8000
+int debugmap = 0;
+int pmapdebug = PDB_PARANOIA;
+
+#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
+
#if defined(M68040)
int dowriteback = 1; /* 68040: enable writeback caching */
int dokwriteback = 1; /* 68040: enable writeback caching of kernel AS */
-#endif
extern vaddr_t pager_sva, pager_eva;
#endif
+#else /* ! DEBUG */
+#define PMAP_DPRINTF(l, x) /* nothing */
+#endif
/*
* Get STEs and PTEs for user/kernel address space
@@ -224,8 +181,6 @@
#define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
#define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
-#define pmap_valid_page(pa) (pmap_initialized && pmap_page_index(pa) >= 0)
-
int pmap_page_index(paddr_t pa);
/*
@@ -292,12 +247,22 @@
int protostfree; /* prototype (default) free ST map */
#endif
+extern caddr_t CADDR1, CADDR2;
+
+pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
+pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
+
+struct pool pmap_pmap_pool; /* memory pool for pmap structures */
+
struct pv_entry *pmap_alloc_pv __P((void));
void pmap_free_pv __P((struct pv_entry *));
void pmap_collect_pv __P((void));
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ vm_physseg_find(atop((pa)), NULL) != -1)
+
#define pa_to_pvh(pa) \
({ \
int bank_, pg_; \
@@ -319,7 +284,7 @@
*/
void pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
boolean_t pmap_testbit __P((paddr_t, int));
-void pmap_changebit __P((paddr_t, int, boolean_t));
+void pmap_changebit __P((paddr_t, int, int));
void pmap_enter_ptpage __P((pmap_t, vaddr_t));
void pmap_collect1 __P((pmap_t, paddr_t, vaddr_t));
@@ -333,12 +298,15 @@
#define PRM_CFLUSH 2
/*
- * Routine: pmap_virtual_space
+ * pmap_virtual_space: [ INTERFACE ]
+ *
+ * Report the range of available kernel virtual address
+ * space to the VM system during bootstrap.
*
- * Function:
- * Report the range of available kernel virtual address
- * space to the VM system during bootstrap. Called by
- * vm_bootstrap_steal_memory().
+ * This is only an interface function if we do not use
+ * pmap_steal_memory()!
+ *
+ * Note: no locking is necessary in this function.
*/
void
pmap_virtual_space(vstartp, vendp)
@@ -380,13 +348,18 @@
char *attr;
int rv, npages, bank;
-#ifdef DEBUG
- if (pmapdebug & PDB_FOLLOW)
- printf("pmap_init()\n");
-#endif
+ PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
+
+ /*
+ * Before we do anything else, initialize the PTE pointers
+ * used by pmap_zero_page() and pmap_copy_page().
+ */
+ caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
+ caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
+
/*
* Now that kernel map has been allocated, we can mark as
- * unavailable regions which we have mapped in pmap_bootstrap.
+ * unavailable regions which we have mapped in pmap_bootstrap().
*/
addr = (vaddr_t)IOBase;
if (uvm_map(kernel_map, &addr,
@@ -411,14 +384,12 @@
panic("pmap_init: bogons in the VM system!\n");
}
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT) {
- printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
- Sysseg, Sysmap, Sysptmap);
- printf(" pstart %lx, plast %x, vstart %lx, vend %lx\n",
- avail_start, avail_remaining, virtual_avail, virtual_end);
- }
-#endif
+ PMAP_DPRINTF(PDB_INIT,
+ ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
+ Sysseg, Sysmap, Sysptmap));
+ PMAP_DPRINTF(PDB_INIT,
+ (" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
+ avail_start, avail_end, virtual_avail, virtual_end));
/*
* Allocate memory for random pmap data structures. Includes the
@@ -443,13 +414,10 @@
pmap_attributes = (char *)addr;
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
- "tbl %p atr %p\n",
- s, page_cnt, Segtabzero, Segtabzeropa,
- pv_table, pmap_attributes);
-#endif
+ PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+ "tbl %p atr %p\n",
+ s, page_cnt, Segtabzero, Segtabzeropa,
+ pv_table, pmap_attributes));
/*
* Now that the pv and attribute tables have been allocated,
@@ -504,18 +472,10 @@
kpt_pages->kpt_va = addr2;
kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
} while (addr != addr2);
-#ifdef PMAPSTATS
- kpt_stats.kpttotal = atop(s);
-#endif
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: KPT: %ld pages from %lx to %lx\n",
- atop(s), addr, addr + s);
-#endif
- /*
- * Allocate the segment table map
- */
+ PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
+ atop(s), addr, addr + s));
+
/*
* Allocate the segment table map and the page table map.
*/
@@ -547,17 +507,28 @@
#endif
/*
+ * Initialize the pmap pools.
+ */
+ pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
+ 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+
+ /*
* Now it is safe to enable pv_table recording.
*/
pmap_initialized = TRUE;
}
+/*
+ * pmap_alloc_pv:
+ *
+ * Allocate a pv_entry.
+ */
struct pv_entry *
pmap_alloc_pv()
{
- struct pv_page *pvp;
- struct pv_entry *pv;
- int i;
+ struct pv_page *pvp;
+ struct pv_entry *pv;
+ int i;
if (pv_nfree == 0) {
pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
@@ -586,13 +557,18 @@
return pv;
}
+/*
+ * pmap_free_pv:
+ *
+ * Free a pv_entry.
+ */
void
pmap_free_pv(pv)
struct pv_entry *pv;
{
struct pv_page *pvp;
- pvp = (struct pv_page *)trunc_page(pv);
+ pvp = (struct pv_page *) trunc_page(pv);
switch (++pvp->pvp_pgi.pgi_nfree) {
case 1:
TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
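The new #include <sys/pool.h> and the pool_init() call in pmap_init() set up a dedicated memory pool, pmap_pmap_pool, for pmap structures. Objects are normally drawn from such a pool with pool_get() and returned with pool_put(); those call sites fall outside the truncated portion of the diff, so the sketch below is only a rough userland analogue of the fixed-size-pool idea, assuming a simple free-list cache. The pmap_pool_* names and the miniature struct pmap are invented for illustration and are not the kernel pool(9) interface.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct pmap; the real structure is much larger. */
struct pmap {
        int     pm_count;               /* reference count */
};

/* A trivial free-list pool: reuse freed objects before calling malloc(). */
struct pmap_pool {
        struct pmap     **pp_free;      /* stack of cached free objects */
        int               pp_nfree;
        int               pp_max;
};

static int
pmap_pool_init(struct pmap_pool *pp, int max)
{
        pp->pp_free = calloc(max, sizeof(*pp->pp_free));
        pp->pp_nfree = 0;
        pp->pp_max = max;
        return pp->pp_free != NULL ? 0 : -1;
}

static struct pmap *
pmap_pool_get(struct pmap_pool *pp)
{
        if (pp->pp_nfree > 0)
                return pp->pp_free[--pp->pp_nfree];     /* reuse a cached one */
        return malloc(sizeof(struct pmap));             /* otherwise allocate */
}

static void
pmap_pool_put(struct pmap_pool *pp, struct pmap *pm)
{
        if (pp->pp_nfree < pp->pp_max)
                pp->pp_free[pp->pp_nfree++] = pm;       /* cache for reuse */
        else
                free(pm);
}

int
main(void)
{
        struct pmap_pool pool;
        struct pmap *pm;

        if (pmap_pool_init(&pool, 16) != 0)
                return 1;
        pm = pmap_pool_get(&pool);
        pm->pm_count = 1;
        printf("allocated pmap with refcount %d\n", pm->pm_count);
        pmap_pool_put(&pool, pm);
        free(pool.pp_free);
        return 0;
}

Keeping freed objects on a free list instead of handing them straight back to the general allocator is the same trade the kernel pool makes: the common allocation case becomes a cheap list operation, at the cost of holding some memory in reserve.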