Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/vax Use pmap_steal_memory() for early memory alloca...
details: https://anonhg.NetBSD.org/src/rev/e2f1ba7d761b
branches: trunk
changeset: 474187:e2f1ba7d761b
user: ragge <ragge%NetBSD.org@localhost>
date: Wed Jun 30 19:31:33 1999 +0000
description:
Use pmap_steal_memory() for early memory allocation.
diffstat:
sys/arch/vax/include/pmap.h | 4 +-
sys/arch/vax/vax/pmap.c | 71 ++++++++++++++++++++++++++++++--------------
2 files changed, 51 insertions(+), 24 deletions(-)
diffs (133 lines):
diff -r c3183c7779c4 -r e2f1ba7d761b sys/arch/vax/include/pmap.h
--- a/sys/arch/vax/include/pmap.h Wed Jun 30 18:48:06 1999 +0000
+++ b/sys/arch/vax/include/pmap.h Wed Jun 30 19:31:33 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.35 1999/06/17 19:23:21 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.36 1999/06/30 19:31:34 ragge Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -108,6 +108,8 @@
#define PMAP_MAP_POOLPAGE(pa) ((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)
+#define PMAP_STEAL_MEMORY
+
/* Routines that are best to define as macros */
#define pmap_phys_address(phys) ((u_int)(phys) << PGSHIFT)
#define pmap_unwire(pmap, v) /* no need */
diff -r c3183c7779c4 -r e2f1ba7d761b sys/arch/vax/vax/pmap.c
--- a/sys/arch/vax/vax/pmap.c Wed Jun 30 18:48:06 1999 +0000
+++ b/sys/arch/vax/vax/pmap.c Wed Jun 30 19:31:33 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.66 1999/06/06 19:09:50 ragge Exp $ */
+/* $NetBSD: pmap.c,v 1.67 1999/06/30 19:31:33 ragge Exp $ */
/*
* Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
* All rights reserved.
@@ -124,25 +124,6 @@
*/
#define USRPTSIZE ((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
-#ifdef notyet
-#define vax_btoc(x) (((unsigned)(x) + VAX_PGOFSET) >> VAX_PGSHIFT)
- /* all physical memory */
- sysptsize = vax_btoc(avail_end);
- /* reverse mapping struct is in phys memory already */
- /* user page table */
- sysptsize += vax_btoc(USRPTSIZE * sizeof(struct pte) * maxproc);
- /* kernel malloc area */
- sysptsize += vax_btoc(NKMEMCLUSTERS * CLSIZE);
- /* Different table etc... called again in machdep */
- physmem = btoc(avail_end); /* XXX in machdep also */
- sysptsize += vax_btoc((int) allocsys((caddr_t) 0));
- /* buffer pool (buffer_map) */
- sysptsize += vax_btoc(MAXBSIZE * nbuf);
- /* exec argument submap */
- sysptsize += vax_btoc(16 * NCARGS);
- /* phys_map - XXX This submap should be nuked */
- sysptsize += vax_btoc(VM_PHYS_SIZE);
-#else
/* Kernel alloc area */
sysptsize = (((0x100000 * maxproc) >> VAX_PGSHIFT) / 4);
/* reverse mapping struct */
@@ -153,7 +134,6 @@
sysptsize += UPAGES * maxproc;
/* IO device register space */
sysptsize += IOSPSZ;
-#endif
/*
* Virtual_* and avail_* is used for mapping of system page table.
@@ -267,6 +247,46 @@
mtpr(1, PR_MAPEN);
}
+#ifdef PMAP_STEAL_MEMORY
+/*
+ * Let the VM system do early memory allocation from the direct-mapped
+ * physical memory instead.
+ */
+vaddr_t
+pmap_steal_memory(size, vstartp, vendp)
+ vsize_t size;
+ vaddr_t *vstartp, *vendp;
+{
+ vaddr_t v;
+ int npgs;
+
+#ifdef PMAPDEBUG
+ if (startpmapdebug)
+ printf("pmap_steal_memory: size 0x%lx start %p end %p\n",
+ size, vstartp, vendp);
+#endif
+ size = round_page(size);
+ npgs = btoc(size);
+
+ /*
+ * A VAX only has one segment of memory.
+ */
+#ifdef DIAGNOSTIC
+ if (vm_physmem[0].pgs)
+ panic("pmap_steal_memory: called _after_ bootstrap");
+#endif
+
+ v = (vm_physmem[0].avail_start << PGSHIFT) | KERNBASE;
+ vm_physmem[0].avail_start += npgs;
+ vm_physmem[0].start += npgs;
+ if (vstartp)
+ *vstartp = virtual_avail;
+ if (vendp)
+ *vendp = virtual_end;
+ bzero((caddr_t)v, size);
+ return v;
+}
+#else
/*
* How much virtual space does this kernel have?
* (After mapping kernel text, data, etc.)
@@ -279,6 +299,7 @@
*v_start = virtual_avail;
*v_end = virtual_end;
}
+#endif
/*
* pmap_init() is called as part of vm init after memory management
@@ -605,10 +626,14 @@
oldpte &= PG_FRAME;
if ((newpte & PG_FRAME) != oldpte) {
- /* Mapped before? Remove it then. */
- if (oldpte)
+ /*
+ * Mapped before? Remove it then.
+ * This can be done more efficiently than pmap_page_protect().
+ */
+ if (oldpte) {
pmap_page_protect(PHYS_TO_VM_PAGE((oldpte
<< VAX_PGSHIFT)), 0);
+ }
s = splimp();
if (pv->pv_pte == 0) {
Home |
Main Index |
Thread Index |
Old Index