Source-Changes-HG archive
[src/netbsd-1-4]: src/sys/arch/vax/vax pullup via patch (ragge): fix critical...
details: https://anonhg.NetBSD.org/src/rev/a8d2be8605f2
branches: netbsd-1-4
changeset: 469203:a8d2be8605f2
user: perry <perry%NetBSD.org@localhost>
date: Mon Jul 12 19:24:42 1999 +0000
description:
pullup via patch (ragge): fix critical paging/swapping problems
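The heart of the change, as the diff below shows, is to stop carving user page-table space out of a pte_map kernel submap and to manage it with a statically backed extent(9) map instead. The following is a minimal sketch of that pattern; the wrapper names ptemap_init/ptemap_alloc/ptemap_free are illustrative only, while the extent_create/extent_alloc/extent_free calls, their flags, and the M_HTABLE type (which the patch itself flags as bogus) mirror the hunks below.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/extent.h>

#define PTMAPSZ	EXTENT_FIXED_STORAGE_SIZE(100)
static char ptmapstorage[PTMAPSZ];	/* static backing store for extent descriptors */
static struct extent *ptemap;

/* Create the map once the virtual range [start, end) has been reserved
 * (pmap_init() in the patch).  Descriptor storage is supplied up front. */
static void
ptemap_init(u_long start, u_long end)
{
	ptemap = extent_create("ptemap", start, end, M_HTABLE,
	    ptmapstorage, PTMAPSZ, EX_NOCOALESCE);
	if (ptemap == NULL)
		panic("ptemap_init");
}

/* Allocate a longword-aligned chunk of page-table space; may sleep
 * until space is available (pmap_pinit() in the patch). */
static u_long
ptemap_alloc(u_long size)
{
	u_long va;

	if (extent_alloc(ptemap, size, 4, 0, EX_WAITSPACE | EX_WAITOK, &va))
		panic("ptemap_alloc");
	return (va);
}

/* Return a chunk to the map (pmap_release() in the patch). */
static void
ptemap_free(u_long va, u_long size)
{
	extent_free(ptemap, va, size, EX_WAITOK);
}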
diffstat:
sys/arch/vax/vax/pmap.c | 393 ++++++++++++++++++++++++++++-------------------
1 files changed, 232 insertions(+), 161 deletions(-)
diffs (truncated from 710 to 300 lines):
diff -r 36ef44914bc8 -r a8d2be8605f2 sys/arch/vax/vax/pmap.c
--- a/sys/arch/vax/vax/pmap.c Mon Jul 12 19:23:06 1999 +0000
+++ b/sys/arch/vax/vax/pmap.c Mon Jul 12 19:24:42 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.61 1999/03/26 23:41:38 mycroft Exp $ */
+/* $NetBSD: pmap.c,v 1.61.2.1 1999/07/12 19:24:42 perry Exp $ */
/*
* Copyright (c) 1994, 1998 Ludd, University of Lule}, Sweden.
* All rights reserved.
@@ -29,10 +29,13 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_ddb.h"
+
#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/malloc.h>
+#include <sys/extent.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/systm.h>
@@ -69,7 +72,7 @@
#endif
#define ISTACK_SIZE NBPG
-vaddr_t istack;
+vaddr_t istack;
/*
* This code uses bitfield operators for most page table entries.
*/
@@ -85,11 +88,12 @@
struct pte *Sysmap; /* System page table */
struct pv_entry *pv_table; /* array of entries, one per LOGICAL page */
void *scratch;
-vaddr_t iospace;
+vaddr_t iospace;
vaddr_t ptemapstart, ptemapend;
-vm_map_t pte_map;
-struct vm_map pte_map_store;
+struct extent *ptemap;
+#define PTMAPSZ EXTENT_FIXED_STORAGE_SIZE(100)
+char ptmapstorage[PTMAPSZ];
extern caddr_t msgbufaddr;
@@ -102,8 +106,8 @@
#endif
void rensa __P((int, struct pte *));
-vaddr_t avail_start, avail_end;
-vaddr_t virtual_avail, virtual_end; /* Available virtual memory */
+vaddr_t avail_start, avail_end;
+vaddr_t virtual_avail, virtual_end; /* Available virtual memory */
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));
@@ -130,25 +134,6 @@
*/
#define USRPTSIZE ((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
-#ifdef notyet
-#define vax_btoc(x) (((unsigned)(x) + VAX_PGOFSET) >> VAX_PGSHIFT)
- /* all physical memory */
- sysptsize = vax_btoc(avail_end);
- /* reverse mapping struct is in phys memory already */
- /* user page table */
- sysptsize += vax_btoc(USRPTSIZE * sizeof(struct pte) * maxproc);
- /* kernel malloc area */
- sysptsize += vax_btoc(NKMEMCLUSTERS * CLSIZE);
- /* Different table etc... called again in machdep */
- physmem = btoc(avail_end); /* XXX in machdep also */
- sysptsize += vax_btoc((int) allocsys((caddr_t) 0));
- /* buffer pool (buffer_map) */
- sysptsize += vax_btoc(MAXBSIZE * nbuf);
- /* exec argument submap */
- sysptsize += vax_btoc(16 * NCARGS);
- /* phys_map - XXX This submap should be nuked */
- sysptsize += vax_btoc(VM_PHYS_SIZE);
-#else
/* Kernel alloc area */
sysptsize = (((0x100000 * maxproc) >> VAX_PGSHIFT) / 4);
/* reverse mapping struct */
@@ -159,7 +144,6 @@
sysptsize += UPAGES * maxproc;
/* IO device register space */
sysptsize += IOSPSZ;
-#endif
/*
* Virtual_* and avail_* is used for mapping of system page table.
@@ -216,16 +200,20 @@
uvmexp.pagesize = NBPG;
uvm_setpagesize();
- /* QDSS console mapping hack */
+ /* QDSS console mapping hack */
#if NQD > 0
- /*
- * This allocates some kernel virtual address space. qdcninit
- * maps things here
- */
- MAPVIRT(qvmem[0], 64 * 1024 * NQD / VAX_NBPG);
- MAPVIRT(qd_ubaio, 16);
+ /*
+ * This allocates some kernel virtual address space. qdcninit
+ * maps things here
+ */
+ MAPVIRT(qvmem[0], 64 * 1024 * NQD / VAX_NBPG);
+ MAPVIRT(qd_ubaio, 16);
#endif
+ /* User page table map. This is big. */
+ MAPVIRT(ptemapstart, USRPTSIZE);
+ ptemapend = virtual_avail;
+
MAPVIRT(iospace, IOSPSZ); /* Device iospace mapping area */
/* Init SCB and set up stray vectors. */
@@ -245,9 +233,11 @@
printf("Sysmap %p, istack %lx, scratch %p\n",Sysmap,istack,scratch);
printf("etext %p\n", &etext);
printf("SYSPTSIZE %x\n",sysptsize);
- printf("pv_table %p, \n", pv_table);
+ printf("pv_table %p, ptemapstart %lx ptemapend %lx\n",
+ pv_table, ptemapstart, ptemapend);
printf("avail_start %lx, avail_end %lx\n",avail_start,avail_end);
- printf("virtual_avail %lx,virtual_end %lx\n",virtual_avail,virtual_end);
+ printf("virtual_avail %lx,virtual_end %lx\n",
+ virtual_avail, virtual_end);
printf("startpmapdebug %p\n",&startpmapdebug);
#endif
@@ -294,14 +284,19 @@
/*
* pmap_init() is called as part of vm init after memory management
* is enabled. It is meant to do machine-specific allocations.
- * Here we allocate virtual memory for user page tables.
+ * Here is the resource map for the user page tables inited.
*/
void
pmap_init()
{
- /* reserve place on SPT for UPT */
- pte_map = uvm_km_suballoc(kernel_map, &ptemapstart, &ptemapend,
- USRPTSIZE * 4 * maxproc, TRUE, FALSE, &pte_map_store);
+ /*
+ * Create the extent map used to manage the page table space.
+ * XXX - M_HTABLE is bogus.
+ */
+ ptemap = extent_create("ptemap", ptemapstart, ptemapend,
+ M_HTABLE, ptmapstorage, PTMAPSZ, EX_NOCOALESCE);
+ if (ptemap == NULL)
+ panic("pmap_init");
}
@@ -346,14 +341,17 @@
pmap_pinit(pmap)
pmap_t pmap;
{
- int bytesiz;
+ int bytesiz, res;
/*
* Allocate PTEs and stash them away in the pmap.
* XXX Ok to use kmem_alloc_wait() here?
*/
bytesiz = USRPTSIZE * sizeof(struct pte);
- pmap->pm_p0br = (void *)uvm_km_valloc_wait(pte_map, bytesiz);
+ res = extent_alloc(ptemap, bytesiz, 4, 0, EX_WAITSPACE|EX_WAITOK,
+ (u_long *)&pmap->pm_p0br);
+ if (res)
+ panic("pmap_pinit");
pmap->pm_p0lr = vax_btoc(MAXTSIZ + MAXDSIZ + MMAPSPACE) | AST_PCB;
(vaddr_t)pmap->pm_p1br = (vaddr_t)pmap->pm_p0br + bytesiz - 0x800000;
pmap->pm_p1lr = (0x200000 - vax_btoc(MAXSSIZ));
@@ -378,16 +376,30 @@
pmap_release(pmap)
struct pmap *pmap;
{
+ vaddr_t saddr, eaddr;
+ paddr_t paddr;
+
#ifdef PMAPDEBUG
if(startpmapdebug)printf("pmap_release: pmap %p\n",pmap);
#endif
- if (pmap->pm_p0br)
- uvm_km_free_wakeup(pte_map, (vaddr_t)pmap->pm_p0br,
- USRPTSIZE * sizeof(struct pte));
+ if (pmap->pm_p0br == 0)
+ return;
+
+ saddr = (vaddr_t)pmap->pm_p0br;
+ eaddr = saddr + USRPTSIZE * sizeof(struct pte);
+ for (; saddr < eaddr; saddr += NBPG) {
+ paddr = (kvtopte(saddr)->pg_pfn << VAX_PGSHIFT);
+ if (paddr == 0)
+ continue; /* page not mapped */
+ bzero(kvtopte(saddr), sizeof(struct pte) * 8); /* XXX */
+ uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
+ }
+ extent_free(ptemap, (u_long)pmap->pm_p0br,
+ USRPTSIZE * sizeof(struct pte), EX_WAITOK);
+ mtpr(0, PR_TBIA);
}
-
/*
* pmap_destroy(pmap): Remove a reference from the pmap.
* If the pmap is NULL then just return else decrese pm_count.
@@ -469,7 +481,7 @@
void
pmap_kenter_pa(va, pa, prot)
vaddr_t va;
- paddr_t pa;
+ paddr_t pa;
vm_prot_t prot;
{
int *ptp;
@@ -477,7 +489,7 @@
ptp = (int *)kvtopte(va);
#ifdef PMAPDEBUG
if(startpmapdebug)
-printf("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n", va, pa, prot, ptp);
+ printf("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n", va, pa, prot, ptp);
#endif
ptp[0] = PG_V | ((prot & VM_PROT_WRITE)? PG_KW : PG_KR) |
PG_PFNUM(pa) | PG_SREF;
@@ -501,7 +513,7 @@
#ifdef PMAPDEBUG
if(startpmapdebug)
-printf("pmap_kremove: va: %lx, len %lx, ptp %p\n", va, len, kvtopte(va));
+ printf("pmap_kremove: va: %lx, len %lx, ptp %p\n", va, len, kvtopte(va));
#endif
/*
@@ -532,7 +544,7 @@
#ifdef PMAPDEBUG
if(startpmapdebug)
-printf("pmap_kenter_pgs: va: %lx, pgs %p, npgs %x\n", va, pgs, npgs);
+ printf("pmap_kenter_pgs: va: %lx, pgs %p, npgs %x\n", va, pgs, npgs);
#endif
/*
@@ -556,71 +568,117 @@
}
#endif
-void
+/*
+ * pmap_enter() is the main routine that puts in mappings for pages, or
+ * upgrades mappings to more "rights". Note that:
+ * - "wired" isn't used. We don't loose mappings unless asked for.
+ * - "access_type" is set if the entering was caused by a fault.
+ */
+void
pmap_enter(pmap, v, p, prot, wired, access_type)
- register pmap_t pmap;
- vaddr_t v;
- paddr_t p;
- vm_prot_t prot;
- boolean_t wired;
- vm_prot_t access_type;
+ pmap_t pmap;
+ vaddr_t v;
+ paddr_t p;
+ vm_prot_t prot, access_type;
+ boolean_t wired;
{
struct pv_entry *pv, *tmp;
- int i, s, nypte, *patch;
-
+ int i, s, newpte, oldpte, *patch;
#ifdef PMAPDEBUG
-if(startpmapdebug)
-printf("pmap_enter: pmap: %p,virt %lx, phys %lx, prot %x w %x\n",
- pmap,v,p,prot, wired);
+if (startpmapdebug)
+ printf("pmap_enter: pmap %p v %lx p %lx prot %x wired %d access %x\n",
+ pmap, v, p, prot, wired, access_type);
#endif
+ /* Can this happen with UVM??? */
if (pmap == 0)
return;
- if (v < 0x40000000) {
- patch = (int *)pmap->pm_p0br;
- i = (v >> VAX_PGSHIFT);
- if (i >= (pmap->pm_p0lr & ~AST_MASK))
- panic("P0 too small in pmap_enter");
- patch = (int *)pmap->pm_p0br;
- nypte = PG_V|(p>>VAX_PGSHIFT)|(prot&VM_PROT_WRITE?PG_RW:PG_RO);
- } else if (v & KERNBASE) {
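The diff is cut off inside the rewritten pmap_enter(), but the complete pmap_release() hunk above shows how the new code tears a pmap down: walk the user page-table region, release any physical pages backing it, hand the virtual range back to the ptemap extent, and flush the translation buffer. Below is a condensed restatement of that loop using the same kvtopte()/PHYS_TO_VM_PAGE()/uvm_pagefree()/mtpr() primitives as the patch; the function name is illustrative, the header list is assumed (pmap.c's own includes are not fully shown), and the caller is expected to skip pmaps whose pm_p0br is still zero, as the patch does.

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>	/* assumed: uvm_pagefree(), PHYS_TO_VM_PAGE() */
#include <machine/pte.h>	/* assumed: struct pte, pg_pfn, kvtopte() */
#include <machine/mtpr.h>	/* assumed: mtpr(), PR_TBIA */

static void
usrpt_release(struct extent *ptemap, vaddr_t p0br, vsize_t len)
{
	vaddr_t va;
	paddr_t pa;

	for (va = p0br; va < p0br + len; va += NBPG) {
		pa = kvtopte(va)->pg_pfn << VAX_PGSHIFT;
		if (pa == 0)
			continue;	/* this part of the page table was never faulted in */
		/* Clear the mapping PTEs; sizeof(struct pte) * 8 is carried
		 * over verbatim from the patch, where it is marked XXX. */
		bzero(kvtopte(va), sizeof(struct pte) * 8);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
	}
	extent_free(ptemap, (u_long)p0br, len, EX_WAITOK);
	mtpr(0, PR_TBIA);	/* invalidate the whole translation buffer */
}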