Source-Changes-HG archive
[src/trunk]: src/sys/arch/sparc64/sparc64 Clean this up a little. More later...
details: https://anonhg.NetBSD.org/src/rev/557b7f0338d4
branches: trunk
changeset: 473898:557b7f0338d4
user: eeh <eeh%NetBSD.org@localhost>
date: Mon Jun 21 01:44:14 1999 +0000
description:
Clean this up a little. More later. (Gee, this really is becoming a huge
mess).
diffstat:
sys/arch/sparc64/sparc64/pmap.c | 499 ++++++++++++++++++++++++++++++---------
1 files changed, 379 insertions(+), 120 deletions(-)
diffs (truncated from 790 to 300 lines):
diff -r 137cde40ba30 -r 557b7f0338d4 sys/arch/sparc64/sparc64/pmap.c
--- a/sys/arch/sparc64/sparc64/pmap.c Mon Jun 21 01:42:36 1999 +0000
+++ b/sys/arch/sparc64/sparc64/pmap.c Mon Jun 21 01:44:14 1999 +0000
@@ -1,37 +1,32 @@
-/* $NetBSD: pmap.c,v 1.40 1999/06/17 19:23:27 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.41 1999/06/21 01:44:14 eeh Exp $ */
/* #define NO_VCACHE */ /* Don't forget the locked TLB in dostart */
-#define HWREF 1
+#define HWREF 1
/* #define BOOT_DEBUG */
/* #define BOOT1_DEBUG */
/*
*
- * Copyright (C) 1996, 1997 Eduardo Horvath.
+ * Copyright (C) 1996-1999 Eduardo Horvath.
* All rights reserved.
*
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by TooLs GmbH.
- * 4. The name of TooLs GmbH may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*
- * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "opt_ddb.h"
@@ -158,6 +153,31 @@
vaddr_t pv_va; /* virtual address for mapping */
} *pv_entry_t;
/* PV flags encoded in the low bits of the VA of the first pv_entry */
+
+/*
+ * Diatribe on ref/mod counting:
+ *
+ * First of all, ref/mod info must be non-volatile. Hence we need to keep it
+ * in the pv_entry structure for each page. (We could bypass this for the
+ * vm_page_t, but that's a long story....)
+ *
+ * This architecture has nice, fast traps with lots of space for software bits
+ * in the TTE. To accelerate ref/mod counts we make use of these features.
+ *
+ * When we map a page initially, we place a TTE in the page table. It's
+ * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really
+ * writeable we set the TLB_REAL_W bit for the trap handler.
+ *
+ * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS
+ * bit in the appropriate TTE in the page table. Whenever we take a protection
+ * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD
+ * bits to enable writing and mark the page as modified.
+ *
+ * This means that we may have ref/mod information all over the place. The
+ * pmap routines must traverse the page tables of all pmaps with a given page
+ * and collect/clear all the ref/mod information and copy it into the pv_entry.
+ */
+
#define PV_ALIAS 0x1LL
#define PV_REF 0x2LL
#define PV_MOD 0x4LL
@@ -170,6 +190,8 @@
pv_entry_t pv_table; /* array of entries, one per page */
extern void pmap_remove_pv __P((struct pmap *pm, vaddr_t va, paddr_t pa));
+extern void pmap_enter_pv __P((struct pmap *pm, vaddr_t va, paddr_t pa));
+extern void pmap_page_cache __P((paddr_t pa, int mode));
/*
* First and last managed physical addresses. XXX only used for dumping the system.
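To make the ref/mod scheme described in the comment above concrete, here is a minimal sketch (not part of this commit) of how a pmap routine could walk every mapping of a page and fold the trap-handler-maintained TLB_ACCESS/TLB_MOD bits into the PV_REF/PV_MOD flags kept in the head pv_entry. pseg_get(), PV_VAMASK and the TLB_*/PV_* bit names are taken from the diff; pv_head() and the function name are hypothetical.

/*
 * Sketch only: gather hardware-recorded ref/mod state for one page.
 * Assumes pv_head(pa) returns the head pv_entry for a managed page.
 */
static void
pv_syncflags_sketch(paddr_t pa)
{
	pv_entry_t pv, npv;
	int64_t data;

	pv = pv_head(pa);			/* hypothetical lookup */
	for (npv = pv; npv != NULL; npv = npv->pv_next) {
		if (npv->pv_pmap == NULL)
			continue;
		data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
		if ((data & TLB_V) == 0)	/* no valid mapping here */
			continue;
		if (data & TLB_ACCESS)		/* TLB miss handler saw a reference */
			pv->pv_va |= PV_REF;
		if (data & TLB_MOD)		/* protection fault set TLB_W|TLB_MOD */
			pv->pv_va |= PV_MOD;
	}
}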
@@ -317,7 +339,7 @@
/*
*
* A context is simply a small number that differentiates multiple mappings
- * of the same address. Contextx on the spitfire are 13 bits, but could
+ * of the same address. Contexts on the spitfire are 13 bits, but could
* be as large as 17 bits.
*
* Each context is either free or attached to a pmap.
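As a purely illustrative aside (not code from this file), the "free or attached to a pmap" rule above can be pictured as nothing more than an array indexed by context number, with the 13-bit Spitfire limit bounding the array. The array and function names here are made up for the example.

#define SKETCH_NUMCTX	(1 << 13)		/* 8192 contexts on Spitfire */
static struct pmap *sketch_ctxbusy[SKETCH_NUMCTX];	/* NULL == free */

static int
ctx_alloc_sketch(struct pmap *pm)
{
	int c;

	/* Context 0 is conventionally reserved for the kernel. */
	for (c = 1; c < SKETCH_NUMCTX; c++) {
		if (sketch_ctxbusy[c] == NULL) {
			sketch_ctxbusy[c] = pm;	/* attach context to pmap */
			return (c);
		}
	}
	return (-1);		/* all contexts in use; caller must recycle one */
}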
@@ -334,7 +356,7 @@
#define pmap_get_page(p) uvm_page_physget((p));
/*
- * This is called during initppc, before the system is really initialized.
+ * This is called during bootstrap, before the system is really initialized.
*
* It's called with the start and end virtual addresses of the kernel.
* We bootstrap the pmap allocator now. We will allocate the basic
@@ -395,7 +417,8 @@
* Get hold or the message buffer.
*/
msgbufp = (struct kern_msgbuf *)MSGBUF_VA;
- msgbufsiz = NBPG /* round_page(sizeof(struct msgbuf)) */;
+/* XXXXX -- for uvmhist printing */
+ msgbufsiz = 4*NBPG /* round_page(sizeof(struct msgbuf)) */;
#ifdef BOOT_DEBUG
prom_printf("Trying to allocate msgbuf at %lx, size %lx\r\n",
(long)msgbufp, (long)msgbufsiz);
@@ -1067,6 +1090,23 @@
#endif
{
struct pmap *pm;
+
+#if defined(PMAP_NEW)
+#ifdef DEBUG
+ if (pmapdebug & (PDB_CREATE))
+ printf("pmap_create()\n");
+#endif
+#else /* ! PMAP_NEW */
+#ifdef DEBUG
+ if (pmapdebug & (PDB_CREATE))
+ printf("pmap_create(%lx)\n", size);
+#endif
+ /*
+ * Software use map does not need a pmap
+ */
+ if (size)
+ return(NULL);
+#endif /* PMAP_NEW */
pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
bzero((caddr_t)pm, sizeof *pm);
@@ -1171,10 +1211,16 @@
for (j=0; j<PTSZ; j++) {
int64_t data = ldxa(&ptbl[j], ASI_PHYS_CACHED);
if (data&TLB_V &&
- IS_VM_PHYSADDR(data&TLB_PA_MASK))
+ IS_VM_PHYSADDR(data&TLB_PA_MASK)) {
+#ifdef DEBUG
+ printf("pmap_release: pm=%p page %p still in use\n", pm,
+ ((long)i<<STSHIFT)|((long)k<<PDSHIFT)|((long)j<<PTSHIFT));
+ Debugger();
+#endif
pmap_remove_pv(pm,
((long)i<<STSHIFT)|((long)k<<PDSHIFT)|((long)j<<PTSHIFT),
data&TLB_PA_MASK);
+ }
}
vm_page_free1((vm_page_t)PHYS_TO_VM_PAGE((paddr_t)ptbl));
stxa(&pdir[k], ASI_PHYS_CACHED, NULL);
@@ -1601,18 +1647,19 @@
OF_enter();
}
#endif
- /* allocate new page table if needed */
#ifdef NOTDEF_DEBUG
if (pa>>32)
prom_printf("pmap_enter: va=%08x 64-bit pa=%x:%08x seg=%08x pte=%08x\r\n",
- va, (int)(pa>>32), (int)pa, (int)va_to_seg(va), (int)va_to_pte(va));
+ va, (int)(pa>>32), (int)pa,
+ (int)va_to_seg(va), (int)va_to_pte(va));
#endif
/*
- * If a mapping at this address already exists, remove it.
+ * XXXX If a mapping at this address already exists, remove it.
*/
if ((tte.data.data = pseg_get(pm, va))<0) {
pmap_remove(pm, va, va+NBPG-1);
- }
+ }
+
/*
* Construct the TTE.
*/
@@ -1623,7 +1670,7 @@
if (access_type & ~prot)
panic("pmap_enter: access_type exceeds prot");
#endif
- /* If we don't have the traphandler do it set the ref/mod bits now */
+ /* If we don't have the traphandler do it, set the ref/mod bits now */
if (access_type & VM_PROT_ALL)
pv->pv_va |= PV_REF;
if (access_type & VM_PROT_WRITE)
@@ -1644,16 +1691,19 @@
#ifdef DEBUG
enter_stats.ci ++;
#endif
- tte.tag.tag = TSB_TAG(0,pm->pm_ctx,va);
+/* tte.tag.tag = TSB_TAG(0,pm->pm_ctx,va); /* Not used any more. */
tte.data.data = TSB_DATA(0, size, pa, pm == pmap_kernel(),
(access_type & VM_PROT_WRITE),
(!(pa & PMAP_NC)),aliased,1,(pa & PMAP_LITTLE));
+#ifdef HWREF
if (prot & VM_PROT_WRITE) tte.data.data |= TLB_REAL_W;
+#endif
if (wired) tte.data.data |= TLB_TSB_LOCK;
ASSERT((tte.data.data & TLB_NFO) == 0);
pg = NULL;
#ifdef NOTDEF_DEBUG
- printf("pmap_enter: inserting %x:%x at %x\n", (int)(tte.data.data>>32), (int)tte.data.data, (int)va);
+ printf("pmap_enter: inserting %x:%x at %x\n",
+ (int)(tte.data.data>>32), (int)tte.data.data, (int)va);
#endif
while (pseg_set(pm, va, tte.data.data, pg) != NULL) {
if (pmap_initialized || !uvm_page_physget(&pg)) {
@@ -1677,7 +1727,8 @@
enter_stats.ptpneeded ++;
#endif
#ifdef NOTDEF_DEBUG
- printf("pmap_enter: inserting %x:%x at %x with %x\n", (int)(tte.data.data>>32), (int)tte.data.data, (int)va, (int)pg);
+ printf("pmap_enter: inserting %x:%x at %x with %x\n",
+ (int)(tte.data.data>>32), (int)tte.data.data, (int)va, (int)pg);
#endif
}
@@ -1720,26 +1771,19 @@
* we are only changing the protection bits.
*/
for (npv = pv; npv; npv = npv->pv_next) {
- aliased = (aliased || (pm == npv->pv_pmap && ((pv->pv_va^npv->pv_va)&VA_ALIAS_MASK)));
+ aliased = (aliased ||
+ (pm == npv->pv_pmap &&
+ ((pv->pv_va^npv->pv_va)&VA_ALIAS_MASK)));
if (pm == npv->pv_pmap && PV_MATCH(npv,va)) {
-#ifdef XXXX_DIAGNOSTIC
- unsigned entry;
+#ifdef PARANOIADIAG
+ int64_t data;
- if (!pm->pm_segtab)
- entry = kvtopte(va)->pt_entry;
- else {
- pte = pmap_segmap(pm, va);
- if (pte) {
- pte += (va >> PGSHIFT) &
- (NPTEPG - 1);
- entry = pte->pt_entry;
- } else
- entry = 0;
- }
- if (!(entry & PG_V) ||
- (entry & PG_FRAME) != pa)
- printf("pmap_enter: found va %x pa %x in pv_table but != %x\n",
- va, pa, entry);
+ data = pseg_get(pm, va);
+ if (data >= 0 ||
+ data&TLB_PA_MASK != pa)
+ printf(
+ "pmap_enter: found va %lx pa %lx in pv_table but != %lx\n",
+ va, pa, (long)data);
#endif
goto fnd;
}
@@ -1776,19 +1820,19 @@
npv->pv_va, npv->pv_pmap->pm_ctx);
#endif
/* Turn off cacheing of this TTE */
- if (pseg_set(npv->pv_pmap, va, pseg_get(npv->pv_pmap, va) & ~TLB_CV, 0)) {
+ if (pseg_set(pm, (npv->pv_va&PV_VAMASK),
+ pseg_get(pm, (npv->pv_va&PV_VAMASK))
+ & ~TLB_CV, 0)) {
printf("pmap_enter: aliased pseg empty!\n");
Debugger();
/* panic? */
}
-#if 0 /* This breaks refcounting */
- /* This may cause us to enter the same mapping twice. */
- tsb_enter(npv->pv_pmap->pm_ctx,(npv->pv_va&PV_VAMASK),
- pseg_get(npv->pv_pmap, va));
-#else
- i = ptelookup_va(va);
+ /* pmap unused? */
+ if ((!pm->pm_ctx) && pm != pmap_kernel())
+ continue;
+ i = ptelookup_va((npv->pv_va&PV_VAMASK));
if (tsb[i].tag.tag > 0 && tsb[i].tag.tag ==
- TSB_TAG(0,pm->pm_ctx,va)) {
+ TSB_TAG(0, pm->pm_ctx, (npv->pv_va&PV_VAMASK))) {
/*
* Invalidate the TSB
*
@@ -1803,11 +1847,10 @@
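For orientation, the last hunk above demotes every aliased mapping of a page to uncacheable and then knocks any matching entry out of the TSB. A condensed sketch of that per-mapping step (not the committed code) follows; pseg_get()/pseg_set(), ptelookup_va(), TSB_TAG(), TLB_CV, PV_VAMASK and the tsb[] array appear in the diff, while tsb_invalidate_sketch() stands in for the tag-clearing done in the truncated part of the hunk.

/*
 * Sketch only: uncache one aliased mapping and purge its TSB entry.
 */
static void
pv_uncache_sketch(struct pmap *pm, pv_entry_t npv)
{
	vaddr_t va = npv->pv_va & PV_VAMASK;
	int i;

	/* Clear the virtually-cacheable bit in the page-table entry. */
	if (pseg_set(pm, va, pseg_get(pm, va) & ~TLB_CV, 0)) {
		printf("pv_uncache_sketch: aliased pseg empty!\n");
		return;
	}

	/* A pmap without a context has nothing resident in the TSB. */
	if (!pm->pm_ctx && pm != pmap_kernel())
		return;

	/* Invalidate a stale TSB entry that still claims this va/ctx. */
	i = ptelookup_va(va);
	if (tsb[i].tag.tag > 0 &&
	    tsb[i].tag.tag == TSB_TAG(0, pm->pm_ctx, va))
		tsb_invalidate_sketch(i);	/* hypothetical helper */
}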