Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/netbsd-1-4]: src/sys/arch/sparc/sparc pullup 1.142->1.145 (pk): support ...
details: https://anonhg.NetBSD.org/src/rev/b420c40bb09d
branches: netbsd-1-4
changeset: 468886:b420c40bb09d
user: perry <perry%NetBSD.org@localhost>
date: Tue Jun 22 16:42:45 1999 +0000
description:
pullup 1.142->1.145 (pk): support for hypersparc CPU modules
diffstat:
sys/arch/sparc/sparc/pmap.c | 176 ++++++++++++++++++++++++++++---------------
1 files changed, 115 insertions(+), 61 deletions(-)
diffs (truncated from 455 to 300 lines):
diff -r 417098f0bb82 -r b420c40bb09d sys/arch/sparc/sparc/pmap.c
--- a/sys/arch/sparc/sparc/pmap.c Tue Jun 22 14:37:43 1999 +0000
+++ b/sys/arch/sparc/sparc/pmap.c Tue Jun 22 16:42:45 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.141.2.1 1999/04/26 15:43:49 perry Exp $ */
+/* $NetBSD: pmap.c,v 1.141.2.2 1999/06/22 16:42:45 perry Exp $ */
/*
* Copyright (c) 1996
@@ -520,14 +520,20 @@
*/
#if defined(SUN4M)
-
-/* Macros which implement SRMMU TLB flushing/invalidation */
-
-#define tlb_flush_page(va) sta((va & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP,0)
-#define tlb_flush_segment(vreg, vseg) sta((vreg << RGSHIFT) | (vseg << SGSHIFT)\
- | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
-#define tlb_flush_context() sta(ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
-#define tlb_flush_all() sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
+/*
+ * Macros which implement SRMMU TLB flushing/invalidation
+ */
+#define tlb_flush_page(va) \
+ sta(((vaddr_t)(va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)
+
+#define tlb_flush_segment(vr, vs) \
+ sta(((vr)<<RGSHIFT) | ((vs)<<SGSHIFT) | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
+
+#define tlb_flush_region(vr) \
+ sta(((vr) << RGSHIFT) | ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
+
+#define tlb_flush_context() sta(ASI_SRMMUFP_L0, ASI_SRMMUFP, 0)
+#define tlb_flush_all() sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
static u_int VA2PA __P((caddr_t));
static u_long srmmu_bypass_read __P((u_long));
@@ -551,7 +557,8 @@
/* Try each level in turn until we find a valid pte. Otherwise panic */
pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
-(void)lda(SRMMU_SFSR, ASI_SRMMU);
+ /* Unlock fault status; required on Hypersparc modules */
+ (void)lda(SRMMU_SFSR, ASI_SRMMU);
if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
((u_int)addr & 0xfff));
@@ -621,6 +628,7 @@
#endif
}
+
/*
* Set the page table entry for va to pte. Only affects software MMU page-
* tables (the in-core pagetables read by the MMU). Ignores TLB, and
@@ -1708,10 +1716,16 @@
if (pm == pmap_kernel())
printf("mmu_pagein: kernel wants map at va 0x%x, vr %d, vs %d\n", va, vr, vs);
#endif
+#if 0
+#if defined(SUN4_MMU3L)
+printf("mmu_pagein: pm=%p, va 0x%x, vr %d, vs %d, rp=%p, segmap=%p\n", pm, va, vr, vs, rp, rp->rg_segmap);
+#endif
+#endif
/* return 0 if we have no PMEGs to load */
if (rp->rg_segmap == NULL)
return (0);
+
#if defined(SUN4_MMU3L)
if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
smeg_t smeg;
@@ -2465,8 +2479,13 @@
flags |= MR4M(tpte);
if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
- cache_flush_page(va); /* XXX: do we need this?*/
- tlb_flush_page(va); /* paranoid? */
+ /* Only do this for write-back caches? */
+ cache_flush_page(va);
+ /*
+ * VIPT caches might use the TLB when
+ * flushing, so we flush the TLB again.
+ */
+ tlb_flush_page(va);
}
/* Clear mod/ref bits from PTE and write it back */
@@ -2634,6 +2653,10 @@
if (pm->pm_ctx) {
setcontext(pm->pm_ctxnum);
cache_flush_page(pv->pv_va);
+#if defined(SUN4M)
+ if (CPU_ISSUN4M)
+ tlb_flush_page(pv->pv_va);
+#endif
}
pv = pv->pv_next;
if (pv == NULL)
@@ -2883,7 +2906,8 @@
#if defined(SUN4_MMU3L)
/* Reserve one region for temporary mappings */
- tregion = --nregion;
+ if (HASSUN4_MMU3L)
+ tregion = --nregion;
#endif
/*
@@ -4221,7 +4245,7 @@
}
/* if we're done with a region, leave it wired */
}
-#endif /* sun4m */
+#endif /* SUN4M */
/*
* Just like pmap_rmk_magic, but we have a different threshold.
* Note that this may well deserve further tuning work.
@@ -4451,9 +4475,6 @@
for (; va < endva; va += NBPG) {
int tpte;
- if (pm->pm_ctx)
- tlb_flush_page(va);
-
tpte = pte0[VA_SUN4M_VPG(va)];
if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
@@ -4484,6 +4505,9 @@
panic("pmap_rmu: too many PTEs in segment; "
"va 0x%lx; endva 0x%lx", va, endva);
#endif
+ if (pm->pm_ctx)
+ tlb_flush_page(va);
+
setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
}
@@ -4507,7 +4531,7 @@
if (--rp->rg_nsegmap == 0) {
if (pm->pm_ctx)
- tlb_flush_context(); /* Paranoia? */
+ tlb_flush_region(vr); /* Paranoia? */
setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
free(rp->rg_segmap, M_VMPMAP);
rp->rg_segmap = NULL;
@@ -4515,7 +4539,7 @@
}
}
}
-#endif /* sun4m */
+#endif /* SUN4M */
/*
* Lower (make more strict) the protection on the specified
@@ -5027,7 +5051,7 @@
if (--rp->rg_nsegmap == 0) {
if (pm->pm_ctx)
- tlb_flush_context();
+ tlb_flush_region(vr);
setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
free(rp->rg_segmap, M_VMPMAP);
rp->rg_segmap = NULL;
@@ -5064,8 +5088,8 @@
vaddr_t sva, eva;
vm_prot_t prot;
{
- int va, nva, vr, vs;
- int s, ctx;
+ vaddr_t va, nva;
+ int s, ctx, vr, vs;
struct regmap *rp;
struct segmap *sp;
@@ -5077,6 +5101,13 @@
return;
}
+#ifdef DEBUG
+ if (pmapdebug & PDB_CHANGEPROT)
+ printf("pmap_protect[curpid %d, ctx %d](%lx, %lx, %x)\n",
+ curproc==NULL ? -1 : curproc->p_pid,
+ pm->pm_ctx ? pm->pm_ctxnum : -1, sva, eva, prot);
+#endif
+
write_user_windows();
ctx = getcontext4m();
s = splpmap();
@@ -5108,7 +5139,9 @@
if (sp->sg_pte == NULL)
panic("pmap_protect: no pages");
#endif
- /* pages loaded: take away write bits from MMU PTEs */
+ /*
+ * pages loaded: take away write bits from MMU PTEs
+ */
if (pm->pm_ctx)
setcontext4m(pm->pm_ctxnum);
@@ -5116,11 +5149,6 @@
for (; va < nva; va += NBPG) {
int tpte;
- if (pm->pm_ctx) {
- /* Flush TLB entry */
- tlb_flush_page(va);
- }
-
tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
/*
* Flush cache so that any existing cache
@@ -5132,6 +5160,8 @@
pmap_stats.ps_npg_prot_actual++;
if (pm->pm_ctx) {
cache_flush_page(va);
+ /* Flush TLB entry */
+ tlb_flush_page(va);
}
setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
tpte & ~PPROT_WRITE);
@@ -5180,14 +5210,7 @@
rp = &pm->pm_regmap[VA_VREG(va)];
sp = &rp->rg_segmap[VA_VSEG(va)];
- ctx = getcontext4m();
- if (pm->pm_ctx) {
- /* Flush TLB entry */
- setcontext4m(pm->pm_ctxnum);
- tlb_flush_page(va);
- }
pte = sp->sg_pte[VA_SUN4M_VPG(va)];
-
if ((pte & SRMMU_PROT_MASK) == newprot) {
/* only wiring changed, and we ignore wiring */
pmap_stats.ps_useless_changeprots++;
@@ -5200,18 +5223,24 @@
* Flush cache if page has been referenced to
* avoid stale protection bits in the cache tags.
*/
+
+ ctx = getcontext4m();
+ setcontext4m(pm->pm_ctxnum);
if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
(SRMMU_PG_C|PG_SUN4M_OBMEM))
cache_flush_page(va);
+
+ tlb_flush_page(va);
+ setcontext4m(ctx);
}
setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
(pte & ~SRMMU_PROT_MASK) | newprot);
+
out:
- setcontext4m(ctx);
splx(s);
}
-#endif /* 4m */
+#endif /* SUN4M */
/*
* Insert (MI) physical page pa at virtual address va in the given pmap.
@@ -5465,6 +5494,26 @@
rp->rg_nsegmap = 0;
for (i = NSEGRG; --i >= 0;)
sp++->sg_pmeg = seginval;
+
+#if defined(SUN4_MMU3L)
+/*
+ * XXX - preallocate the region MMU cookies.
+ * XXX - Doing this keeps the machine running for a while
+ * XXX - Remove or alter this after dealing with the bugs...
+ */
+ if (HASSUN4_MMU3L) {
+ vaddr_t tva;
+ rp->rg_smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
+ setregmap(va, rp->rg_smeg);
+
+ tva = VA_ROUNDDOWNTOREG(va);
+ for (i = 0; i < NSEGRG; i++) {
+ setsegmap(tva, seginval);
+ tva += NBPSG;
+ };
+ }
+/* XXX - end of work-around */
+#endif
}
sp = &rp->rg_segmap[vs];
@@ -5613,8 +5662,11 @@
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
- printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
- pm, va, pa, prot, wired);
+ printf("pmap_enter[curpid %d, ctx %d]"
+ "(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
Home |
Main Index |
Thread Index |
Old Index