Source-Changes-HG archive
[src/trunk]: src/sys/arch/sparc/sparc implement pmap_k{enter_pa, remove}() cor...
details: https://anonhg.NetBSD.org/src/rev/0f4f4ebe7146
branches: trunk
changeset: 512093:0f4f4ebe7146
user: chs <chs@NetBSD.org>
date: Thu Jul 05 07:05:02 2001 +0000
description:
implement pmap_k{enter_pa,remove}() correctly.
other cleanup in preparation for upcoming UVM changes.
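
For readers unfamiliar with the interface: pmap_kenter_pa()/pmap_kremove()
establish and tear down wired, unmanaged kernel mappings, a cheaper and
simpler path than the general-purpose pmap_enter()/pmap_remove() pair that
the hunks below replace. The following is a minimal sketch of the calling
pattern; the prototypes match how the functions are invoked in the diff,
but the helper functions and their names are hypothetical illustrations,
not part of this commit:

    /* Kernel context; vaddr_t, paddr_t, etc. come from <uvm/uvm_extern.h>. */
    #include <uvm/uvm_extern.h>

    /*
     * Hypothetical helper showing the pmap_kenter_pa() idiom used below:
     * kernel mappings are implicitly wired and unmanaged, so there is no
     * need to pass PMAP_WIRED through pmap_enter().
     */
    static void
    kernel_map_page(vaddr_t va, paddr_t pa, int cacheit)
    {
            /* PMAP_NC in the low bits requests an uncached mapping. */
            pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC),
                VM_PROT_READ | VM_PROT_WRITE);
            pmap_update();          /* commit pending mapping changes */
    }

    static void
    kernel_unmap_pages(vaddr_t va, vsize_t len)
    {
            /* Undo kernel_map_page(); len covers whole pages. */
            pmap_kremove(va, len);
            pmap_update();
    }
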
diffstat:
sys/arch/sparc/sparc/pmap.c | 545 ++++++++++++++++++++++++++-----------------
1 files changed, 333 insertions(+), 212 deletions(-)
diffs (truncated from 996 to 300 lines):
diff -r 189c560bba88 -r 0f4f4ebe7146 sys/arch/sparc/sparc/pmap.c
--- a/sys/arch/sparc/sparc/pmap.c Thu Jul 05 06:37:58 2001 +0000
+++ b/sys/arch/sparc/sparc/pmap.c Thu Jul 05 07:05:02 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.193 2001/06/03 04:03:28 mrg Exp $ */
+/* $NetBSD: pmap.c,v 1.194 2001/07/05 07:05:02 chs Exp $ */
/*
* Copyright (c) 1996
@@ -659,9 +659,8 @@
pcache_flush_page(pa, 1);
/* Map the page */
- pmap_enter(pmap_kernel(), va, pa | (cacheit ? 0 : PMAP_NC),
- VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC),
+ VM_PROT_READ | VM_PROT_WRITE);
pmap_update();
return ((void *)va);
@@ -673,7 +672,16 @@
unsigned long sz;
int mtype;
{
- uvm_km_free(kernel_map, (vaddr_t)v, sz);
+ vaddr_t va;
+ paddr_t pa;
+ boolean_t rv;
+
+ va = (vaddr_t)v;
+ rv = pmap_extract(pmap_kernel(), va, &pa);
+ KASSERT(rv);
+ uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+ pmap_kremove(va, sz);
+ uvm_km_free(kernel_map, va, sz);
}
#endif /* 4m only */
@@ -1268,10 +1276,6 @@
panic("me_alloc: all pmegs gone");
pm = me->me_pmap;
- if (pm == NULL)
- panic("me_alloc: LRU entry has no pmap");
- if (pm == pmap_kernel())
- panic("me_alloc: stealing from kernel");
#ifdef DEBUG
if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
@@ -1292,12 +1296,8 @@
#endif
rp = &pm->pm_regmap[me->me_vreg];
- if (rp->rg_segmap == NULL)
- panic("me_alloc: LRU entry's pmap has no segments");
sp = &rp->rg_segmap[me->me_vseg];
pte = sp->sg_pte;
- if (pte == NULL)
- panic("me_alloc: LRU entry's pmap has no ptes");
/*
* The PMEG must be mapped into some context so that we can
@@ -1354,7 +1354,7 @@
/* off old pmap chain */
TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
simple_unlock(&pm->pm_lock);
- setcontext4(ctx); /* done with old context */
+ setcontext4(ctx);
/* onto new pmap chain; new pmap is already locked, if needed */
TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
@@ -1684,7 +1684,7 @@
int s, cnum, i, doflush;
struct regmap *rp;
int gap_start, gap_end;
- unsigned long va;
+ vaddr_t va;
/*XXX-GCC!*/gap_start=gap_end=0;
#ifdef DEBUG
@@ -1873,15 +1873,12 @@
union ctxinfo *c;
int newc, oldc;
- if ((c = pm->pm_ctx) == NULL)
- panic("ctx_free");
+ c = pm->pm_ctx;
pm->pm_ctx = NULL;
oldc = getcontext();
-
if (CACHEINFO.c_vactype != VAC_NONE) {
/* Do any cache flush needed on context switch */
(*cpuinfo.pure_vcache_flush)();
-
newc = pm->pm_ctxnum;
CHANGE_CONTEXTS(oldc, newc);
cache_flush_context();
@@ -1945,7 +1942,6 @@
struct segmap *sp;
write_user_windows(); /* paranoid? */
-
s = splvm(); /* paranoid? */
if (pv0->pv_pmap == NULL) {
splx(s);
@@ -1958,16 +1954,11 @@
vr = VA_VREG(va);
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
- if (rp->rg_segmap == NULL)
- panic("pv_changepte: no segments");
-
sp = &rp->rg_segmap[vs];
pte = sp->sg_pte;
if (sp->sg_pmeg == seginval) {
/* not in hardware: just fix software copy */
- if (pte == NULL)
- panic("pv_changepte: pte == NULL");
pte += VA_VPG(va);
*pte = (*pte | bis) & ~bic;
} else {
@@ -1975,14 +1966,6 @@
/* in hardware: fix hardware copy */
if (CTX_USABLE(pm,rp)) {
- /*
- * Bizarreness: we never clear PG_W on
- * pager pages.
- */
- if (bic == PG_W &&
- va >= uvm.pager_sva && va < uvm.pager_eva)
- continue;
-
setcontext4(pm->pm_ctxnum);
/* XXX should flush only when necessary */
tpte = getpte4(va);
@@ -2031,8 +2014,6 @@
struct regmap *rp;
struct segmap *sp;
- write_user_windows(); /* paranoid? */
-
s = splvm(); /* paranoid? */
if (pv0->pv_pmap == NULL) { /* paranoid */
splx(s);
@@ -2046,13 +2027,9 @@
vr = VA_VREG(va);
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
- if (rp->rg_segmap == NULL)
- panic("pv_syncflags: no segments");
sp = &rp->rg_segmap[vs];
-
if ((pmeg = sp->sg_pmeg) == seginval)
continue;
-
if (CTX_USABLE(pm,rp)) {
setcontext4(pm->pm_ctxnum);
/* XXX should flush only when necessary */
@@ -2099,10 +2076,6 @@
{
struct pvlist *npv;
-#ifdef DIAGNOSTIC
- if (pv->pv_pmap == NULL)
- panic("pv_unlink0");
-#endif
/*
* First entry is special (sigh).
*/
@@ -2135,8 +2108,6 @@
for (prev = pv;; prev = npv, npv = npv->pv_next) {
pmap_stats.ps_unlink_pvsearch++;
- if (npv == NULL)
- panic("pv_unlink");
if (npv->pv_pmap == pm && npv->pv_va == va)
break;
}
@@ -2238,10 +2209,6 @@
* Walk the given pv list, and for each PTE, set or clear some bits
* (e.g., PG_W or PG_NC).
*
- * As a special case, this never clears PG_W on `pager' pages.
- * These, being kernel addresses, are always in hardware and have
- * a context.
- *
* This routine flushes the cache for any page whose PTE changes,
* as long as the process has a context; this is overly conservative.
* It also copies ref and mod bits to the pvlist, on the theory that
@@ -2260,7 +2227,6 @@
struct segmap *sp;
write_user_windows(); /* paranoid? */
-
s = splvm(); /* paranoid? */
if (pv0->pv_pmap == NULL) {
splx(s);
@@ -2273,20 +2239,9 @@
va = pv->pv_va;
vr = VA_VREG(va);
rp = &pm->pm_regmap[vr];
- if (rp->rg_segmap == NULL)
- panic("pv_changepte: no segments");
-
sp = &rp->rg_segmap[VA_VSEG(va)];
if (pm->pm_ctx) {
- /*
- * Bizarreness: we never clear PG_W on
- * pager pages.
- */
- if ((bic & PPROT_WRITE) &&
- va >= uvm.pager_sva && va < uvm.pager_eva)
- continue;
-
setcontext4m(pm->pm_ctxnum);
/*
@@ -2298,15 +2253,10 @@
/* Flush TLB so memory copy is up-to-date */
tlb_flush_page(va);
-
}
tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
- if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
- printf("pv_changepte: invalid PTE for 0x%x\n", va);
- continue;
- }
-
+ KASSERT((tpte & SRMMU_TETYPE) == SRMMU_TEPTE);
pv0->pv_flags |= MR4M(tpte);
tpte = (tpte | bis) & ~bic;
setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
@@ -2331,9 +2281,9 @@
int ctx, s;
struct regmap *rp;
struct segmap *sp;
+ boolean_t doflush;
write_user_windows(); /* paranoid? */
-
s = splvm(); /* paranoid? */
if (pv0->pv_pmap == NULL) { /* paranoid */
splx(s);
@@ -2347,41 +2297,46 @@
vr = VA_VREG(va);
vs = VA_VSEG(va);
rp = &pm->pm_regmap[vr];
- if (rp->rg_segmap == NULL)
- panic("pv_syncflags: no segments");
sp = &rp->rg_segmap[vs];
-
- if (sp->sg_pte == NULL) /* invalid */
+ if (sp->sg_pte == NULL) {
continue;
+ }
/*
* We need the PTE from memory as the TLB version will
* always have the SRMMU_PG_R bit on.
*/
+
if (pm->pm_ctx) {
setcontext4m(pm->pm_ctxnum);
tlb_flush_page(va);
}
tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
-
if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
(tpte & (SRMMU_PG_M|SRMMU_PG_R))) { /* and mod/refd */
-
flags |= MR4M(tpte);
- if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
+ /*
+ * Clear mod/ref bits from PTE and write it back.
+ * We must do this before flushing the cache to
+ * avoid races with another cpu setting the M bit
+ * and creating dirty cache lines again.
+ */
+
+ doflush = pm->pm_ctx && (tpte & SRMMU_PG_M);
+ tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
+ setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
+ if (doflush) {
+
/* Only do this for write-back caches? */
cache_flush_page(va);
+
/*
* VIPT caches might use the TLB when
* flushing, so we flush the TLB again.
*/
tlb_flush_page(va);
}
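
The key ordering change in pv_syncflags4m() above deserves emphasis: the
mod/ref bits are cleared in the in-memory PTE before the cache is flushed.
Restated as a standalone fragment (identifiers as in the diff; this is an
illustrative sketch of the committed logic, not additional code):

    /* Remember whether a write-back is needed before touching the PTE. */
    doflush = pm->pm_ctx && (tpte & SRMMU_PG_M);

    /* Clear mod/ref in the in-memory PTE first... */
    tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
    setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);

    if (doflush) {
            /*
             * ...then write back dirty lines.  Flushing first would
             * leave a window in which another CPU sets SRMMU_PG_M and
             * dirties cache lines again after the flush but before the
             * bits are cleared, losing modifications.
             */
            cache_flush_page(va);
            tlb_flush_page(va);     /* VIPT caches may consult the TLB */
    }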