Source-Changes-HG archive
[src/trunk]: src/sys/arch/sparc/sparc Pass the CPU context to all TLB flush r...
details: https://anonhg.NetBSD.org/src/rev/0336c51ef28b
branches: trunk
changeset: 541035:0336c51ef28b
user: pk <pk%NetBSD.org@localhost>
date: Tue Dec 31 15:23:29 2002 +0000
description:
Pass the CPU context to all TLB flush routines. Because of this (and the
fact that cache flushes are also passed the context number), most
"long-term" context switches can be eliminated from the SRMMU versions
of the pmap functions.
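In short, callers now hand the MMU context number down to the flush layer instead of
performing the context switch themselves. The sketch below is illustrative only and is
not part of this changeset; demap_old()/demap_new() are made-up helper names, while
pm_ctxnum, getcontext4m(), setcontext4m() and tlb_flush_page() are the names used in
sys/arch/sparc/sparc/pmap.c.

	/*
	 * Illustrative sketch only -- not part of this changeset.
	 */

	/* Before: the caller had to do a "long-term" context switch itself. */
	static void
	demap_old(struct pmap *pm, vaddr_t va)
	{
		int octx = getcontext4m();	/* remember the current context */

		setcontext4m(pm->pm_ctxnum);	/* switch to the target context */
		tlb_flush_page(va);		/* old single-argument form */
		setcontext4m(octx);		/* switch back */
	}

	/* After: the context number simply travels with the flush request,
	 * and the flush routine does any temporary switch internally. */
	static void
	demap_new(struct pmap *pm, vaddr_t va)
	{
		tlb_flush_page(va, pm->pm_ctxnum);
	}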
diffstat:
sys/arch/sparc/sparc/pmap.c | 390 +++++++++++++++++++++----------------------
1 files changed, 189 insertions(+), 201 deletions(-)
diffs (truncated from 1021 to 300 lines):
diff -r c489c6ddd11c -r 0336c51ef28b sys/arch/sparc/sparc/pmap.c
--- a/sys/arch/sparc/sparc/pmap.c Tue Dec 31 15:10:28 2002 +0000
+++ b/sys/arch/sparc/sparc/pmap.c Tue Dec 31 15:23:29 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.216 2002/12/21 12:52:56 pk Exp $ */
+/* $NetBSD: pmap.c,v 1.217 2002/12/31 15:23:29 pk Exp $ */
/*
* Copyright (c) 1996
@@ -435,11 +435,11 @@
void setpte4m __P((vaddr_t va, int pte));
#ifdef MULTIPROCESSOR
-void setpgt4m_va __P((vaddr_t, int *, int, int));
+void setpgt4m_va __P((vaddr_t, int *, int, int, int));
#else
-#define setpgt4m_va(va, ptep, pte, pageflush) do { \
+#define setpgt4m_va(va, ptep, pte, pageflush, ctx) do { \
if ((pageflush)) \
- tlb_flush_page((va)); \
+ tlb_flush_page(va, ctx); \
setpgt4m((ptep), (pte)); \
} while (0)
#endif
@@ -531,27 +531,55 @@
*/
#if defined(SUN4M) || defined(SUN4D)
+#ifdef MULTIPROCESSOR
+/* XXX - should be optimised by hand-coding */
+#define trapoff() do { setpsr(getpsr() & ~PSR_ET); } while(0)
+#define trapon() do { setpsr(getpsr() | PSR_ET); } while(0)
+#else
+#define trapoff()
+#define trapon()
+#endif
/*
* SP versions of the tlb flush routines.
*/
-static __inline__ void sp_tlb_flush_page(int va)
+static __inline__ void sp_tlb_flush_page(int va, int ctx)
{
+ int octx = getcontext4m();
+ trapoff();
+ setcontext4m(ctx);
tlb_flush_page_real(va);
+ setcontext4m(octx);
+ trapon();
}
-static __inline__ void sp_tlb_flush_segment(int va)
+static __inline__ void sp_tlb_flush_segment(int va, int ctx)
{
+ int octx = getcontext4m();
+ trapoff();
+ setcontext4m(ctx);
tlb_flush_segment_real(va);
+ setcontext4m(octx);
+ trapon();
}
-static __inline__ void sp_tlb_flush_region(int va)
+static __inline__ void sp_tlb_flush_region(int va, int ctx)
{
+ int octx = getcontext4m();
+ trapoff();
+ setcontext4m(ctx);
tlb_flush_region_real(va);
+ setcontext4m(octx);
+ trapon();
}
-static __inline__ void sp_tlb_flush_context(void)
+static __inline__ void sp_tlb_flush_context(int ctx)
{
+ int octx = getcontext4m();
+ trapoff();
+ setcontext4m(ctx);
tlb_flush_context_real();
+ setcontext4m(octx);
+ trapon();
}
static __inline__ void sp_tlb_flush_all(void)
@@ -563,10 +591,10 @@
/*
* The SMP versions of the tlb flush routines.
*/
-static __inline__ void smp_tlb_flush_context __P((void));
-static __inline__ void smp_tlb_flush_region __P((int va));
-static __inline__ void smp_tlb_flush_segment __P((int va));
-static __inline__ void smp_tlb_flush_page __P((int va));
+static __inline__ void smp_tlb_flush_context __P((int ctx));
+static __inline__ void smp_tlb_flush_region __P((int va, int ctx));
+static __inline__ void smp_tlb_flush_segment __P((int va, int ctx));
+static __inline__ void smp_tlb_flush_page __P((int va, int ctx));
static __inline__ void smp_tlb_flush_all __P((void));
#if 0
@@ -600,38 +628,34 @@
* SMP TLB flush routines; these *must* be broadcast on sun4m systems
*/
static __inline__ void
-smp_tlb_flush_page(va)
- int va;
+smp_tlb_flush_page(int va, int ctx)
{
-
INCR_COUNT(smp_tlb_fp_cnt);
- xcall((xcall_func_t)sp_tlb_flush_page, (int)va, 0/*ctx*/, 0, 0, 0);
+ xcall((xcall_func_t)sp_tlb_flush_page, va, ctx, 0, 0, 0);
}
static __inline__ void
-smp_tlb_flush_segment(va)
- int va;
+smp_tlb_flush_segment(int va, int ctx)
{
INCR_COUNT(smp_tlb_fs_cnt);
- xcall((xcall_func_t)sp_tlb_flush_segment, va, 0/*ctx*/, 0, 0, 0);
+ xcall((xcall_func_t)sp_tlb_flush_segment, va, ctx, 0, 0, 0);
}
static __inline__ void
-smp_tlb_flush_region(va)
- int va;
+smp_tlb_flush_region(int va, int ctx)
{
INCR_COUNT(smp_tlb_fr_cnt);
- xcall((xcall_func_t)sp_tlb_flush_region, va, 0/*ctx*/, 0, 0, 0);
+ xcall((xcall_func_t)sp_tlb_flush_region, va, ctx, 0, 0, 0);
}
static __inline__ void
-smp_tlb_flush_context()
+smp_tlb_flush_context(int ctx)
{
INCR_COUNT(smp_tlb_fc_cnt);
- xcall((xcall_func_t)sp_tlb_flush_context, 0/*ctx*/, 0, 0, 0, 0);
+ xcall((xcall_func_t)sp_tlb_flush_context, ctx, 0, 0, 0, 0);
}
static __inline__ void
@@ -644,16 +668,16 @@
#endif
#if defined(MULTIPROCESSOR)
-#define tlb_flush_page(va) smp_tlb_flush_page((int)va)
-#define tlb_flush_segment(va) smp_tlb_flush_segment(va)
-#define tlb_flush_region(va) smp_tlb_flush_region(va)
-#define tlb_flush_context() smp_tlb_flush_context()
+#define tlb_flush_page(va,ctx) smp_tlb_flush_page(va,ctx)
+#define tlb_flush_segment(va,ctx) smp_tlb_flush_segment(va,ctx)
+#define tlb_flush_region(va,ctx) smp_tlb_flush_region(va,ctx)
+#define tlb_flush_context(ctx) smp_tlb_flush_context(ctx)
#define tlb_flush_all() smp_tlb_flush_all()
#else
-#define tlb_flush_page(va) sp_tlb_flush_page(va)
-#define tlb_flush_segment(va) sp_tlb_flush_segment(va)
-#define tlb_flush_region(va) sp_tlb_flush_region(va)
-#define tlb_flush_context() sp_tlb_flush_context()
+#define tlb_flush_page(va,ctx) sp_tlb_flush_page(va,ctx)
+#define tlb_flush_segment(va,ctx) sp_tlb_flush_segment(va,ctx)
+#define tlb_flush_region(va,ctx) sp_tlb_flush_region(va,ctx)
+#define tlb_flush_context(ctx) sp_tlb_flush_context(ctx)
#define tlb_flush_all() sp_tlb_flush_all()
#endif
@@ -662,15 +686,16 @@
* PTE at the same time we are. This is the procedure that is
* recommended in the SuperSPARC user's manual.
*/
-int updatepte4m __P((vaddr_t, int *, int, int));
+int updatepte4m __P((vaddr_t, int *, int, int, int));
static struct simplelock pte4m_lock = SIMPLELOCK_INITIALIZER;
int
-updatepte4m(va, pte, bic, bis)
+updatepte4m(va, pte, bic, bis, ctx)
vaddr_t va;
int *pte;
int bic;
int bis;
+ int ctx;
{
int oldval, swapval;
volatile int *vpte = (volatile int *)pte;
@@ -691,7 +716,7 @@
do {
swapval = 0;
swap(vpte, swapval);
- tlb_flush_page(va);
+ tlb_flush_page(va, ctx);
oldval |= swapval;
} while (*vpte != 0);
@@ -721,8 +746,10 @@
{
u_int pte;
- /* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
- /* Try each level in turn until we find a valid pte. Otherwise panic */
+ /*
+ * We'll use that handy SRMMU flush/probe.
+ * Try each level in turn until we find a valid pte. Otherwise panic.
+ */
pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
/* Unlock fault status; required on Hypersparc modules */
@@ -765,14 +792,15 @@
#ifdef MULTIPROCESSOR
__inline void
-setpgt4m_va(va, ptep, pte, pageflush)
+setpgt4m_va(va, ptep, pte, pageflush, ctx)
vaddr_t va;
int *ptep;
int pte;
- int pageflush; /* ignored */
+ int pageflush;
+ int ctx;
{
- updatepte4m(va, ptep, 0xffffffff, pte);
+ updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0);
}
#endif
@@ -808,7 +836,7 @@
panic("setpte4m: no pte for va %p (rp=%p, sp=%p)",
(caddr_t)va, rm, sm);
#endif
- tlb_flush_page(va);
+ tlb_flush_page(va, 0);
setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
}
@@ -1904,6 +1932,7 @@
cnum, c->c_pmap);
#endif
c->c_pmap->pm_ctx = NULL;
+ c->c_pmap->pm_ctxnum = 0;
doflush = (CACHEINFO.c_vactype != VAC_NONE);
if (CPU_ISSUN4 || CPU_ISSUN4C) {
if (gap_start < c->c_pmap->pm_gap_start)
@@ -1992,13 +2021,10 @@
* XXX: Do we have to flush cache after reloading ctx tbl?
*/
- /* Do any cache flush needed on context switch */
- (*cpuinfo.pure_vcache_flush)();
-
/*
* We need to flush the cache only when stealing a context
* from another pmap. In that case it's Ok to switch the
- * context and leave it set, since it the context table
+ * context and leave it set, since the context table
* will have a valid region table entry for this context
* number.
*
@@ -2006,7 +2032,6 @@
* the context table entry with the new pmap's region.
*/
if (doflush) {
- setcontext4m(cnum);
cache_flush_context(cnum);
}
@@ -2036,51 +2061,42 @@
}
simple_unlock(&pm->pm_lock);
- /* Set context if not yet done above to flush the cache */
- if (!doflush)
- setcontext4m(cnum);
-
- tlb_flush_context(); /* remove any remnant garbage from tlb */
-#endif
+ /* And finally switch to the new context */
+ (*cpuinfo.pure_vcache_flush)();
+ setcontext4m(cnum);
+#endif /* SUN4M || SUN4D */
splx(s);
}
}
/*
- * Give away a context. Flushes cache and sets current context to 0.
+ * Give away a context. Always called in the context of proc0 (reaper)
*/
void
ctx_free(pm)
struct pmap *pm;
{
union ctxinfo *c;
- int newc, oldc;
+ int ctx;