Source-Changes-HG archive
[src/trunk]: src/sys/arch/sparc/sparc Brush up the generic cross-call routine...
details: https://anonhg.NetBSD.org/src/rev/d26a7a1352d3
branches: trunk
changeset: 540582:d26a7a1352d3
user: pk <pk%NetBSD.org@localhost>
date: Thu Dec 19 10:38:28 2002 +0000
description:
Brush up the generic cross-call routine and use it to implement the SMP
cache flush ops.
Also add a soft interrupt handler for standard cross-call notification,
reserving the NMI level 15 softint for urgent cross calls.
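
The net effect in cache.c below is that each hand-rolled IPI broadcast
loop collapses into a single call through the generic dispatcher. The
following user-space sketch models only that calling pattern;
demo_xcall() and demo_flush_page() are hypothetical stand-ins for the
kernel's xcall() and cpuinfo.sp_vcache_flush_page, which additionally
take the message locks and raise IPIs on the other CPUs:

#include <stdio.h>

typedef int (*xcall_func_t)(int, int, int, int);

/* stand-in for a per-CPU flush implementation */
static int
demo_flush_page(int va, int ctx, int arg2, int arg3)
{
	printf("flush page va=%#x ctx=%d\n", va, ctx);
	return 0;
}

/*
 * Stand-in for xcall(): run func locally; the kernel version also
 * notifies every other ready CPU via a soft interrupt and waits for
 * them to acknowledge.
 */
static void
demo_xcall(xcall_func_t func, int arg0, int arg1, int arg2, int arg3,
    int cpuset)
{
	(void)cpuset;		/* unused, as in the kernel routine */
	if (func != NULL)
		(*func)(arg0, arg1, arg2, arg3);
}

int
main(void)
{
	/* wrapper shape: unused trailing arguments are padded with 0 */
	demo_xcall(demo_flush_page, 0x2000, 7, 0, 0, 0);
	return 0;
}
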
diffstat:
sys/arch/sparc/sparc/cache.c | 123 +++---------------------------------------
sys/arch/sparc/sparc/cpu.c | 93 ++++++++++++-------------------
sys/arch/sparc/sparc/intr.c | 74 +++++++++++++++++++++----
3 files changed, 107 insertions(+), 183 deletions(-)
diffs (truncated from 544 to 300 lines):
diff -r 91c3d35a0a48 -r d26a7a1352d3 sys/arch/sparc/sparc/cache.c
--- a/sys/arch/sparc/sparc/cache.c Thu Dec 19 10:30:39 2002 +0000
+++ b/sys/arch/sparc/sparc/cache.c Thu Dec 19 10:38:28 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cache.c,v 1.66 2002/12/16 16:59:10 pk Exp $ */
+/* $NetBSD: cache.c,v 1.67 2002/12/19 10:38:28 pk Exp $ */
/*
* Copyright (c) 1996
@@ -280,7 +280,7 @@
cache_alias_bits = (cache_alias_dist - 1) & ~PGOFSET;
pcr = lda(SRMMU_PCR, ASI_SRMMU);
- pcr &= ~(CYPRESS_PCR_CE | CYPRESS_PCR_CM);
+ pcr &= ~CYPRESS_PCR_CM;
/* Now reset cache tag memory if cache not yet enabled */
ls = CACHEINFO.c_linesize;
@@ -974,28 +974,7 @@
int va;
int ctx;
{
- int n, s;
-
- cpuinfo.sp_vcache_flush_page(va, ctx);
- if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
- return;
- LOCK_XPMSG();
- for (n = 0; n < ncpu; n++) {
- struct cpu_info *cpi = cpus[n];
- struct xpmsg_flush_page *p;
-
- if (CPU_READY(cpi))
- continue;
- p = &cpi->msg.u.xpmsg_flush_page;
- s = splhigh();
- simple_lock(&cpi->msg.lock);
- cpi->msg.tag = XPMSG_VCACHE_FLUSH_PAGE;
- p->ctx = getcontext4m();
- p->va = va;
- raise_ipi_wait_and_unlock(cpi);
- splx(s);
- }
- UNLOCK_XPMSG();
+ xcall((xcall_func_t)cpuinfo.sp_vcache_flush_page, va, ctx, 0, 0, 0);
}
void
@@ -1003,29 +982,7 @@
int vr, vs;
int ctx;
{
- int n, s;
-
- cpuinfo.sp_vcache_flush_segment(vr, vs, ctx);
- if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
- return;
- LOCK_XPMSG();
- for (n = 0; n < ncpu; n++) {
- struct cpu_info *cpi = cpus[n];
- struct xpmsg_flush_segment *p;
-
- if (CPU_READY(cpi))
- continue;
- p = &cpi->msg.u.xpmsg_flush_segment;
- s = splhigh();
- simple_lock(&cpi->msg.lock);
- cpi->msg.tag = XPMSG_VCACHE_FLUSH_SEGMENT;
- p->ctx = getcontext4m();
- p->vr = vr;
- p->vs = vs;
- raise_ipi_wait_and_unlock(cpi);
- splx(s);
- }
- UNLOCK_XPMSG();
+ xcall((xcall_func_t)cpuinfo.sp_vcache_flush_segment, vr, vs, ctx, 0, 0);
}
void
@@ -1033,84 +990,22 @@
int vr;
int ctx;
{
- int n, s;
-
- cpuinfo.sp_vcache_flush_region(vr, ctx);
- if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
- return;
- LOCK_XPMSG();
- for (n = 0; n < ncpu; n++) {
- struct cpu_info *cpi = cpus[n];
- struct xpmsg_flush_region *p;
-
- if (CPU_READY(cpi))
- continue;
- p = &cpi->msg.u.xpmsg_flush_region;
- s = splhigh();
- simple_lock(&cpi->msg.lock);
- cpi->msg.tag = XPMSG_VCACHE_FLUSH_REGION;
- p->ctx = getcontext4m();
- p->vr = vr;
- raise_ipi_wait_and_unlock(cpi);
- splx(s);
- }
- UNLOCK_XPMSG();
+ xcall((xcall_func_t)cpuinfo.sp_vcache_flush_region, vr, ctx, 0, 0, 0);
}
void
smp_vcache_flush_context(ctx)
int ctx;
{
- int n, s;
-
- cpuinfo.sp_vcache_flush_context(ctx);
- if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
- return;
- LOCK_XPMSG();
- for (n = 0; n < ncpu; n++) {
- struct cpu_info *cpi = cpus[n];
- struct xpmsg_flush_context *p;
-
- if (CPU_READY(cpi))
- continue;
- p = &cpi->msg.u.xpmsg_flush_context;
- s = splhigh();
- simple_lock(&cpi->msg.lock);
- cpi->msg.tag = XPMSG_VCACHE_FLUSH_CONTEXT;
- p->ctx = ctx;
- raise_ipi_wait_and_unlock(cpi);
- splx(s);
- }
- UNLOCK_XPMSG();
+ xcall((xcall_func_t)cpuinfo.sp_vcache_flush_context, ctx, 0, 0, 0, 0);
}
void
-smp_cache_flush(va, size)
+smp_cache_flush(va, size, ctx)
caddr_t va;
u_int size;
+ int ctx;
{
- int n, s;
-
- cpuinfo.sp_cache_flush(va, size);
- if (cold || (cpuinfo.flags & CPUFLG_READY) == 0)
- return;
- LOCK_XPMSG();
- for (n = 0; n < ncpu; n++) {
- struct cpu_info *cpi = cpus[n];
- struct xpmsg_flush_range *p;
-
- if (CPU_READY(cpi))
- continue;
- p = &cpi->msg.u.xpmsg_flush_range;
- s = splhigh();
- simple_lock(&cpi->msg.lock);
- cpi->msg.tag = XPMSG_VCACHE_FLUSH_RANGE;
- p->ctx = getcontext4m();
- p->va = va;
- p->size = size;
- raise_ipi_wait_and_unlock(cpi);
- splx(s);
- }
- UNLOCK_XPMSG();
+ xcall((xcall_func_t)cpuinfo.sp_cache_flush, (int)va, (int)size, ctx, 0, 0);
}
#endif /* MULTIPROCESSOR */
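
A note on the (xcall_func_t) casts above: the per-CPU flush functions
take fewer than four int arguments, so the casts rely on the calling
convention letting a callee ignore surplus register arguments. That
holds on SPARC, but it is not portable standard C. A minimal sketch of
the same pattern, with hypothetical names:

typedef int (*xcall_func_t)(int, int, int, int);

/* takes one argument, like sp_vcache_flush_context() */
static int
flush_context(int ctx)
{
	return ctx;
}

int
main(void)
{
	xcall_func_t f = (xcall_func_t)flush_context;

	/* the surplus arguments are simply ignored by the callee */
	return (*f)(42, 0, 0, 0) == 42 ? 0 : 1;
}
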
diff -r 91c3d35a0a48 -r d26a7a1352d3 sys/arch/sparc/sparc/cpu.c
--- a/sys/arch/sparc/sparc/cpu.c Thu Dec 19 10:30:39 2002 +0000
+++ b/sys/arch/sparc/sparc/cpu.c Thu Dec 19 10:38:28 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.139 2002/12/16 16:59:10 pk Exp $ */
+/* $NetBSD: cpu.c,v 1.140 2002/12/19 10:38:28 pk Exp $ */
/*
* Copyright (c) 1996
@@ -75,6 +75,10 @@
#include <machine/oldmon.h>
#include <machine/idprom.h>
+#if defined(MULTIPROCESSOR) && defined(DDB)
+#include <machine/db_machdep.h>
+#endif
+
#include <sparc/sparc/cache.h>
#include <sparc/sparc/asm.h>
#include <sparc/sparc/cpuvar.h>
@@ -554,55 +558,34 @@
printf("CPU did not spin up\n");
}
-/*
- * Calls raise_ipi(), waits for the remote CPU to notice the message, and
- * unlocks this CPU's message lock, which we expect was locked at entry.
- */
-void
-raise_ipi_wait_and_unlock(cpi)
- struct cpu_info *cpi;
-{
- int i;
-
- raise_ipi(cpi);
- i = 0;
- while ((cpi->flags & CPUFLG_GOTMSG) == 0) {
- if (i++ > 500000) {
- printf("raise_ipi_wait_and_unlock(cpu%d): couldn't ping cpu%d\n",
- cpuinfo.ci_cpuid, cpi->ci_cpuid);
- break;
- }
- }
- simple_unlock(&cpi->msg.lock);
-}
-
/*
* Call a function on every CPU. One must hold xpmsg_lock around
* this function.
*/
void
-cross_call(func, arg0, arg1, arg2, arg3, cpuset)
+xcall(func, arg0, arg1, arg2, arg3, cpuset)
int (*func)(int, int, int, int);
int arg0, arg1, arg2, arg3;
int cpuset; /* XXX unused; cpus to send to: we do all */
{
- int n, i, not_done;
- struct xpmsg_func *p;
+ int s, n, i, done;
+ volatile struct xpmsg_func *p;
+
+ /* XXX - note p->retval is probably no longer useful */
/*
* If no cpus are configured yet, just call ourselves.
*/
if (cpus == NULL) {
p = &cpuinfo.msg.u.xpmsg_func;
- p->func = func;
- p->arg0 = arg0;
- p->arg1 = arg1;
- p->arg2 = arg2;
- p->arg3 = arg3;
- p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
+ if (func)
+ p->retval = (*func)(arg0, arg1, arg2, arg3);
return;
}
+ s = splvm(); /* XXX - should validate this level */
+ LOCK_XPMSG();
+
/*
* Firstly, call each CPU. We do this so that they might have
* finished by the time we start looking.
@@ -615,55 +598,47 @@
simple_lock(&cpi->msg.lock);
cpi->msg.tag = XPMSG_FUNC;
+ cpi->flags &= ~CPUFLG_GOTMSG;
p = &cpi->msg.u.xpmsg_func;
p->func = func;
p->arg0 = arg0;
p->arg1 = arg1;
p->arg2 = arg2;
p->arg3 = arg3;
- cpi->flags &= ~CPUFLG_GOTMSG;
- raise_ipi(cpi);
+ cpi->intreg_4m->pi_set = PINTR_SINTRLEV(13);/*xcall_cookie->pil*/
+ /*was: raise_ipi(cpi);*/
}
/*
* Second, call ourselves.
*/
-
p = &cpuinfo.msg.u.xpmsg_func;
-
- /* Call this on me first. */
- p->func = func;
- p->arg0 = arg0;
- p->arg1 = arg1;
- p->arg2 = arg2;
- p->arg3 = arg3;
-
- p->retval = (*p->func)(p->arg0, p->arg1, p->arg2, p->arg3);
+ if (func)
+ p->retval = (*func)(arg0, arg1, arg2, arg3);
/*
* Lastly, start looping, waiting for all cpu's to register that they
* have completed (bailing if it takes "too long", being loud about
* this in the process).
*/
- i = 0;
- while (not_done) {
- not_done = 0;
+ done = 0;
+ i = 100000; /* time-out */
+ while (!done) {
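
The diff is truncated inside the new completion loop. For orientation
only, here is a user-space model of the bounded-spin pattern that the
removed raise_ipi_wait_and_unlock() used above; this is an assumption
about the loop's shape, not the actual remainder of the diff (the real
code polls each CPU's CPUFLG_GOTMSG flag under the message lock):

#include <stdio.h>

/* set by the "remote CPU"; modelled here as a plain volatile flag */
static volatile int got_msg = 1;	/* pretend the answer arrived */

static void
wait_for_ack(void)
{
	int i = 100000;		/* time-out, as in the new xcall() */

	while (!got_msg) {
		if (--i == 0) {
			printf("xcall: couldn't ping remote cpu\n");
			break;
		}
	}
}

int
main(void)
{
	wait_for_ack();
	return 0;
}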