Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/sparc/sparc remove schedintr_4m(), and go back to o...
details: https://anonhg.NetBSD.org/src/rev/f98ebc3bf213
branches: trunk
changeset: 792854:f98ebc3bf213
user: mrg <mrg%NetBSD.org@localhost>
date: Sun Jan 19 00:22:33 2014 +0000
description:
remove schedintr_4m(), and go back to only having the level 14
timer call statclock/schedclock. the randomness for this clock
makes the hardclock() call very jittery.
instead of this, trigger a softintr IPI at level 10 on non-primary
CPUs from the primary CPU's clock interrupt (at level 10). this
cleans up the generic sparc timer code a little as well.
this makes the "time goes backwards" problem *much* less frequent,
but I still see it sometimes.
diffstat:
sys/arch/sparc/sparc/timer.c | 20 ++-----
sys/arch/sparc/sparc/timer_sun4m.c | 97 ++++++++++++++++++++++++++-----------
sys/arch/sparc/sparc/timervar.h | 5 +-
3 files changed, 75 insertions(+), 47 deletions(-)
diffs (279 lines):
diff -r 893cba33dc62 -r f98ebc3bf213 sys/arch/sparc/sparc/timer.c
--- a/sys/arch/sparc/sparc/timer.c Sat Jan 18 21:27:11 2014 +0000
+++ b/sys/arch/sparc/sparc/timer.c Sun Jan 19 00:22:33 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: timer.c,v 1.31 2013/10/19 19:40:23 mrg Exp $ */
+/* $NetBSD: timer.c,v 1.32 2014/01/19 00:22:33 mrg Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.31 2013/10/19 19:40:23 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.32 2014/01/19 00:22:33 mrg Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -87,7 +87,7 @@
static struct counter {
volatile u_int *cntreg; /* counter register */
u_int limit; /* limit we count up to */
- u_int offset; /* accumulated offet due to wraps */
+ u_int offset; /* accumulated offset due to wraps */
u_int shift; /* scaling for valid bits */
u_int mask; /* valid bit mask */
} cntr;
@@ -117,12 +117,11 @@
u_int c, res, r;
int s;
-
s = splhigh();
res = c = *ctr->cntreg;
- res &= ~TMR_LIMIT;
+ res &= ~TMR_LIMIT;
if (c != res) {
r = ctr->limit;
@@ -155,7 +154,6 @@
timerattach(volatile int *cntreg, volatile int *limreg)
{
u_int prec = 0, t0;
- void (*sched_intr_fn)(void *);
/*
* Calibrate delay() by tweaking the magic constant
@@ -198,11 +196,11 @@
printf(": delay constant %d, frequency = %" PRIu64 " Hz\n",
timerblurb, counter_timecounter.tc_frequency);
+printf("timer: limit %u shift %u mask %x\n", cntr.limit, cntr.shift, cntr.mask);
#if defined(SUN4) || defined(SUN4C)
if (CPU_ISSUN4 || CPU_ISSUN4C) {
timer_init = timer_init_4;
- sched_intr_fn = schedintr;
level10.ih_fun = clockintr_4;
level14.ih_fun = statintr_4;
cntr.limit = tmr_ustolim(tick);
@@ -211,12 +209,6 @@
#if defined(SUN4M)
if (CPU_ISSUN4M) {
timer_init = timer_init_4m;
-#if defined(MULTIPROCESSOR)
- if (sparc_ncpus > 1)
- sched_intr_fn = schedintr_4m;
- else
-#endif
- sched_intr_fn = schedintr;
level10.ih_fun = clockintr_4m;
level14.ih_fun = statintr_4m;
cntr.limit = tmr_ustolim4m(tick);
@@ -227,7 +219,7 @@
intr_establish(14, 0, &level14, NULL, true);
/* Establish a soft interrupt at a lower level for schedclock */
- sched_cookie = sparc_softintr_establish(IPL_SCHED, sched_intr_fn, NULL);
+ sched_cookie = sparc_softintr_establish(IPL_SCHED, schedintr, NULL);
if (sched_cookie == NULL)
panic("timerattach: cannot establish schedintr");
diff -r 893cba33dc62 -r f98ebc3bf213 sys/arch/sparc/sparc/timer_sun4m.c
--- a/sys/arch/sparc/sparc/timer_sun4m.c Sat Jan 18 21:27:11 2014 +0000
+++ b/sys/arch/sparc/sparc/timer_sun4m.c Sun Jan 19 00:22:33 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: timer_sun4m.c,v 1.29 2013/11/16 23:54:01 mrg Exp $ */
+/* $NetBSD: timer_sun4m.c,v 1.30 2014/01/19 00:22:33 mrg Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -58,7 +58,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.29 2013/11/16 23:54:01 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.30 2014/01/19 00:22:33 mrg Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -78,6 +78,11 @@
#define counterreg4m cpuinfo.counterreg_4m
/*
+ * SMP hardclock handler.
+ */
+#define IPL_HARDCLOCK 10
+
+/*
* Set up the real-time and statistics clocks.
* Leave stathz 0 only if no alternative timer is available.
*
@@ -96,31 +101,45 @@
icr_si_bic(SINTR_T);
}
-void
-schedintr_4m(void *v)
-{
-
- kpreempt_disable();
#ifdef MULTIPROCESSOR
- /*
- * We call hardclock() here so that we make sure it is called on
- * all CPUs. This function ends up being called on sun4m systems
- * every tick.
- */
- if (!CPU_IS_PRIMARY(curcpu()))
- hardclock(v);
+/*
+ * Handle SMP hardclock() calling for this CPU.
+ */
+static void
+hardclock_ipi(void *cap)
+{
+ int s = splsched();
+
+ hardclock((struct clockframe *)cap);
+ splx(s);
+}
+#endif
- /*
- * The factor 8 is only valid for stathz==100.
- * See also clock.c
- */
- if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0)
+/*
+ * Call hardclock on all CPUs.
+ */
+static void
+handle_hardclock(struct clockframe *cap)
+{
+ int s;
+#ifdef MULTIPROCESSOR
+ struct cpu_info *cpi;
+ CPU_INFO_ITERATOR n;
+
+ for (CPU_INFO_FOREACH(n, cpi)) {
+ if (cpi == cpuinfo.ci_self) {
+ KASSERT(CPU_IS_PRIMARY(cpi));
+ continue;
+ }
+
+ raise_ipi(cpi, IPL_HARDCLOCK);
+ }
#endif
- schedclock(curlwp);
- kpreempt_enable();
+ s = splsched();
+ hardclock(cap);
+ splx(s);
}
-
/*
* Level 10 (clock) interrupts from system counter.
*/
@@ -128,7 +147,6 @@
clockintr_4m(void *cap)
{
- KASSERT(CPU_IS_PRIMARY(curcpu()));
/*
* XXX this needs to be fixed in a more general way
* problem is that the kernel enables interrupts and THEN
@@ -146,7 +164,14 @@
/* read the limit register to clear the interrupt */
*((volatile int *)&timerreg4m->t_limit);
tickle_tc();
- hardclock((struct clockframe *)cap);
+
+ /*
+ * We don't have a system-clock per-cpu, and we'd like to keep
+ * the per-cpu timer for the statclock, so, send an IPI to
+ * everyone to call hardclock.
+ */
+ handle_hardclock(cap);
+
kpreempt_enable();
return (1);
}
@@ -183,23 +208,19 @@
* The factor 8 is only valid for stathz==100.
* See also clock.c
*/
-#if !defined(MULTIPROCESSOR)
if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) {
-#endif
if (CLKF_LOPRI(frame, IPL_SCHED)) {
/* No need to schedule a soft interrupt */
spllowerschedclock();
- schedintr_4m(cap);
+ schedintr(cap);
} else {
/*
* We're interrupting a thread that may have the
- * scheduler lock; run schedintr_4m() on this CPU later.
+ * scheduler lock; run schedintr() on this CPU later.
*/
raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */
}
-#if !defined(MULTIPROCESSOR)
}
-#endif
kpreempt_enable();
return (1);
@@ -261,6 +282,22 @@
cpi->counterreg_4m = (struct counter_4m *)bh;
}
+#if defined(MULTIPROCESSOR)
+ if (sparc_ncpus > 1) {
+ /*
+ * Note that we don't actually use this cookie after checking
+ * it was established, we call directly via raise_ipi() on
+ * IPL_HARDCLOCK.
+ */
+ void *hardclock_cookie;
+
+ hardclock_cookie = sparc_softintr_establish(IPL_HARDCLOCK,
+ hardclock_ipi, NULL);
+ if (hardclock_cookie == NULL)
+ panic("timerattach: cannot establish hardclock_intr");
+ }
+#endif
+
/* Put processor counter in "timer" mode */
timerreg4m->t_cfg = 0;
diff -r 893cba33dc62 -r f98ebc3bf213 sys/arch/sparc/sparc/timervar.h
--- a/sys/arch/sparc/sparc/timervar.h Sat Jan 18 21:27:11 2014 +0000
+++ b/sys/arch/sparc/sparc/timervar.h Sun Jan 19 00:22:33 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: timervar.h,v 1.11 2012/07/29 00:04:05 matt Exp $ */
+/* $NetBSD: timervar.h,v 1.12 2014/01/19 00:22:33 mrg Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -50,7 +50,6 @@
#endif /* SUN4 || SUN4C */
#if defined(SUN4M)
-void schedintr_4m(void *);
int clockintr_4m(void *);
int statintr_4m(void *);
void timer_init_4m(void);
@@ -66,7 +65,7 @@
/* Common timer attach routine in timer.c: */
void timerattach(volatile int *, volatile int *);
-extern void *sched_cookie; /* for schedclock() interrupts */
+extern void *sched_cookie; /* for schedclock() interrupts */
static inline u_long __attribute__((__unused__))
new_interval(void)
Home |
Main Index |
Thread Index |
Old Index