Source-Changes-HG archive
[src/trunk]: src/sys Change tsleep() to ltsleep(), which takes an interlock argument
details: https://anonhg.NetBSD.org/src/rev/b23ca15068fd
branches: trunk
changeset: 487462:b23ca15068fd
user: thorpej <thorpej@NetBSD.org>
date: Thu Jun 08 05:50:37 2000 +0000
description:
Change tsleep() to ltsleep(), which takes an interlock argument. The
interlock is released once the scheduler is locked, so that a race
between a sleeper and an awakener is prevented in a multiprocessor
environment. Provide a tsleep() macro that preserves the old API.
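
For illustration, a minimal caller sketch of the race being closed and of
the new calling convention. The softc and its members (sc, sc_slock,
sc_busy) and the wait message are hypothetical; ltsleep(), tsleep(),
PRIBIO, wakeup() and the simple_lock primitives are the interfaces this
change targets.

	/*
	 * Old API -- racy on a multiprocessor: in the window between
	 * simple_unlock() and tsleep() putting the process on the
	 * sleep queue, another CPU can clear sc_busy and call
	 * wakeup(), which finds no sleeper yet; the tsleep() then
	 * never returns (a lost wakeup).
	 */
	simple_lock(&sc->sc_slock);
	while (sc->sc_busy) {
		simple_unlock(&sc->sc_slock);	/* <-- wakeup() can race in */
		(void) tsleep(&sc->sc_busy, PRIBIO, "scbusy", 0);
		simple_lock(&sc->sc_slock);
	}
	simple_unlock(&sc->sc_slock);

	/*
	 * New API -- ltsleep() releases the interlock only once the
	 * scheduler is locked, so a wakeup() on another CPU cannot
	 * slip into the window; the interlock is taken again before
	 * ltsleep() returns (no PNORELOCK given here).
	 */
	simple_lock(&sc->sc_slock);
	while (sc->sc_busy)
		(void) ltsleep(&sc->sc_busy, PRIBIO, "scbusy", 0,
		    &sc->sc_slock);
	simple_unlock(&sc->sc_slock);
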
diffstat:
sys/kern/kern_synch.c | 176 ++++++++++++++++++++++++++++++++++---------------
sys/sys/param.h | 8 +-
sys/sys/proc.h | 12 ++-
3 files changed, 136 insertions(+), 60 deletions(-)
diffs (truncated from 496 to 300 lines):
diff -r 992f7e032ff7 -r b23ca15068fd sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c Thu Jun 08 04:47:13 2000 +0000
+++ b/sys/kern/kern_synch.c Thu Jun 08 05:50:37 2000 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_synch.c,v 1.76 2000/05/31 05:02:33 thorpej Exp $ */
+/* $NetBSD: kern_synch.c,v 1.77 2000/06/08 05:50:37 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -108,12 +108,12 @@
__volatile u_int32_t sched_whichqs; /* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */
-void roundrobin __P((void *));
-void schedcpu __P((void *));
-void updatepri __P((struct proc *));
-void endtsleep __P((void *));
+void roundrobin(void *);
+void schedcpu(void *);
+void updatepri(struct proc *);
+void endtsleep(void *);
-__inline void awaken __P((struct proc *));
+__inline void awaken(struct proc *);
struct callout roundrobin_ch = CALLOUT_INITIALIZER;
struct callout schedcpu_ch = CALLOUT_INITIALIZER;
@@ -123,8 +123,7 @@
*/
/* ARGSUSED */
void
-roundrobin(arg)
- void *arg;
+roundrobin(void *arg)
{
struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
int s;
@@ -236,8 +235,7 @@
*/
/* ARGSUSED */
void
-schedcpu(arg)
- void *arg;
+schedcpu(void *arg)
{
fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
struct proc *p;
@@ -304,8 +302,7 @@
* least six times the loadfactor will decay p_estcpu to zero.
*/
void
-updatepri(p)
- struct proc *p;
+updatepri(struct proc *p)
{
unsigned int newcpu = p->p_estcpu;
fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
@@ -342,18 +339,30 @@
* signal needs to be delivered, ERESTART is returned if the current system
* call should be restarted if possible, and EINTR is returned if the system
* call should be interrupted by the signal (return EINTR).
+ *
+ * The interlock is held until the scheduler_slock is held. The
+ * interlock will be re-locked before returning to the caller
+ * unless the PNORELOCK flag is specified, in which case the
+ * interlock will always be unlocked upon return.
*/
int
-tsleep(ident, priority, wmesg, timo)
- void *ident;
- int priority, timo;
- const char *wmesg;
+ltsleep(void *ident, int priority, const char *wmesg, int timo,
+ __volatile struct simplelock *interlock)
{
struct proc *p = curproc;
struct slpque *qp;
- int s;
- int sig, catch = priority & PCATCH;
+ int sig, s;
+ int catch = priority & PCATCH;
+ int relock = (priority & PNORELOCK) == 0;
+#if 0 /* XXXSMP */
+ int dobiglock = (p->p_flag & P_BIGLOCK) != 0;
+#endif
+ /*
+ * XXXSMP
+ * This is probably bogus. Figure out what the right
+ * thing to do here really is.
+ */
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration,
@@ -364,6 +373,8 @@
s = splhigh();
splx(safepri);
splx(s);
+ if (interlock != NULL && relock == 0)
+ simple_unlock(interlock);
return (0);
}
@@ -371,28 +382,45 @@
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p, 1, 0);
#endif
- s = splhigh();
+
+ s = splhigh(); /* XXXSMP: SCHED_LOCK(s) */
#ifdef DIAGNOSTIC
if (ident == NULL)
- panic("tsleep: ident == NULL");
+ panic("ltsleep: ident == NULL");
if (p->p_stat != SONPROC)
- panic("tsleep: p_stat %d != SONPROC", p->p_stat);
+ panic("ltsleep: p_stat %d != SONPROC", p->p_stat);
if (p->p_back != NULL)
- panic("tsleep: p_back != NULL");
+ panic("ltsleep: p_back != NULL");
#endif
+
p->p_wchan = ident;
p->p_wmesg = wmesg;
p->p_slptime = 0;
p->p_priority = priority & PRIMASK;
+
qp = SLPQUE(ident);
if (qp->sq_head == 0)
qp->sq_head = p;
else
*qp->sq_tailp = p;
*(qp->sq_tailp = &p->p_forw) = 0;
+
if (timo)
callout_reset(&p->p_tsleep_ch, timo, endtsleep, p);
+
+ /*
+ * We can now release the interlock; the scheduler_slock
+ * is held, so a thread can't get in to do wakeup() before
+ * we do the switch.
+ *
+ * XXX We leave the code block here, after inserting ourselves
+ * on the sleep queue, because we might want a more clever
+ * data structure for the sleep queues at some point.
+ */
+ if (interlock != NULL)
+ simple_unlock(interlock);
+
/*
* We put ourselves on the sleep queue and start our timeout
* before calling CURSIG, as we could stop there, and a wakeup
@@ -405,29 +433,65 @@
if (catch) {
p->p_flag |= P_SINTR;
if ((sig = CURSIG(p)) != 0) {
- if (p->p_wchan)
+ if (p->p_wchan != NULL)
unsleep(p);
p->p_stat = SONPROC;
+#if 0 /* XXXSMP */
+ /*
+ * We're going to skip the unlock, so
+ * we don't need to relock after resume.
+ */
+ dobiglock = 0;
+#endif
goto resume;
}
- if (p->p_wchan == 0) {
+ if (p->p_wchan == NULL) {
catch = 0;
+#if 0 /* XXXSMP */
+ /* See above. */
+ dobiglock = 0;
+#endif
goto resume;
}
} else
sig = 0;
p->p_stat = SSLEEP;
p->p_stats->p_ru.ru_nvcsw++;
+
+#if 0 /* XXXSMP */
+ if (dobiglock) {
+ /*
+ * Release the kernel_lock, as we are about to
+ * yield the CPU. The scheduler_slock is still
+ * held until cpu_switch() selects a new process
+ * and removes it from the run queue.
+ */
+ kernel_lock_release();
+ }
+#endif
+
+ /* scheduler_slock held */
mi_switch(p);
+ /* scheduler_slock held */
#ifdef DDB
/* handy breakpoint location after process "wakes" */
asm(".globl bpendtsleep ; bpendtsleep:");
#endif
-resume:
+
+ resume:
KDASSERT(p->p_cpu != NULL);
KDASSERT(p->p_cpu == curcpu());
p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
- splx(s);
+ splx(s); /* XXXSMP: SCHED_UNLOCK(s) */
+#if 0 /* XXXSMP */
+ if (dobiglock) {
+ /*
+ * Reacquire the kernel_lock now. We do this after
+ * we've released scheduler_slock to avoid deadlock.
+ */
+ kernel_lock_acquire(LK_EXCLUSIVE);
+ }
+#endif
p->p_flag &= ~P_SINTR;
if (p->p_flag & P_TIMEOUT) {
p->p_flag &= ~P_TIMEOUT;
@@ -436,6 +500,8 @@
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p, 0, 0);
#endif
+ if (relock && interlock != NULL)
+ simple_lock(interlock);
return (EWOULDBLOCK);
}
} else if (timo)
@@ -445,6 +511,8 @@
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p, 0, 0);
#endif
+ if (relock && interlock != NULL)
+ simple_lock(interlock);
if ((p->p_sigacts->ps_sigact[sig].sa_flags & SA_RESTART) == 0)
return (EINTR);
return (ERESTART);
@@ -453,6 +521,8 @@
if (KTRPOINT(p, KTR_CSW))
ktrcsw(p, 0, 0);
#endif
+ if (relock && interlock != NULL)
+ simple_lock(interlock);
return (0);
}
@@ -463,8 +533,7 @@
* is stopped, just unsleep so it will remain stopped.
*/
void
-endtsleep(arg)
- void *arg;
+endtsleep(void *arg)
{
struct proc *p;
int s;
@@ -485,8 +554,7 @@
* Remove a process from its wait queue
*/
void
-unsleep(p)
- struct proc *p;
+unsleep(struct proc *p)
{
struct slpque *qp;
struct proc **hp;
@@ -509,14 +577,14 @@
* Optimized-for-wakeup() version of setrunnable().
*/
__inline void
-awaken(p)
- struct proc *p;
+awaken(struct proc *p)
{
if (p->p_slptime > 1)
updatepri(p);
p->p_slptime = 0;
p->p_stat = SRUN;
+
/*
* Since curpriority is a user priority, p->p_priority
* is always better than curpriority.
@@ -525,23 +593,23 @@
setrunqueue(p);
need_resched();
} else
- wakeup((caddr_t)&proc0);
+ wakeup(&proc0);
}
/*
* Make all processes sleeping on the specified identifier runnable.
*/
void
-wakeup(ident)
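
Two points from the parts truncated above, sketched rather than quoted.
First, the compatibility macro lands in sys/sys/proc.h; that hunk is cut
off, but the description implies the obvious definition (the old API is
ltsleep() with no interlock):

#define	tsleep(ident, priority, wmesg, timo)				\
	ltsleep((ident), (priority), (wmesg), (timo), NULL)

Second, the PNORELOCK flag documented in the new comment: OR'd into the
priority argument, it makes ltsleep() leave the interlock unlocked on
every return path, so the caller must not unlock it again. A
hypothetical caller:

	simple_lock(&sc->sc_slock);
	if (sc->sc_busy) {
		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH | PNORELOCK,
		    "scbusy", hz, &sc->sc_slock);
		/* sc_slock is not held here, on any return path */
	} else
		simple_unlock(&sc->sc_slock);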