Source-Changes-HG archive
[src/trunk]: src/sys/arch/sun2/sun2 Picked up some old scheduler changes. No...
details: https://anonhg.NetBSD.org/src/rev/a7a7c5790e8e
branches: trunk
changeset: 518873:a7a7c5790e8e
user: fredette <fredette%NetBSD.org@localhost>
date: Fri Dec 07 05:24:56 2001 +0000
description:
Picked up some old scheduler changes. Now the sun2 locore
corresponds closely to revision 1.81 of the sun3 locore.
diffstat:
sys/arch/sun2/sun2/locore.s | 114 +++++++++++++++++++++++--------------------
1 files changed, 62 insertions(+), 52 deletions(-)
diffs (198 lines):
diff -r 06443db00dcb -r a7a7c5790e8e sys/arch/sun2/sun2/locore.s
--- a/sys/arch/sun2/sun2/locore.s Fri Dec 07 05:24:28 2001 +0000
+++ b/sys/arch/sun2/sun2/locore.s Fri Dec 07 05:24:56 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.s,v 1.10 2001/12/06 21:05:12 fredette Exp $ */
+/* $NetBSD: locore.s,v 1.11 2001/12/07 05:24:56 fredette Exp $ */
/*
* Copyright (c) 2001 Matthew Fredette
@@ -48,6 +48,7 @@
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
+#include "opt_lockdebug.h"
#include "assym.h"
#include <machine/asm.h>
@@ -541,6 +542,12 @@
* Use common m68k sigcode.
*/
#include <m68k/m68k/sigcode.s>
+#ifdef COMPAT_SUNOS
+#include <m68k/m68k/sunos_sigcode.s>
+#endif
+#ifdef COMPAT_SVR4
+#include <m68k/m68k/svr4_sigcode.s>
+#endif
.text
@@ -586,8 +593,12 @@
/* Schedule the vmspace and stack to be freed. */
movl %a0,%sp@- | exit2(p)
jbsr _C_LABEL(exit2)
+ lea %sp@(4),%sp | pop args
- /* Don't pop the proc; pass it to cpu_switch(). */
+#if defined(LOCKDEBUG)
+ /* Acquire sched_lock */
+ jbsr _C_LABEL(sched_lock_idle)
+#endif
jra _C_LABEL(cpu_switch)
@@ -595,19 +606,20 @@
* When no processes are on the runq, cpu_switch() branches to idle
* to wait for something to come ready.
*/
- .data
-GLOBAL(Idle_count)
- .long 0
- .text
-
Lidle:
+#if defined(LOCKDEBUG)
+ /* Release sched_lock */
+ jbsr _C_LABEL(sched_unlock_idle)
+#endif
stop #PSL_LOWIPL
GLOBAL(_Idle) | See clock.c
movw #PSL_HIGHIPL,%sr
- addql #1, _C_LABEL(Idle_count)
- tstl _C_LABEL(sched_whichqs)
+#if defined(LOCKDEBUG)
+ /* Acquire sched_lock */
+ jbsr _C_LABEL(sched_lock_idle)
+#endif
+ movl _C_LABEL(sched_whichqs),%d0
jeq Lidle
- movw #PSL_LOWIPL,%sr
jra Lsw1
Lbadsw:
@@ -618,68 +630,52 @@
/*
* cpu_switch()
* Hacked for sun3
- * XXX - Arg 1 is a proc pointer (curproc) but this doesn't use it.
- * XXX - Sould we use p->p_addr instead of curpcb? -gwr
*/
ENTRY(cpu_switch)
- movl _C_LABEL(curpcb),%a1 | current pcb
- movw %sr,%a1@(PCB_PS) | save sr before changing ipl
+ movl _C_LABEL(curpcb),%a0 | current pcb
+ movw %sr,%a0@(PCB_PS) | save sr before changing ipl
#ifdef notyet
movl _C_LABEL(curproc),%sp@- | remember last proc running
#endif
clrl _C_LABEL(curproc)
-Lsw1:
/*
* Find the highest-priority queue that isn't empty,
* then take the first proc from that queue.
*/
- clrl %d0
- lea _C_LABEL(sched_whichqs),%a0
- movl %a0@,%d1
-Lswchk:
- btst %d0,%d1
- jne Lswfnd
- addqb #1,%d0
- cmpb #32,%d0
- jne Lswchk
- jra _C_LABEL(_Idle)
-Lswfnd:
- movw #PSL_HIGHIPL,%sr | lock out interrupts
- movl %a0@,%d1 | and check again...
- bclr %d0,%d1
- jeq Lsw1 | proc moved, rescan
- movl %d1,%a0@ | update whichqs
- moveq #1,%d1 | double check for higher priority
- lsll %d0,%d1 | process (which may have snuck in
- subql #1,%d1 | while we were finding this one)
- andl %a0@,%d1
- jeq Lswok | no one got in, continue
- movl %a0@,%d1
- bset %d0,%d1 | otherwise put this one back
- movl %d1,%a0@
- jra Lsw1 | and rescan
-Lswok:
- movl %d0,%d1
+ movl _C_LABEL(sched_whichqs),%d0
+ jeq Lidle
+Lsw1:
+ /*
+ * Interrupts are blocked, sched_lock is held. If
+ * we come here via Idle, %d0 contains the contents
+ * of a non-zero sched_whichqs.
+ */
+ moveq #31,%d1
+1: lsrl #1,%d0
+ dbcs %d1,1b
+ eorib #31,%d1
+
+ movl %d1,%d0
lslb #3,%d1 | convert queue number to index
addl #_C_LABEL(sched_qs),%d1 | locate queue (q)
movl %d1,%a1
- cmpl %a1@(P_FORW),%a1 | anyone on queue?
+ movl %a1@(P_FORW),%a0 | p = q->p_forw
+ cmpal %d1,%a0 | anyone on queue?
jeq Lbadsw | no, panic
- movl %a1@(P_FORW),%a0 | p = q->p_forw
#ifdef DIAGNOSTIC
tstl %a0@(P_WCHAN)
jne Lbadsw
cmpb #SRUN,%a0@(P_STAT)
jne Lbadsw
#endif
- movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
- movl %a0@(P_FORW),%a1 | q = p->p_forw
- movl %a0@(P_BACK),%a1@(P_BACK) | q->p_back = p->p_back
- cmpl %a0@(P_FORW),%d1 | anyone left on queue?
- jeq Lsw2 | no, skip
+ movl %a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
+ movl %a0@(P_FORW),%a1 | n = p->p_forw
+ movl %a0@(P_BACK),%a1@(P_BACK) | n->p_back = q
+ cmpal %d1,%a1 | anyone left on queue?
+ jne Lsw2 | yes, skip
movl _C_LABEL(sched_whichqs),%d1
- bset %d0,%d1 | yes, reset bit
+ bclr %d0,%d1 | no, clear bit
movl %d1,_C_LABEL(sched_whichqs)
Lsw2:
/* p->p_cpu initialized in fork1() for single-processor */
@@ -710,6 +706,18 @@
movl %a0@(P_ADDR),%a1 | get p_addr
movl %a1,_C_LABEL(curpcb)
+#if defined(LOCKDEBUG)
+ /*
+ * Done mucking with the run queues, release the
+ * scheduler lock, but keep interrupts out.
+ */
+ movl %a0,%sp@- | not args...
+ movl %a1,%sp@- | ...just saving
+ jbsr _C_LABEL(sched_unlock_idle)
+ movl %sp@+,%a1
+ movl %sp@+,%a0
+#endif
+
/*
* Load the new VM context (new MMU root pointer)
*/
@@ -717,8 +725,10 @@
#ifdef DIAGNOSTIC
| XXX fredette - tstl with an address register EA not supported
| on the 68010, too lazy to fix this instance now.
-| tstl %a2 | vm == VM_MAP_NULL?
-| jeq Lbadsw | panic
+#if 0
+ tstl %a2 | vm == VM_MAP_NULL?
+ jeq Lbadsw | panic
+#endif
#endif
/*
* Call _pmap_switch().
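
The interesting part of the rewritten queue selection is the short scan at
Lsw1: the moveq #31 / lsrl #1 + dbcs / eorib #31 sequence computes the index
of the lowest set bit of sched_whichqs, which is the highest-priority
non-empty run queue (queue 0 holds the highest-priority processes). Below is
a minimal C sketch of that same computation; the function name and the test
in main() are illustrative only and not part of the kernel.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the new Lsw1 scan (moveq #31 / lsrl+dbcs loop / eorib #31):
 * returns the index of the lowest set bit of whichqs, i.e. the
 * highest-priority non-empty run queue.  Callers must ensure
 * whichqs != 0, just as the assembly tests for zero before Lsw1. */
static int
runqueue_scan(uint32_t whichqs)
{
	int d1 = 31;                 /* moveq #31,%d1 */

	for (;;) {
		int carry = whichqs & 1; /* lsrl #1,%d0 shifts bit 0 into carry */
		whichqs >>= 1;
		if (carry)               /* dbcs exits the loop when carry is set */
			break;
		d1--;                    /* ...and decrements %d1 while it is clear */
	}
	/* For 0 <= n <= 31, 31 ^ n == 31 - n, so eorib #31,%d1 turns the
	 * remaining count (31 - index) back into the bit index. */
	return d1 ^ 31;
}

int
main(void)
{
	/* Bits 5 and 12 set: queue 5 wins, since lower-numbered queues
	 * hold higher-priority processes. */
	printf("%d\n", runqueue_scan((1u << 12) | (1u << 5)));  /* prints 5 */
	return 0;
}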
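
Once the queue index is in hand, the new code takes the proc at the head of
that circular queue and clears the queue's bit in sched_whichqs when the
queue drains (the P_FORW/P_BACK updates and the bclr before Lsw2). Here is a
self-contained sketch of that list manipulation under simplified assumptions:
the node structure, function names, and the test in main() stand in for the
kernel's struct proc / struct prochd and are not the real layout.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the run-queue links; the queue header is a
 * node of the same shape, so an empty queue points back at itself. */
struct qnode {
	struct qnode *p_forw;        /* next in the circular queue */
	struct qnode *p_back;        /* previous in the circular queue */
};

static uint32_t whichqs;         /* bit i set => queue i is non-empty */

/* Take the proc at the head of queue q (index qi) off the queue, and
 * clear the queue's bit in whichqs once the queue drains, mirroring the
 * pointer updates and the bclr in the rewritten cpu_switch(). */
static struct qnode *
dequeue_head(struct qnode *q, int qi)
{
	struct qnode *p, *n;

	p = q->p_forw;               /* p = q->p_forw */
	assert(p != q);              /* a set bit with an empty queue is Lbadsw */

	n = p->p_forw;               /* n = p->p_forw */
	q->p_forw = n;               /* q->p_forw = p->p_forw */
	n->p_back = p->p_back;       /* n->p_back = q, since p was the head */

	if (n == q)                  /* nobody left on the queue? */
		whichqs &= ~(1u << qi);  /* then clear its bit (bclr %d0,%d1) */
	return p;
}

int
main(void)
{
	struct qnode q = { &q, &q }; /* empty circular queue header */
	struct qnode a, b;

	/* Put a, then b, on queue 3 and mark it non-empty. */
	a.p_forw = &q; a.p_back = &q; q.p_forw = &a; q.p_back = &a;
	b.p_forw = &q; b.p_back = &a; a.p_forw = &b; q.p_back = &b;
	whichqs |= 1u << 3;

	assert(dequeue_head(&q, 3) == &a);
	assert(whichqs & (1u << 3)); /* b is still queued */
	assert(dequeue_head(&q, 3) == &b);
	assert(whichqs == 0);        /* bit cleared once the queue drained */
	return 0;
}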