Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/arm32 Implement scheduler lock protocol, this f...
details: https://anonhg.NetBSD.org/src/rev/310269dcf7c4
branches: trunk
changeset: 526955:310269dcf7c4
user: chris <chris%NetBSD.org@localhost>
date: Tue May 14 19:22:34 2002 +0000
description:
Implement the scheduler lock protocol; this fixes PR arm/10863.
Also add correct locking when freeing pages in pmap_destroy (fix from potr).
This means that arm32 kernels can now be built with LOCKDEBUG enabled (only tested on cats, though).
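In C terms, the idle-loop half of the protocol added to cpuswitch.S below amounts to: drop sched_lock before sleeping, retake it before inspecting the run queues, and return to cpu_switch() with it held once whichqs is non-zero. The following is a minimal, self-contained sketch of that ordering; sched_lock_idle(), sched_unlock_idle() and whichqs are the names used in the diff, while the stubs around them (irq_enable(), cpu_sleep(), the simulated wakeup) are hypothetical scaffolding, not kernel code.

#include <stdio.h>

#define LOCKDEBUG			/* the protocol only matters here */

static volatile unsigned int whichqs;	/* run-queue bitmap; non-zero = runnable work */

/* Stand-ins for the kernel primitives named in the diff. */
static void sched_lock_idle(void)   { printf("sched_lock_idle\n"); }
static void sched_unlock_idle(void) { printf("sched_unlock_idle\n"); }
static void irq_enable(void)        { printf("IRQenable\n"); }
static void irq_disable(void)       { printf("IRQdisable\n"); }

/* cpufuncs.cf_sleep() stand-in: pretend an interrupt made a process runnable. */
static void cpu_sleep(void)
{
	printf("cpu sleep\n");
	whichqs = 1;
}

static void idle(void)
{
	for (;;) {
#if defined(LOCKDEBUG)
		sched_unlock_idle();	/* drop sched_lock before sleeping */
#endif
		irq_enable();
		cpu_sleep();
		irq_disable();
#if defined(LOCKDEBUG)
		sched_lock_idle();	/* retake it before the queue check */
#endif
		if (whichqs != 0)
			return;		/* back to cpu_switch(), lock held */
	}
}

int main(void)
{
	idle();
	printf("a process is runnable; cpu_switch() continues\n");
	return 0;
}
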
diffstat:
sys/arch/arm/arm32/cpuswitch.S | 65 ++++++++++++++++++++++++++++++++---------
sys/arch/arm/arm32/pmap.c | 10 +++++-
2 files changed, 58 insertions(+), 17 deletions(-)
diffs (191 lines):
diff -r a25b81a9b170 -r 310269dcf7c4 sys/arch/arm/arm32/cpuswitch.S
--- a/sys/arch/arm/arm32/cpuswitch.S Tue May 14 18:57:31 2002 +0000
+++ b/sys/arch/arm/arm32/cpuswitch.S Tue May 14 19:22:34 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpuswitch.S,v 1.6 2002/01/25 19:19:24 thorpej Exp $ */
+/* $NetBSD: cpuswitch.S,v 1.7 2002/05/14 19:22:34 chris Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@@ -231,30 +231,35 @@
/*
* Idle loop, exercised while waiting for a process to wake up.
*/
+/* LINTSTUB: Ignore */
ASENTRY_NP(idle)
+
+#if defined(LOCKDEBUG)
+ bl _C_LABEL(sched_unlock_idle)
+#endif
/* Enable interrupts */
IRQenable
- /* XXX - r1 needs to be preserved for cpu_switch */
- mov r7, r1
ldr r3, Lcpufuncs
mov r0, #0
add lr, pc, #Lidle_slept - . - 8
ldr pc, [r3, #CF_SLEEP]
+ /* should also call the uvm pageidlezero stuff */
+
Lidle_slept:
- mov r1, r7
/* Disable interrupts while we check for an active queue */
IRQdisable
+#if defined(LOCKDEBUG)
+ bl _C_LABEL(sched_lock_idle)
+#endif
ldr r7, Lwhichqs
ldr r3, [r7]
teq r3, #0x00000000
- bne sw1
- /* All processes are still asleep so idle a while longer */
- b _ASM_LABEL(idle)
-
+ beq _ASM_LABEL(idle)
+ b Lidle_ret
/*
* Find a new process to run, save the current context and
@@ -287,9 +292,15 @@
ldr r7, Lcurpcb
str r0, [r7]
- /* Lower the spl level to spl0 and get the current spl level. */
+ /* stash the old proc */
mov r7, r1
+#if defined(LOCKDEBUG)
+ /* release the sched_lock before handling interrupts */
+ bl _C_LABEL(sched_unlock_idle)
+#endif
+
+ /* Lower the spl level to spl0 and get the current spl level. */
#ifdef __NEWINTR
mov r0, #(IPL_NONE)
bl _C_LABEL(_spllower)
@@ -305,14 +316,19 @@
/* Push the old spl level onto the stack */
str r0, [sp, #-0x0004]!
- mov r1, r7
+ mov r5, r7
/* First phase : find a new process */
- /* rem: r1 = old proc */
+ /* rem: r5 = old proc */
+
-switch_search:
+Lswitch_search:
IRQdisable
+#if defined(LOCKDEBUG)
+ bl _C_LABEL(sched_lock_idle)
+#endif
+
/* Do we have any active queues */
ldr r7, Lwhichqs
@@ -321,8 +337,11 @@
/* If not we must idle until we do. */
teq r3, #0x00000000
beq _ASM_LABEL(idle)
+Lidle_ret:
-sw1:
+ /* restore old proc */
+ mov r1, r5
+
/* rem: r1 = old proc */
/* rem: r3 = whichqs */
/* rem: interrupts are disabled */
@@ -407,6 +426,15 @@
*/
str r0, [r6, #(P_BACK)]
+#if defined(LOCKDEBUG)
+ /*
+ * unlock the sched_lock, but leave interrupts off, for now.
+ */
+ mov r7, r1
+ bl _C_LABEL(sched_unlock_idle)
+ mov r1, r7
+#endif
+
/* p->p_cpu initialized in fork1() for single-processor */
/* Process is now on a processor. */
@@ -569,7 +597,13 @@
Lkernel_map:
.word _C_LABEL(kernel_map)
+/*
+ * void switch_exit(struct proc *p);
+ * Switch to proc0's saved context and deallocate the address space and kernel
+ * stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
+ */
+/* LINTSTUB: Func: void switch_exit(struct proc *p) */
ENTRY(switch_exit)
/*
* r0 = proc
@@ -633,9 +667,10 @@
mov r0, #0x00000000
str r0, [r1]
- ldr r1, Lproc0
- b switch_search
+ ldr r5, Lproc0
+ b Lswitch_search
+/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
/*
* r0 = pcb
diff -r a25b81a9b170 -r 310269dcf7c4 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Tue May 14 18:57:31 2002 +0000
+++ b/sys/arch/arm/arm32/pmap.c Tue May 14 19:22:34 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.96 2002/04/24 17:35:10 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $ */
/*
* Copyright (c) 2002 Wasabi Systems, Inc.
@@ -143,7 +143,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.96 2002/04/24 17:35:10 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@@ -1597,11 +1597,16 @@
* entries looking for pt's
* taken from i386 pmap.c
*/
+ /*
+ * vmobjlock must be held while freeing pages
+ */
+ simple_lock(&pmap->pm_obj.vmobjlock);
while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
KASSERT((page->flags & PG_BUSY) == 0);
page->wire_count = 0;
uvm_pagefree(page);
}
+ simple_unlock(&pmap->pm_obj.vmobjlock);
/* Free the page dir */
pmap_freepagedir(pmap);
@@ -2999,6 +3004,7 @@
simple_lock(&pg->mdpage.pvh_slock);
printf("%s %08lx:", m, phys);
if (pg->mdpage.pvh_list == NULL) {
+ simple_unlock(&pg->mdpage.pvh_slock);
printf(" no mappings\n");
return;
}
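For reference, the pmap.c half of the change reduces to two small locking patterns: hold the object's vmobjlock across the loop that frees the page-table pages, and release pvh_slock before the early "no mappings" return. The sketch below is a condensed, self-contained illustration of both; simple_lock()/simple_unlock() and the lock names come from the diff, while the *_sketch structures and helpers are hypothetical stand-ins for the real uvm/pmap types.

#include <stdio.h>

/* Stand-in for a LOCKDEBUG-checked simple lock. */
struct simplelock { int held; };
static void simple_lock(struct simplelock *l)   { l->held = 1; }
static void simple_unlock(struct simplelock *l) { l->held = 0; }

/* Stand-ins for the pmap's page queue (pm_obj.memq) and page metadata. */
struct vm_page_sketch { struct vm_page_sketch *next; };
struct pmap_sketch {
	struct simplelock vmobjlock;		/* pm_obj.vmobjlock in the diff */
	struct vm_page_sketch *memq;
};
struct page_md_sketch {
	struct simplelock pvh_slock;
	void *pvh_list;
};

/* uvm_pagefree() stand-in: unlink the page from the queue. */
static void page_free_sketch(struct pmap_sketch *pm, struct vm_page_sketch *pg)
{
	pm->memq = pg->next;
}

/*
 * 1. pmap_destroy(): hold vmobjlock across the page-freeing loop and
 *    drop it before the page directory is freed.
 */
static void destroy_sketch(struct pmap_sketch *pm)
{
	struct vm_page_sketch *pg;

	simple_lock(&pm->vmobjlock);
	while ((pg = pm->memq) != NULL)
		page_free_sketch(pm, pg);
	simple_unlock(&pm->vmobjlock);
	/* pmap_freepagedir() would run here, with the lock released. */
}

/*
 * 2. The pv-list dump routine: release pvh_slock on the early
 *    "no mappings" return instead of leaking it.
 */
static void dump_sketch(struct page_md_sketch *md)
{
	simple_lock(&md->pvh_slock);
	if (md->pvh_list == NULL) {
		simple_unlock(&md->pvh_slock);
		printf(" no mappings\n");
		return;
	}
	/* ... print the pv entries ... */
	simple_unlock(&md->pvh_slock);
}

int main(void)
{
	struct vm_page_sketch pg = { NULL };
	struct pmap_sketch pm = { { 0 }, &pg };
	struct page_md_sketch md = { { 0 }, NULL };

	destroy_sketch(&pm);
	dump_sketch(&md);
	return 0;
}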