Subject: Re: spl checks in simple_lock [was Re: v_interlock/splbio protocol violations]
To: Chuck Silvers <chuq@chuq.com>
From: Darrin B. Jewell <dbj@NetBSD.org>
List: tech-kern
Date: 03/09/2004 15:06:30
"Darrin B. Jewell" <dbj@NetBSD.org> writes:
> I include below the highlights of a larger patch which can be found at
> <URL:ftp://ftp.netbsd.org/pub/NetBSD/misc/dbj/lockspl.diff>
>
> The highlights show the changes necessary to kern_lock.c and
> subr_pool.c to support this check.
Of course I managed to leave out the kern_lock.c changes from the
highlights, which I now include below. The full diff should still
contain everything.
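
For context, here is roughly what a caller looks like once
simple_lock_init() takes the expected spl as a second argument.  This
is only a sketch against the patched API; the lock name and the
splbio()/IPL_BIO choice are made up for illustration and are not part
of the diff:

/* Sketch: a subsystem declaring the spl its simplelock is taken at. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>

static struct simplelock example_slock;

void
example_init(void)
{

	/* tell LOCKDEBUG that this lock is taken at splbio() or above */
	simple_lock_init(&example_slock, IPL_BIO);
}

void
example_touch(void)
{
	int s;

	s = splbio();			/* raise to the declared level */
	simple_lock(&example_slock);	/* LOCKDEBUG can verify the spl here */
	/* ... critical section ... */
	simple_unlock(&example_slock);
	splx(s);
}
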
Content-Type: text/x-patch
Content-Disposition: attachment; filename=lockspl.highlights2.diff
Content-Description: highlights of changes to simple_lock_init
Index: src/sys/kern/kern_lock.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_lock.c,v
retrieving revision 1.75
diff -u -u -p -r1.75 kern_lock.c
--- src/sys/kern/kern_lock.c 13 Feb 2004 11:36:22 -0000 1.75
+++ src/sys/kern/kern_lock.c 9 Mar 2004 18:59:41 -0000
@@ -303,7 +303,7 @@ do { \
#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
-struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
+struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER(IPL_NONE);
#define SPINLOCK_LIST_LOCK() \
__cpu_simple_lock(&spinlock_list_slock.lock_data)
@@ -380,7 +380,11 @@ lockinit(struct lock *lkp, int prio, con
{
memset(lkp, 0, sizeof(struct lock));
- simple_lock_init(&lkp->lk_interlock);
+ /* XXX lock(9) says simplelocks are the only locks that can be used
+ * in an interrupt handler. However, the kern_lock is a spinlock,
+ * so we allow spin locks from interrupts as well, at least for now. */
+ simple_lock_init(&lkp->lk_interlock,
+ ((flags & LK_SPIN) ? IPL_LOCK : IPL_NONE));
lkp->lk_flags = flags & LK_EXTFLG_MASK;
if (flags & LK_SPIN)
lkp->lk_cpu = LK_NOCPU;
@@ -994,7 +998,7 @@ TAILQ_HEAD(, simplelock) simplelock_list
TAILQ_HEAD_INITIALIZER(simplelock_list);
#if defined(MULTIPROCESSOR) /* { */
-struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
+struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER(IPL_LOCK);
#define SLOCK_LIST_LOCK() \
__cpu_simple_lock(&simplelock_list_slock.lock_data)
@@ -1027,6 +1031,8 @@ do { \
lock_printf(str); \
lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
SLOCK_MP(); \
+ if ((alp)->lock_spl != IPL_NONE) \
+ lock_printf("requiring spl 0x%x\n", (alp)->lock_spl); \
if ((alp)->lock_file != NULL) \
lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
(alp)->lock_line); \
@@ -1042,7 +1048,7 @@ do { \
* they are being called.
*/
void
-simple_lock_init(struct simplelock *alp)
+simple_lock_init(struct simplelock *alp, int spl)
{
#if defined(MULTIPROCESSOR) /* { */
@@ -1055,6 +1061,7 @@ simple_lock_init(struct simplelock *alp)
alp->unlock_file = NULL;
alp->unlock_line = 0;
alp->lock_holder = LK_NOCPU;
+ alp->lock_spl = spl;
}
void
@@ -1063,6 +1070,15 @@ _simple_lock(__volatile struct simpleloc
cpuid_t cpu_id = cpu_number();
int s;
+#ifdef SPL_ASSERT /* XXX Not all arch'es implement spl assert functions yet */
+ if ((alp->lock_spl == IPL_NONE) && spl_in_context()) {
+ SLOCK_WHERE("simple_lock: invoked in interrupt context\n", alp, id, l);
+ }
+ if (!spl_is_blocked(alp->lock_spl)) {
+ SLOCK_WHERE("simple_lock: invoked without required spl\n", alp, id, l);
+ }
+#endif
+
s = spllock();
/*
@@ -1191,6 +1207,15 @@ _simple_unlock(__volatile struct simplel
{
int s;
+#ifdef SPL_ASSERT /* XXX Not all arch'es implement spl assert functions yet */
+ if ((alp->lock_spl == IPL_NONE) && spl_in_context()) {
+ SLOCK_WHERE("simple_unlock: invoked in interrupt context\n", alp, id, l);
+ }
+ if (!spl_is_blocked(alp->lock_spl)) {
+ SLOCK_WHERE("simple_unlock: invoked without required spl\n", alp, id, l);
+ }
+#endif
+
s = spllock();
/*
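
The spl_in_context() and spl_is_blocked() predicates used above live
in the machine dependent parts of the full diff, so they don't appear
in these highlights.  On a port that keeps its current priority in a
software variable they boil down to something like the following hand
waved sketch; `curr_spl_level' and `in_interrupt_context' are
placeholders rather than real symbols, and the comparison assumes
higher IPL values mean more interrupts blocked:

/* Sketch only: rough shape of the MD spl assert predicates. */
extern int curr_spl_level;		/* placeholder: port's software spl */
extern int in_interrupt_context;	/* placeholder: interrupt nesting flag */

static __inline int
spl_in_context(void)
{

	/* nonzero if we are currently running in interrupt context */
	return (in_interrupt_context);
}

static __inline int
spl_is_blocked(int ipl)
{

	/* nonzero if interrupts at `ipl' are currently blocked, i.e.
	 * the cpu is running at priority `ipl' or above */
	return (curr_spl_level >= ipl);
}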