Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys Add a LOCKDEBUG check for a r/w spinlock spinning out of...
details: https://anonhg.NetBSD.org/src/rev/2aa904019a86
branches: trunk
changeset: 499545:2aa904019a86
user: thorpej <thorpej%NetBSD.org@localhost>
date: Wed Nov 22 06:31:22 2000 +0000
description:
Add a LOCKDEBUG check for a r/w spinlock spinning out of control.
Partially from Bill Sommerfeld.
diffstat:
sys/kern/kern_lock.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++-
sys/sys/lock.h | 25 +++++++++++++++-
2 files changed, 106 insertions(+), 2 deletions(-)
diffs (244 lines):
diff -r bd192155fd74 -r 2aa904019a86 sys/kern/kern_lock.c
--- a/sys/kern/kern_lock.c Wed Nov 22 05:50:59 2000 +0000
+++ b/sys/kern/kern_lock.c Wed Nov 22 06:31:22 2000 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_lock.c,v 1.49 2000/11/20 20:04:49 thorpej Exp $ */
+/* $NetBSD: kern_lock.c,v 1.50 2000/11/22 06:31:23 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -150,16 +150,51 @@
} \
} while (0)
+#if defined(LOCKDEBUG)
+#if defined(DDB)
+#define SPINLOCK_SPINCHECK_DEBUGGER Debugger()
+#else
+#define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */
+#endif
+
+#define SPINLOCK_SPINCHECK_DECL \
+ /* 32-bits of count -- wrap constitutes a "spinout" */ \
+ uint32_t __spinc = 0
+
+#define SPINLOCK_SPINCHECK \
+do { \
+ if (++__spinc == 0) { \
+ printf("LK_SPIN spinout, excl %d, share %d\n", \
+ lkp->lk_exclusivecount, lkp->lk_sharecount); \
+ if (lkp->lk_exclusivecount) \
+ printf("held by CPU %lu\n", \
+ (u_long) lkp->lk_cpu); \
+ if (lkp->lk_lock_file) \
+ printf("last locked at %s:%d\n", \
+ lkp->lk_lock_file, lkp->lk_lock_line); \
+ if (lkp->lk_unlock_file) \
+ printf("last unlocked at %s:%d\n", \
+ lkp->lk_unlock_file, lkp->lk_unlock_line); \
+ SPINLOCK_SPINCHECK_DEBUGGER; \
+ } \
+} while (0)
+#else
+#define SPINLOCK_SPINCHECK_DECL /* nothing */
+#define SPINLOCK_SPINCHECK /* nothing */
+#endif /* LOCKDEBUG */
+
/*
* Acquire a resource.
*/
#define ACQUIRE(lkp, error, extflags, drain, wanted) \
if ((extflags) & LK_SPIN) { \
int interlocked; \
+ SPINLOCK_SPINCHECK_DECL; \
\
if ((drain) == 0) \
(lkp)->lk_waitcount++; \
for (interlocked = 1;;) { \
+ SPINLOCK_SPINCHECK; \
if (wanted) { \
if (interlocked) { \
INTERLOCK_RELEASE((lkp), \
@@ -305,6 +340,10 @@
lkp->lk_timo = timo;
}
lkp->lk_wmesg = wmesg; /* just a name for spin locks */
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = NULL;
+ lkp->lk_unlock_file = NULL;
+#endif
}
/*
@@ -401,8 +440,13 @@
* accepted shared locks and shared-to-exclusive upgrades to go away.
*/
int
+#if defined(LOCKDEBUG)
+_lockmgr(__volatile struct lock *lkp, u_int flags,
+ struct simplelock *interlkp, const char *file, int line)
+#else
lockmgr(__volatile struct lock *lkp, u_int flags,
struct simplelock *interlkp)
+#endif
{
int error;
pid_t pid;
@@ -513,6 +557,10 @@
lkp->lk_recurselevel = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+ lkp->lk_unlock_file = file;
+ lkp->lk_unlock_line = line;
+#endif
DONTHAVEIT(lkp);
WAKEUP_WAITER(lkp);
break;
@@ -566,6 +614,10 @@
break;
lkp->lk_flags |= LK_HAVE_EXCL;
SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
HAVEIT(lkp);
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
@@ -631,6 +683,10 @@
break;
lkp->lk_flags |= LK_HAVE_EXCL;
SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
HAVEIT(lkp);
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
@@ -661,6 +717,10 @@
if (lkp->lk_exclusivecount == 0) {
lkp->lk_flags &= ~LK_HAVE_EXCL;
SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+ lkp->lk_unlock_file = file;
+ lkp->lk_unlock_line = line;
+#endif
DONTHAVEIT(lkp);
}
} else if (lkp->lk_sharecount != 0) {
@@ -701,6 +761,10 @@
break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
SETHOLDER(lkp, pid, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
HAVEIT(lkp);
lkp->lk_exclusivecount = 1;
/* XXX unlikely that we'd want this */
@@ -740,7 +804,11 @@
*/
int
+#if defined(LOCKDEBUG)
+_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
+#else
spinlock_release_all(__volatile struct lock *lkp)
+#endif
{
int s, count;
cpuid_t cpu_id;
@@ -765,6 +833,10 @@
COUNT_CPU(cpu_id, -count);
lkp->lk_flags &= ~LK_HAVE_EXCL;
SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
+#if defined(LOCKDEBUG)
+ lkp->lk_unlock_file = file;
+ lkp->lk_unlock_line = line;
+#endif
DONTHAVEIT(lkp);
}
#ifdef DIAGNOSTIC
@@ -785,7 +857,12 @@
*/
void
+#if defined(LOCKDEBUG)
+_spinlock_acquire_count(__volatile struct lock *lkp, int count,
+ const char *file, int line)
+#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
+#endif
{
int s, error;
cpuid_t cpu_id;
@@ -814,6 +891,10 @@
lkp->lk_flags &= ~LK_WANT_EXCL;
lkp->lk_flags |= LK_HAVE_EXCL;
SETHOLDER(lkp, LK_NOPROC, cpu_id);
+#if defined(LOCKDEBUG)
+ lkp->lk_lock_file = file;
+ lkp->lk_lock_line = line;
+#endif
HAVEIT(lkp);
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
diff -r bd192155fd74 -r 2aa904019a86 sys/sys/lock.h
--- a/sys/sys/lock.h Wed Nov 22 05:50:59 2000 +0000
+++ b/sys/sys/lock.h Wed Nov 22 06:31:22 2000 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: lock.h,v 1.39 2000/11/19 00:56:39 sommerfeld Exp $ */
+/* $NetBSD: lock.h,v 1.40 2000/11/22 06:31:22 thorpej Exp $ */
/*-
* Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -167,6 +167,13 @@
#if defined(LOCKDEBUG)
#define lk_list lk_un.lk_un_spin.lk_spin_list
#endif
+
+#if defined(LOCKDEBUG)
+ const char *lk_lock_file;
+ const char *lk_unlock_file;
+ int lk_lock_line;
+ int lk_unlock_line;
+#endif
};
/*
@@ -275,7 +282,13 @@
void lockinit(struct lock *, int prio, const char *wmesg, int timo,
int flags);
+#if defined(LOCKDEBUG)
+int _lockmgr(__volatile struct lock *, u_int flags, struct simplelock *,
+ const char *file, int line);
+#define lockmgr(l, f, i) _lockmgr((l), (f), (i), __FILE__, __LINE__)
+#else
int lockmgr(__volatile struct lock *, u_int flags, struct simplelock *);
+#endif /* LOCKDEBUG */
int lockstatus(struct lock *);
void lockmgr_printinfo(__volatile struct lock *);
@@ -289,8 +302,18 @@
#define spinlockmgr(lkp, flags, intrlk) \
lockmgr((lkp), (flags) | LK_SPIN, (intrlk))
+#if defined(LOCKDEBUG)
+int _spinlock_release_all(__volatile struct lock *, const char *, int);
+void _spinlock_acquire_count(__volatile struct lock *, int, const char *,
+ int);
+
+#define spinlock_release_all(l) _spinlock_release_all((l), __FILE__, __LINE__)
+#define spinlock_acquire_count(l, c) _spinlock_acquire_count((l), (c), \
+ __FILE__, __LINE__)
+#else
int spinlock_release_all(__volatile struct lock *);
void spinlock_acquire_count(__volatile struct lock *, int);
+#endif
#if defined(LOCKDEBUG)
void _simple_lock(__volatile struct simplelock *, const char *, int);
Home |
Main Index |
Thread Index |
Old Index