Source-Changes-HG archive


[src/trunk]: src/sys For LOCKDEBUG:



details:   https://anonhg.NetBSD.org/src/rev/169800ba69a8
branches:  trunk
changeset: 350902:169800ba69a8
user:      christos <christos%NetBSD.org@localhost>
date:      Thu Jan 26 04:11:56 2017 +0000

description:
For LOCKDEBUG:
Always provide the location of the caller of the lock as __func__, __LINE__.
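
The change relies on the standard C idiom of capturing the call site in a wrapper macro: __func__ and __LINE__ are evaluated where the macro expands, so the abort routine can report the caller's location rather than its own. The following is a minimal standalone sketch of that pattern, not code from the NetBSD tree; demo_lock, demo_abort and DEMO_ABORT are hypothetical names used only for illustration (the kernel additionally marks its abort routines __noinline).

/*
 * Hypothetical userland sketch of the __func__/__LINE__ forwarding
 * pattern applied by this commit.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct demo_lock {
	int	held;
};

/* Abort routine: reports the caller's function and line, then exits. */
static void
demo_abort(const char *func, size_t line, struct demo_lock *lk,
    const char *msg)
{

	fprintf(stderr, "lock error: %s,%zu: %p: %s\n",
	    func, line, (void *)lk, msg);
	abort();
}

/* Call-site macro: supplies the caller's __func__ and __LINE__. */
#define	DEMO_ABORT(lk, msg)						\
	demo_abort(__func__, __LINE__, (lk), (msg))

static void
demo_lock_acquire(struct demo_lock *lk)
{

	if (lk->held)
		DEMO_ABORT(lk, "locking against myself");
	lk->held = 1;
}

int
main(void)
{
	struct demo_lock lk = { .held = 1 };

	/* Aborts with a message naming demo_lock_acquire and its line. */
	demo_lock_acquire(&lk);
	return 0;
}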

diffstat:

 sys/kern/kern_lock.c      |    6 +-
 sys/kern/kern_mutex.c     |   16 ++--
 sys/kern/kern_rwlock.c    |   17 +++--
 sys/kern/subr_lockdebug.c |  123 +++++++++++++++++++++++++--------------------
 sys/sys/lockdebug.h       |   47 ++++++++++-------
 5 files changed, 114 insertions(+), 95 deletions(-)

diffs (truncated from 637 to 300 lines):

diff -r 465817bdeb27 -r 169800ba69a8 sys/kern/kern_lock.c
--- a/sys/kern/kern_lock.c      Thu Jan 26 04:10:27 2017 +0000
+++ b/sys/kern/kern_lock.c      Thu Jan 26 04:11:56 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lock.c,v 1.157 2015/04/11 15:24:25 skrll Exp $    */
+/*     $NetBSD: kern_lock.c,v 1.158 2017/01/26 04:11:56 christos Exp $ */
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.157 2015/04/11 15:24:25 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.158 2017/01/26 04:11:56 christos Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -101,7 +101,7 @@
  */
 
 #define        _KERNEL_LOCK_ABORT(msg)                                         \
-    LOCKDEBUG_ABORT(kernel_lock, &_kernel_lock_ops, __func__, msg)
+    LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)
 
 #ifdef LOCKDEBUG
 #define        _KERNEL_LOCK_ASSERT(cond)                                       \
diff -r 465817bdeb27 -r 169800ba69a8 sys/kern/kern_mutex.c
--- a/sys/kern/kern_mutex.c     Thu Jan 26 04:10:27 2017 +0000
+++ b/sys/kern/kern_mutex.c     Thu Jan 26 04:11:56 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_mutex.c,v 1.63 2016/07/07 06:55:43 msaitoh Exp $  */
+/*     $NetBSD: kern_mutex.c,v 1.64 2017/01/26 04:11:56 christos Exp $ */
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -40,7 +40,7 @@
 #define        __MUTEX_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.63 2016/07/07 06:55:43 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.64 2017/01/26 04:11:56 christos Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -82,7 +82,7 @@
     LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),              \
         (uintptr_t)__builtin_return_address(0), 0)
 #define        MUTEX_ABORT(mtx, msg)                                   \
-    mutex_abort(mtx, __func__, msg)
+    mutex_abort(__func__, __LINE__, mtx, msg)
 
 #if defined(LOCKDEBUG)
 
@@ -261,8 +261,8 @@
 __strong_alias(mutex_spin_exit,mutex_vector_exit);
 #endif
 
-static void            mutex_abort(kmutex_t *, const char *, const char *);
-static void            mutex_dump(volatile void *);
+static void    mutex_abort(const char *, size_t, kmutex_t *, const char *);
+static void    mutex_dump(volatile void *);
 
 lockops_t mutex_spin_lockops = {
        "Mutex",
@@ -307,11 +307,11 @@
  *     we ask the compiler to not inline it.
  */
 void __noinline
-mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
+mutex_abort(const char *func, size_t line, kmutex_t *mtx, const char *msg)
 {
 
-       LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
-           &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
+       LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ?
+           &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
 }
 
 /*
diff -r 465817bdeb27 -r 169800ba69a8 sys/kern/kern_rwlock.c
--- a/sys/kern/kern_rwlock.c    Thu Jan 26 04:10:27 2017 +0000
+++ b/sys/kern/kern_rwlock.c    Thu Jan 26 04:11:56 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_rwlock.c,v 1.45 2014/11/28 08:28:17 uebayasi Exp $        */
+/*     $NetBSD: kern_rwlock.c,v 1.46 2017/01/26 04:11:56 christos Exp $        */
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.45 2014/11/28 08:28:17 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.46 2017/01/26 04:11:56 christos Exp $");
 
 #define        __RWLOCK_PRIVATE
 
@@ -73,7 +73,7 @@
 #define        RW_DASSERT(rw, cond)                                            \
 do {                                                                   \
        if (!(cond))                                                    \
-               rw_abort(rw, __func__, "assertion failed: " #cond);     \
+               rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
 } while (/* CONSTCOND */ 0);
 
 #else  /* LOCKDEBUG */
@@ -94,7 +94,7 @@
 #define        RW_ASSERT(rw, cond)                                             \
 do {                                                                   \
        if (!(cond))                                                    \
-               rw_abort(rw, __func__, "assertion failed: " #cond);     \
+               rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
 } while (/* CONSTCOND */ 0)
 
 #else
@@ -111,7 +111,7 @@
 #define        RW_INHERITDEBUG(n, o)           /* nothing */
 #endif /* defined(LOCKDEBUG) */
 
-static void    rw_abort(krwlock_t *, const char *, const char *);
+static void    rw_abort(const char *, size_t, krwlock_t *, const char *);
 static void    rw_dump(volatile void *);
 static lwp_t   *rw_owner(wchan_t);
 
@@ -183,13 +183,13 @@
  *     we ask the compiler to not inline it.
  */
 static void __noinline
-rw_abort(krwlock_t *rw, const char *func, const char *msg)
+rw_abort(const char *func, size_t line, krwlock_t *rw, const char *msg)
 {
 
        if (panicstr != NULL)
                return;
 
-       LOCKDEBUG_ABORT(rw, &rwlock_lockops, func, msg);
+       LOCKDEBUG_ABORT(func, line, rw, &rwlock_lockops, msg);
 }
 
 /*
@@ -338,7 +338,8 @@
                        return;
                }
                if (__predict_false(RW_OWNER(rw) == curthread)) {
-                       rw_abort(rw, __func__, "locking against myself");
+                       rw_abort(__func__, __LINE__, rw,
+                           "locking against myself");
                }
                /*
                 * If the lock owner is running on another CPU, and
diff -r 465817bdeb27 -r 169800ba69a8 sys/kern/subr_lockdebug.c
--- a/sys/kern/subr_lockdebug.c Thu Jan 26 04:10:27 2017 +0000
+++ b/sys/kern/subr_lockdebug.c Thu Jan 26 04:11:56 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $      */
+/*     $NetBSD: subr_lockdebug.c,v 1.55 2017/01/26 04:11:56 christos Exp $     */
 
 /*-
  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.55 2017/01/26 04:11:56 christos Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
@@ -99,8 +99,8 @@
 bool                   ld_nomore;
 lockdebug_t            ld_prime[LD_BATCH];
 
-static void    lockdebug_abort1(lockdebug_t *, int, const char *,
-                                const char *, bool);
+static void    lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
+    const char *, bool);
 static int     lockdebug_more(int);
 static void    lockdebug_init(void);
 static void    lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
@@ -190,14 +190,15 @@
  *     Find a lockdebug structure by a pointer to a lock and return it locked.
  */
 static inline lockdebug_t *
-lockdebug_lookup(volatile void *lock, uintptr_t where)
+lockdebug_lookup(const char *func, size_t line, volatile void *lock,
+    uintptr_t where)
 {
        lockdebug_t *ld;
 
        ld = lockdebug_lookup1(lock);
        if (ld == NULL) {
-               panic("lockdebug_lookup: uninitialized lock "
-                   "(lock=%p, from=%08"PRIxPTR")", lock, where);
+               panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
+                   PRIxPTR ")", func, line, lock, where);
        }
        return ld;
 }
@@ -238,7 +239,8 @@
  *     structure.
  */
 bool
-lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
+lockdebug_alloc(const char *func, size_t line, volatile void *lock,
+    lockops_t *lo, uintptr_t initaddr)
 {
        struct cpu_info *ci;
        lockdebug_t *ld;
@@ -253,7 +255,8 @@
        __cpu_simple_lock(&ld_mod_lk);
        if ((ld = lockdebug_lookup1(lock)) != NULL) {
                __cpu_simple_unlock(&ld_mod_lk);
-               lockdebug_abort1(ld, s, __func__, "already initialized", true);
+               lockdebug_abort1(func, line, ld, s, "already initialized",
+                   true);
                return false;
        }
 
@@ -288,7 +291,7 @@
        ci->ci_lkdebug_recurse--;
 
        if (ld->ld_lock != NULL) {
-               panic("lockdebug_alloc: corrupt table ld %p", ld);
+               panic("%s,%zu: corrupt table ld %p", func, line, ld);
        }
 
        /* Initialise the structure. */
@@ -314,7 +317,7 @@
  *     A lock is being destroyed, so release debugging resources.
  */
 void
-lockdebug_free(volatile void *lock)
+lockdebug_free(const char *func, size_t line, volatile void *lock)
 {
        lockdebug_t *ld;
        int s;
@@ -324,16 +327,18 @@
 
        s = splhigh();
        __cpu_simple_lock(&ld_mod_lk);
-       ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
+       ld = lockdebug_lookup(func, line, lock,
+           (uintptr_t) __builtin_return_address(0));
        if (ld == NULL) {
                __cpu_simple_unlock(&ld_mod_lk);
-               panic("lockdebug_free: destroying uninitialized object %p"
-                   "(ld_lock=%p)", lock, ld->ld_lock);
+               panic("%s,%zu: destroying uninitialized object %p"
+                   "(ld_lock=%p)", func, line, lock, ld->ld_lock);
                return;
        }
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
                __cpu_simple_unlock(&ld_mod_lk);
-               lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
+               lockdebug_abort1(func, line, ld, s, "is locked or in use",
+                   true);
                return;
        }
        lockdebug_lock_cpus();
@@ -415,7 +420,8 @@
  *     Process the preamble to a lock acquire.
  */
 void
-lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
+lockdebug_wantlock(const char *func, size_t line,
+    volatile void *lock, uintptr_t where, int shared)
 {
        struct lwp *l = curlwp;
        lockdebug_t *ld;
@@ -429,7 +435,7 @@
                return;
 
        s = splhigh();
-       if ((ld = lockdebug_lookup(lock, where)) == NULL) {
+       if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
                splx(s);
                return;
        }
@@ -442,7 +448,7 @@
        }
        if (cpu_intr_p()) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
-                       lockdebug_abort1(ld, s, __func__,
+                       lockdebug_abort1(func, line, ld, s,
                            "acquiring sleep lock from interrupt context",
                            true);
                        return;
@@ -453,7 +459,7 @@
        else
                ld->ld_exwant++;
        if (recurse) {
-               lockdebug_abort1(ld, s, __func__, "locking against myself",
+               lockdebug_abort1(func, line, ld, s, "locking against myself",
                    true);
                return;
        }
@@ -467,8 +473,8 @@
  *     Process a lock acquire operation.
  */


