[src/trunk]: src/sys/kern add defines to control whether or not mutex operati...
details: https://anonhg.NetBSD.org/src/rev/ef7eb81378b2
branches: trunk
changeset: 359794:ef7eb81378b2
user: chs <chs%NetBSD.org@localhost>
date: Sun Feb 25 18:54:29 2018 +0000
description:
add defines to control whether or not mutex operations are skipped
after we have panic'd. no functional change.
diffstat:
sys/kern/kern_mutex.c | 17 +++++++++++++++--
1 files changed, 15 insertions(+), 2 deletions(-)
diffs (88 lines):
diff -r c52b44cc3d96 -r ef7eb81378b2 sys/kern/kern_mutex.c
--- a/sys/kern/kern_mutex.c Sun Feb 25 18:53:23 2018 +0000
+++ b/sys/kern/kern_mutex.c Sun Feb 25 18:54:29 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_mutex.c,v 1.72 2018/02/06 07:46:24 ozaki-r Exp $ */
+/* $NetBSD: kern_mutex.c,v 1.73 2018/02/25 18:54:29 chs Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -40,7 +40,7 @@
#define __MUTEX_PRIVATE
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.72 2018/02/06 07:46:24 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.73 2018/02/25 18:54:29 chs Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@@ -60,6 +60,9 @@
#include <machine/lock.h>
+#define MUTEX_PANIC_SKIP_SPIN 1
+#define MUTEX_PANIC_SKIP_ADAPTIVE 1
+
/*
* When not running a debug kernel, spin mutexes are not much
* more than an splraiseipl() and splx() pair.
@@ -489,8 +492,10 @@
* to reduce cache line ping-ponging between CPUs.
*/
do {
+#if MUTEX_PANIC_SKIP_SPIN
if (panicstr != NULL)
break;
+#endif
while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
@@ -547,10 +552,12 @@
owner = mtx->mtx_owner;
continue;
}
+#if MUTEX_PANIC_SKIP_ADAPTIVE
if (__predict_false(panicstr != NULL)) {
KPREEMPT_ENABLE(curlwp);
return;
}
+#endif
if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
MUTEX_ABORT(mtx, "locking against myself");
}
@@ -726,8 +733,10 @@
if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
+#if MUTEX_PANIC_SKIP_SPIN
if (panicstr != NULL)
return;
+#endif
MUTEX_ABORT(mtx, "exiting unheld spin mutex");
}
MUTEX_UNLOCKED(mtx);
@@ -737,11 +746,13 @@
return;
}
+#ifdef MUTEX_PANIC_SKIP_ADAPTIVE
if (__predict_false((uintptr_t)panicstr | cold)) {
MUTEX_UNLOCKED(mtx);
MUTEX_RELEASE(mtx);
return;
}
+#endif
curthread = (uintptr_t)curlwp;
MUTEX_DASSERT(mtx, curthread != 0);
@@ -932,8 +943,10 @@
* to reduce cache line ping-ponging between CPUs.
*/
do {
+#if MUTEX_PANIC_SKIP_SPIN
if (panicstr != NULL)
break;
+#endif
while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
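A minimal stand-alone sketch of the compile-time guard pattern this change introduces, for readers outside the kernel tree. It uses hypothetical user-space stand-ins (sketch_spin_enter, a plain atomic_flag lock bit) for the kernel's spin-mutex primitives; only the panicstr name and the MUTEX_PANIC_SKIP_SPIN define come from the diff above, and this is not the kernel's actual implementation:

/*
 * Sketch only: when MUTEX_PANIC_SKIP_SPIN is non-zero, the acquire
 * path stops waiting once a panic has been recorded, mirroring the
 * "if (panicstr != NULL) break;" guards in the change above.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define MUTEX_PANIC_SKIP_SPIN 1	/* set to 0 to keep spinning after panic */

static const char *panicstr;		/* non-NULL once a "panic" has happened */
static atomic_flag lockbit = ATOMIC_FLAG_INIT;

static void
sketch_spin_enter(void)
{
	for (;;) {
		if (!atomic_flag_test_and_set(&lockbit))
			return;		/* acquired the lock bit */
#if MUTEX_PANIC_SKIP_SPIN
		if (panicstr != NULL)
			return;		/* skip the wait after a panic */
#endif
	}
}

int
main(void)
{
	atomic_flag_test_and_set(&lockbit);	/* another CPU "holds" the lock */
	panicstr = "sketch: simulated panic";
	sketch_spin_enter();			/* returns instead of spinning forever */
	printf("acquire path returned after panic\n");
	return 0;
}

With the define set to 0, the same sketch spins indefinitely on a held lock, which is the pre-existing behaviour the new defines let a kernel build opt back into.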