Source-Changes-HG archive
[src/trunk]: src/lib/libpthread Mutex tuneup.
details: https://anonhg.NetBSD.org/src/rev/f18600139ee7
branches: trunk
changeset: 542503:f18600139ee7
user: nathanw <nathanw@NetBSD.org>
date: Fri Jan 31 02:55:00 2003 +0000
description:
Mutex tuneup.
* Use a double-checked locking technique to avoid taking
the interlock in pthread_mutex_unlock().
* In pthread_mutex_lock() and pthread_mutex_trylock(), only store the
stack pointer, not the thread ID, in ptm_owner. Do the translation
to a thread ID in the slow-lock, errorcheck, and recursive mutex
cases rather than in the common path.
* Juggle where pthread__self() is called, to move it out of the fast path.
Overall, this means that neither pthread_self() nor
pthread_spin[un]lock() is used in the course of locking and unlocking
an uncontested mutex. This speeds up the fast path by 40-50% and
eliminates about 98% of the spinlocks used by a couple of large
threaded applications.
(There is still a GET_MUTEX_PRIVATE() in the fast path... perhaps the
type should be stored in the main body of the mutex.)
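The core of the change can be illustrated outside of libpthread. The
sketch below is not the committed code: it is a minimal, modernized
model (C11 atomics, a waiter count standing in for the ptm_blocked
sleep queue, and hypothetical toy_* names throughout) of the two ideas
above: stash something cheap in the owner field on the fast path, and
on unlock release the lock first, touching the interlock only if a
waiter is visible.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	/* Toy model of the patched mutex; all names are hypothetical. */
	struct toy_mutex {
		atomic_flag	lock;		/* stands in for ptm_lock */
		atomic_flag	interlock;	/* stands in for ptm_interlock */
		_Atomic(void *)	owner;		/* stands in for ptm_owner */
		atomic_int	nwaiters;	/* stands in for ptm_blocked */
	};
	#define TOY_MUTEX_INIT { ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT, NULL, 0 }

	static bool
	toy_trylock(struct toy_mutex *m)
	{
		int stack_marker;	/* an address on our stack is identity enough */

		if (atomic_flag_test_and_set_explicit(&m->lock,
		    memory_order_acquire))
			return false;	/* contended; the sleep path is not shown */
		/*
		 * Fast path: no pthread_self() equivalent.  The stored
		 * value is never dereferenced, only masked or compared
		 * by the slow (errorcheck/recursive) paths.
		 */
		atomic_store_explicit(&m->owner, (void *)&stack_marker,
		    memory_order_relaxed);
		return true;
	}

	static void
	toy_unlock(struct toy_mutex *m)
	{
		atomic_store_explicit(&m->owner, NULL, memory_order_relaxed);
		atomic_flag_clear_explicit(&m->lock, memory_order_release);
		/*
		 * Double-checked step: take the interlock only if a
		 * waiter is already visible.  A waiter that enqueues
		 * itself just after this test re-checks the lock before
		 * sleeping, so a missed wakeup here is recovered by the
		 * waiter's own retry loop.
		 */
		if (atomic_load_explicit(&m->nwaiters,
		    memory_order_acquire) > 0) {
			while (atomic_flag_test_and_set_explicit(&m->interlock,
			    memory_order_acquire))
				;	/* spin, as pthread_spinlock() would */
			/* ...dequeue and wake one waiter here... */
			atomic_flag_clear_explicit(&m->interlock,
			    memory_order_release);
		}
	}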
diffstat:
lib/libpthread/pthread_mutex.c | 62 +++++++++++++++++++++++++++--------------
1 files changed, 41 insertions(+), 21 deletions(-)
diffs (145 lines):
diff -r 003902f7bc18 -r f18600139ee7 lib/libpthread/pthread_mutex.c
--- a/lib/libpthread/pthread_mutex.c Fri Jan 31 02:15:57 2003 +0000
+++ b/lib/libpthread/pthread_mutex.c Fri Jan 31 02:55:00 2003 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_mutex.c,v 1.7 2003/01/27 21:01:00 nathanw Exp $ */
+/* $NetBSD: pthread_mutex.c,v 1.8 2003/01/31 02:55:00 nathanw Exp $ */
/*-
* Copyright (c) 2001, 2003 The NetBSD Foundation, Inc.
@@ -181,7 +181,14 @@
}
/* We have the lock! */
- mutex->ptm_owner = pthread__self();
+ /*
+ * Identifying ourselves may be slow, and this assignment is
+ * only needed for (a) debugging identity of the owning thread
+ * and (b) handling errorcheck and recursive mutexes. It's
+ * better to just stash our stack pointer here and let those
+ * slow exception cases compute the stack->thread mapping.
+ */
+ mutex->ptm_owner = (pthread_t)pthread__sp();
return 0;
}
@@ -213,7 +220,7 @@
GET_MUTEX_PRIVATE(mutex, mp);
- if (mutex->ptm_owner == self) {
+ if (pthread__id(mutex->ptm_owner) == self) {
switch (mp->type) {
case PTHREAD_MUTEX_ERRORCHECK:
pthread_spinunlock(self,
@@ -241,7 +248,7 @@
* Locking a mutex is not a cancellation
* point, so we don't need to do the
* test-cancellation dance. We may get woken
- * up spuriously by pthread_cancel, though,
+ * up spuriously by pthread_cancel or signals,
* but it's okay since we're just going to
* retry.
*/
@@ -267,7 +274,6 @@
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- pthread_t self = pthread__self();
#ifdef ERRORCHECK
if ((mutex == NULL) || (mutex->ptm_magic != _PT_MUTEX_MAGIC))
@@ -276,6 +282,7 @@
PTHREADD_ADD(PTHREADD_MUTEX_TRYLOCK);
if (pthread__simple_lock_try(&mutex->ptm_lock) == 0) {
+ pthread_t self;
struct mutex_private *mp;
GET_MUTEX_PRIVATE(mutex, mp);
@@ -285,7 +292,8 @@
* interlock because these fields are only modified
* if we know we own the mutex.
*/
- if (mutex->ptm_owner == self) {
+ self = pthread__self();
+ if (pthread__id(mutex->ptm_owner) == self) {
switch (mp->type) {
case PTHREAD_MUTEX_ERRORCHECK:
return EDEADLK;
@@ -301,7 +309,8 @@
return EBUSY;
}
- mutex->ptm_owner = self;
+ /* see comment at the end of pthread_mutex_lock() */
+ mutex->ptm_owner = (pthread_t)pthread__sp();
return 0;
}
@@ -313,8 +322,6 @@
struct mutex_private *mp;
pthread_t self, blocked;
- self = pthread__self();
-
#ifdef ERRORCHECK
if ((mutex == NULL) || (mutex->ptm_magic != _PT_MUTEX_MAGIC))
return EINVAL;
@@ -333,12 +340,12 @@
*/
switch (mp->type) {
case PTHREAD_MUTEX_ERRORCHECK:
- if (mutex->ptm_owner != self)
+ if (pthread__id(mutex->ptm_owner) != pthread__self())
return EPERM;
break;
case PTHREAD_MUTEX_RECURSIVE:
- if (mutex->ptm_owner != self)
+ if (pthread__id(mutex->ptm_owner) != pthread__self())
return EPERM;
if (mp->recursecount != 0) {
mp->recursecount--;
@@ -347,18 +354,31 @@
break;
}
- pthread_spinlock(self, &mutex->ptm_interlock);
- blocked = PTQ_FIRST(&mutex->ptm_blocked);
- if (blocked)
- PTQ_REMOVE(&mutex->ptm_blocked, blocked, pt_sleep);
mutex->ptm_owner = NULL;
pthread__simple_unlock(&mutex->ptm_lock);
- pthread_spinunlock(self, &mutex->ptm_interlock);
-
- /* Give the head of the blocked queue another try. */
- if (blocked) {
- PTHREADD_ADD(PTHREADD_MUTEX_UNLOCK_UNBLOCK);
- pthread__sched(self, blocked);
+ /*
+ * Do a double-checked locking dance to see if there are any
+ * waiters. If we don't see any waiters, we can exit, because
+ * we've already released the lock. If we do see waiters, they
+ * were probably waiting on us... there's a slight chance that
+ * they are waiting on a different thread's ownership of the
+ * lock that happened between the unlock above and this
+ * examination of the queue; if so, no harm is done, as the
+ * waiter will loop and see that the mutex is still locked.
+ */
+ if (!PTQ_EMPTY(&mutex->ptm_blocked)) {
+ self = pthread__self();
+ pthread_spinlock(self, &mutex->ptm_interlock);
+ blocked = PTQ_FIRST(&mutex->ptm_blocked);
+ if (blocked)
+ PTQ_REMOVE(&mutex->ptm_blocked, blocked, pt_sleep);
+ pthread_spinunlock(self, &mutex->ptm_interlock);
+
+ /* Give the head of the blocked queue another try. */
+ if (blocked) {
+ PTHREADD_ADD(PTHREADD_MUTEX_UNLOCK_UNBLOCK);
+ pthread__sched(self, blocked);
+ }
}
return 0;
}
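For reference, the pthread__id() translation used by the slow paths
above is defined in pthread_int.h, not in this diff. Conceptually it
works because this libpthread keeps each thread's structure at the
base of its stack, and stacks have a fixed power-of-two size and
alignment, so the saved stack pointer can simply be masked down to the
owning thread. A minimal sketch of that idea, with a made-up stack
size and hypothetical names (the real mask and layout come from
pthread_int.h):

	#include <stdint.h>

	/*
	 * Hypothetical layout: 2 MB stacks, aligned to their size,
	 * with the thread structure at the stack base.
	 */
	#define TOY_STACKSIZE	(2UL * 1024 * 1024)
	#define TOY_STACKMASK	(~(uintptr_t)(TOY_STACKSIZE - 1))

	struct toy_thread {
		int	id;		/* ...per-thread state... */
	};

	/* Map a stack pointer (as stashed in ptm_owner) to its thread. */
	static struct toy_thread *
	toy_id(void *owner)
	{
		return (struct toy_thread *)((uintptr_t)owner & TOY_STACKMASK);
	}

This is why stashing the raw stack pointer is safe: comparing
pthread__id(ptm_owner) against pthread__self() in the errorcheck and
recursive cases recovers the same identity the old code computed
eagerly, while the uncontested path pays only the cost of reading the
stack pointer register.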