Source-Changes-HG archive
[src/trunk]: src/lib/libpthread Rip out some very ambitious optimisations aro...
details: https://anonhg.NetBSD.org/src/rev/60ee5ded22cc
branches: trunk
changeset: 467110:60ee5ded22cc
user: ad <ad%NetBSD.org@localhost>
date: Mon Jan 13 18:22:56 2020 +0000
description:
Rip out some very ambitious optimisations around pthread_mutex that don't
buy much. This stuff is hard enough to get right in the kernel, let alone
userspace, and I don't trust that it's right.
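For context, the optimisation being removed worked roughly as follows: a thread bumped a per-thread pt_blocking counter (with memory barriers) around blocking syscalls, so that a thread spinning on a mutex held by it could stop spinning early. The following is a condensed, illustrative sketch pieced together from the hunks below; the structs are heavily simplified stand-ins and this is not drop-in libpthread code.

/*
 * Illustrative sketch only (not NetBSD libpthread code): a reduced
 * stand-in showing how pt_blocking was used before this commit.
 */
#include <stdbool.h>

#define LWPCTL_CPU_NONE_SKETCH	(-1)	/* stand-in for LWPCTL_CPU_NONE */

struct lwpctl_sketch {
	volatile int lc_curcpu;		/* CPU the LWP runs on, or NONE */
};

struct pthread_sketch {
	struct lwpctl_sketch *pt_lwpctl;
	volatile int pt_blocking;	/* the field this commit removes */
};

/* Blocking side: advertise "blocked in userspace" around a syscall. */
static int
blocking_yield_sketch(struct pthread_sketch *self, int (*do_syscall)(void))
{
	int error;

	/* membar_producer() went here: order the unlocked mutex release. */
	self->pt_blocking++;
	error = do_syscall();
	self->pt_blocking--;
	/* membar_sync() went here before re-reading shared lock state. */
	return error;
}

/* Spinning side: keep spinning only while the owner runs and isn't blocked. */
static bool
owner_is_running_sketch(const struct pthread_sketch *owner)
{
	return owner->pt_lwpctl->lc_curcpu != LWPCTL_CPU_NONE_SKETCH &&
	    owner->pt_blocking == 0;
}

After the change, the spinner consults only lc_curcpu, and the counter updates and extra barriers disappear from the park, cond-wait and sched_yield paths, as the hunks below show.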
diffstat:
lib/libpthread/pthread.c | 13 +-----
lib/libpthread/pthread_cond.c | 7 +--
lib/libpthread/pthread_int.h | 3 +-
lib/libpthread/pthread_misc.c | 17 +-------
lib/libpthread/pthread_mutex.c | 76 ++++++++++++----------------------------
lib/libpthread/pthread_rwlock.c | 7 +--
6 files changed, 35 insertions(+), 88 deletions(-)
diffs (truncated from 332 to 300 lines):
diff -r 49dd9e4f83b0 -r 60ee5ded22cc lib/libpthread/pthread.c
--- a/lib/libpthread/pthread.c Mon Jan 13 17:23:07 2020 +0000
+++ b/lib/libpthread/pthread.c Mon Jan 13 18:22:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread.c,v 1.153 2019/03/05 01:35:52 christos Exp $ */
+/* $NetBSD: pthread.c,v 1.154 2020/01/13 18:22:56 ad Exp $ */
/*-
* Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread.c,v 1.153 2019/03/05 01:35:52 christos Exp $");
+__RCSID("$NetBSD: pthread.c,v 1.154 2020/01/13 18:22:56 ad Exp $");
#define __EXPOSE_STACK 1
@@ -319,7 +319,6 @@
t->pt_havespecific = 0;
t->pt_early = NULL;
t->pt_lwpctl = &pthread__dummy_lwpctl;
- t->pt_blocking = 0;
t->pt_droplock = NULL;
memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
@@ -1157,15 +1156,9 @@
int rv, error;
void *obj;
- /*
- * For non-interlocked release of mutexes we need a store
- * barrier before incrementing pt_blocking away from zero.
- * This is provided by pthread_mutex_unlock().
- */
self->pt_willpark = 1;
pthread_mutex_unlock(lock);
self->pt_willpark = 0;
- self->pt_blocking++;
/*
* Wait until we are awoken by a pending unpark operation,
@@ -1239,8 +1232,6 @@
pthread_mutex_unlock(lock);
}
self->pt_early = NULL;
- self->pt_blocking--;
- membar_sync();
return rv;
}
diff -r 49dd9e4f83b0 -r 60ee5ded22cc lib/libpthread/pthread_cond.c
--- a/lib/libpthread/pthread_cond.c Mon Jan 13 17:23:07 2020 +0000
+++ b/lib/libpthread/pthread_cond.c Mon Jan 13 18:22:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_cond.c,v 1.65 2017/12/08 03:08:19 christos Exp $ */
+/* $NetBSD: pthread_cond.c,v 1.66 2020/01/13 18:22:56 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_cond.c,v 1.65 2017/12/08 03:08:19 christos Exp $");
+__RCSID("$NetBSD: pthread_cond.c,v 1.66 2020/01/13 18:22:56 ad Exp $");
#include <stdlib.h>
#include <errno.h>
@@ -164,7 +164,6 @@
self->pt_willpark = 1;
pthread_mutex_unlock(mutex);
self->pt_willpark = 0;
- self->pt_blocking++;
do {
retval = _lwp_park(clkid, TIMER_ABSTIME,
__UNCONST(abstime), self->pt_unpark,
@@ -172,8 +171,6 @@
__UNVOLATILE(&mutex->ptm_waiters));
self->pt_unpark = 0;
} while (retval == -1 && errno == ESRCH);
- self->pt_blocking--;
- membar_sync();
pthread_mutex_lock(mutex);
/*
diff -r 49dd9e4f83b0 -r 60ee5ded22cc lib/libpthread/pthread_int.h
--- a/lib/libpthread/pthread_int.h Mon Jan 13 17:23:07 2020 +0000
+++ b/lib/libpthread/pthread_int.h Mon Jan 13 18:22:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_int.h,v 1.97 2019/12/18 15:11:57 joerg Exp $ */
+/* $NetBSD: pthread_int.h,v 1.98 2020/01/13 18:22:56 ad Exp $ */
/*-
* Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -131,7 +131,6 @@
*/
int pt_dummy1 __aligned(128);
struct lwpctl *pt_lwpctl; /* Kernel/user comms area */
- volatile int pt_blocking; /* Blocking in userspace */
volatile int pt_rwlocked; /* Handed rwlock successfully */
volatile int pt_signalled; /* Received pthread_cond_signal() */
volatile int pt_mutexwait; /* Waiting to acquire mutex */
diff -r 49dd9e4f83b0 -r 60ee5ded22cc lib/libpthread/pthread_misc.c
--- a/lib/libpthread/pthread_misc.c Mon Jan 13 17:23:07 2020 +0000
+++ b/lib/libpthread/pthread_misc.c Mon Jan 13 18:22:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_misc.c,v 1.15 2013/03/21 16:49:12 christos Exp $ */
+/* $NetBSD: pthread_misc.c,v 1.16 2020/01/13 18:22:56 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_misc.c,v 1.15 2013/03/21 16:49:12 christos Exp $");
+__RCSID("$NetBSD: pthread_misc.c,v 1.16 2020/01/13 18:22:56 ad Exp $");
#include <errno.h>
#include <string.h>
@@ -151,20 +151,9 @@
int
pthread__sched_yield(void)
{
- pthread_t self;
- int error;
if (__predict_false(__uselibcstub))
return __libc_thr_yield();
- self = pthread__self();
-
- /* Memory barrier for unlocked mutex release. */
- membar_producer();
- self->pt_blocking++;
- error = _sys_sched_yield();
- self->pt_blocking--;
- membar_sync();
-
- return error;
+ return _sys_sched_yield();
}
diff -r 49dd9e4f83b0 -r 60ee5ded22cc lib/libpthread/pthread_mutex.c
--- a/lib/libpthread/pthread_mutex.c Mon Jan 13 17:23:07 2020 +0000
+++ b/lib/libpthread/pthread_mutex.c Mon Jan 13 18:22:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_mutex.c,v 1.65 2019/03/05 22:49:38 christos Exp $ */
+/* $NetBSD: pthread_mutex.c,v 1.66 2020/01/13 18:22:56 ad Exp $ */
/*-
* Copyright (c) 2001, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -47,7 +47,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_mutex.c,v 1.65 2019/03/05 22:49:38 christos Exp $");
+__RCSID("$NetBSD: pthread_mutex.c,v 1.66 2020/01/13 18:22:56 ad Exp $");
#include <sys/types.h>
#include <sys/lwpctl.h>
@@ -235,10 +235,7 @@
/*
* Spin while the holder is running. 'lwpctl' gives us the true
- * status of the thread. pt_blocking is set by libpthread in order
- * to cut out system call and kernel spinlock overhead on remote CPUs
- * (could represent many thousands of clock cycles). pt_blocking also
- * makes this thread yield if the target is calling sched_yield().
+ * status of the thread.
*/
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
@@ -250,8 +247,7 @@
thread = (pthread_t)MUTEX_OWNER(owner);
if (thread == NULL)
break;
- if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
- thread->pt_blocking)
+ if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
break;
if (count < 128)
count += count;
@@ -262,10 +258,10 @@
return owner;
}
-NOINLINE static void
+NOINLINE static bool
pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
{
- void *new, *owner;
+ void *owner, *next;
/*
* Note that the mutex can become unlocked before we set
@@ -281,34 +277,16 @@
* the value of ptm_owner/pt_mutexwait after we have entered
* the waiters list (the CAS itself must be atomic).
*/
-again:
- membar_consumer();
- owner = ptm->ptm_owner;
-
- if (MUTEX_OWNER(owner) == 0) {
- pthread__mutex_wakeup(self, ptm);
- return;
- }
- if (!MUTEX_HAS_WAITERS(owner)) {
- new = (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT);
- if (atomic_cas_ptr(&ptm->ptm_owner, owner, new) != owner) {
- goto again;
+ for (owner = ptm->ptm_owner;; owner = next) {
+ if (MUTEX_OWNER(owner) == 0) {
+ pthread__mutex_wakeup(self, ptm);
+ return true;
}
- }
-
- /*
- * Note that pthread_mutex_unlock() can do a non-interlocked CAS.
- * We cannot know if the presence of the waiters bit is stable
- * while the holding thread is running. There are many assumptions;
- * see sys/kern/kern_mutex.c for details. In short, we must spin if
- * we see that the holder is running again.
- */
- membar_sync();
- if (MUTEX_OWNER(owner) != (uintptr_t)self)
- pthread__mutex_spin(ptm, owner);
-
- if (membar_consumer(), !MUTEX_HAS_WAITERS(ptm->ptm_owner)) {
- goto again;
+ if (MUTEX_HAS_WAITERS(owner)) {
+ return false;
+ }
+ next = atomic_cas_ptr(&ptm->ptm_owner, owner,
+ (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
}
}
@@ -386,9 +364,12 @@
if (next == waiters)
break;
}
-
+
/* Set the waiters bit and block. */
- pthread__mutex_setwaiters(self, ptm);
+ membar_sync();
+ if (pthread__mutex_setwaiters(self, ptm)) {
+ continue;
+ }
/*
* We may have been awoken by the current thread above,
@@ -398,15 +379,13 @@
* being set to zero). Otherwise it is unsafe to re-enter
* the thread onto the waiters list.
*/
+ membar_sync();
while (self->pt_mutexwait) {
- self->pt_blocking++;
error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
__UNCONST(ts), self->pt_unpark,
__UNVOLATILE(&ptm->ptm_waiters),
__UNVOLATILE(&ptm->ptm_waiters));
self->pt_unpark = 0;
- self->pt_blocking--;
- membar_sync();
if (__predict_true(error != -1)) {
continue;
}
@@ -471,15 +450,11 @@
if (__predict_false(__uselibcstub))
return __libc_mutex_unlock_stub(ptm);
- /*
- * Note this may be a non-interlocked CAS. See lock_slow()
- * above and sys/kern/kern_mutex.c for details.
- */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
membar_exit();
#endif
self = pthread__self();
- value = atomic_cas_ptr_ni(&ptm->ptm_owner, self, NULL);
+ value = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
if (__predict_true(value == self)) {
pthread__smt_wake();
return 0;
@@ -582,12 +557,9 @@
pthread_t thread, next;
ssize_t n, rv;
- /*
- * Take ownership of the current set of waiters. No
- * need for a memory barrier following this, all loads
- * are dependent upon 'thread'.
- */
+ /* Take ownership of the current set of waiters. */
thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
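The most substantive restructuring above is in pthread__mutex_setwaiters(), where the goto-based retry was replaced by a CAS loop that tells the caller whether the mutex turned out to be free. Below is a minimal standalone sketch of that pattern; it uses a simplified owner-word encoding, adds an explicit success check for readability, and omits the wakeup of queued waiters that the real function performs before returning true, so it is not the exact library code.

#include <stdbool.h>
#include <stdint.h>
#include <sys/atomic.h>

#define WAITERS_BIT_SKETCH	((uintptr_t)1)	/* stand-in for MUTEX_WAITERS_BIT */

/*
 * Try to set the waiters bit in the owner word.  Returns true if the
 * mutex became free (caller should retry acquisition), false once the
 * waiters bit is known to be set (caller should park).
 */
static bool
setwaiters_sketch(void * volatile *ownerp)
{
	void *owner, *next;

	for (owner = *ownerp;; owner = next) {
		if (((uintptr_t)owner & ~WAITERS_BIT_SKETCH) == 0)
			return true;	/* unowned: retry the lock */
		if (((uintptr_t)owner & WAITERS_BIT_SKETCH) != 0)
			return false;	/* bit already set: park */
		next = atomic_cas_ptr(ownerp, owner,
		    (void *)((uintptr_t)owner | WAITERS_BIT_SKETCH));
		if (next == owner)
			return false;	/* we set the bit: park */
	}
}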