Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/drm2/linux drm: Spruce up ww_mutex comments...
details: https://anonhg.NetBSD.org/src/rev/aa263a0dfeb6
branches: trunk
changeset: 1028969:aa263a0dfeb6
user: riastradh <riastradh%NetBSD.org@localhost>
date: Sun Dec 19 12:36:24 2021 +0000
description:
drm: Spruce up ww_mutex comments. Audit return values.
diffstat:
sys/external/bsd/drm2/linux/linux_ww_mutex.c | 302 +++++++++++++++++++++++---
1 files changed, 262 insertions(+), 40 deletions(-)
diffs (truncated from 572 to 300 lines):
diff -r e9c4547943ec -r aa263a0dfeb6 sys/external/bsd/drm2/linux/linux_ww_mutex.c
--- a/sys/external/bsd/drm2/linux/linux_ww_mutex.c Sun Dec 19 12:36:15 2021 +0000
+++ b/sys/external/bsd/drm2/linux/linux_ww_mutex.c Sun Dec 19 12:36:24 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: linux_ww_mutex.c,v 1.9 2021/12/19 11:21:20 riastradh Exp $ */
+/* $NetBSD: linux_ww_mutex.c,v 1.10 2021/12/19 12:36:24 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.9 2021/12/19 11:21:20 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.10 2021/12/19 12:36:24 riastradh Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
@@ -168,6 +168,13 @@
};
#endif
+/*
+ * ww_mutex_init(mutex, class)
+ *
+ * Initialize mutex in the given class. Must precede any other
+ * ww_mutex_* operations. When done, mutex must be destroyed
+ * with ww_mutex_destroy.
+ */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{
@@ -187,6 +194,13 @@
#endif
}
+/*
+ * ww_mutex_destroy(mutex)
+ *
+ * Destroy mutex initialized by ww_mutex_init. Caller must not
+ * use mutex with any other ww_mutex_* operations except after
+ * reinitializing with ww_mutex_init.
+ */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{
@@ -205,9 +219,15 @@
}
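
For illustration, a minimal lifecycle sketch. This is hypothetical
caller code, not part of this change; obj_lock, obj_class, and the
setup/teardown routines are made-up names, and the class is assumed
to be set up elsewhere:

    #include <linux/ww_mutex.h>

    static struct ww_class obj_class;   /* assumed initialized */
    static struct ww_mutex obj_lock;

    static void
    obj_setup(void)
    {
        /* Must precede any other ww_mutex_* operation. */
        ww_mutex_init(&obj_lock, &obj_class);
    }

    static void
    obj_teardown(void)
    {
        /* Mutex must be unlocked and quiescent by now. */
        ww_mutex_destroy(&obj_lock);
    }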
/*
- * XXX WARNING: This returns true if it is locked by ANYONE. Does not
- * mean `Do I hold this lock?' (answering which really requires an
- * acquire context).
+ * ww_mutex_is_locked(mutex)
+ *
+ * True if anyone holds mutex locked at the moment, false if not.
+ * Answer is stale as soon as it is returned unless mutex is
+ * held by caller.
+ *
+ * XXX WARNING: This returns true if it is locked by ANYONE. Does
+ * not mean `Do I hold this lock?' (answering which really
+ * requires an acquire context).
*/
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
@@ -233,33 +253,78 @@
return locked;
}
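
Given that caveat, a short sketch of the one safe use, namely
diagnostic assertions about a lock the caller knows it holds
(hypothetical caller code):

    /* Fine: assert an invariant while we hold obj_lock. */
    KASSERT(ww_mutex_is_locked(&obj_lock));

    /* Racy: the answer can change before we act on it. */
    if (!ww_mutex_is_locked(&obj_lock))     /* XXX don't do this */
        ww_mutex_lock(&obj_lock, NULL);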
+/*
+ * ww_mutex_state_wait(mutex, state)
+ *
+ * Wait for mutex, which must be in the given state, to transition
+ * to another state. Uninterruptible; never fails.
+ *
+ * Caller must hold mutex's internal lock.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{
+ KASSERT(mutex_owned(&mutex->wwm_lock));
KASSERT(mutex->wwm_state == state);
do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
while (mutex->wwm_state == state);
}
+/*
+ * ww_mutex_state_wait_sig(mutex, state)
+ *
+ * Wait for mutex, which must be in the given state, to transition
+ * to another state, or fail if interrupted by a signal. Return 0
+ * on success, -EINTR if interrupted by a signal.
+ *
+ * Caller must hold mutex's internal lock.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
int ret;
+ KASSERT(mutex_owned(&mutex->wwm_lock));
KASSERT(mutex->wwm_state == state);
do {
/* XXX errno NetBSD->Linux */
ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
- if (ret == -ERESTART)
- ret = -ERESTARTSYS;
- if (ret)
+ if (ret) {
+ KASSERTMSG((ret == -EINTR || ret == -ERESTART),
+ "ret=%d", ret);
+ ret = -EINTR;
break;
+ }
} while (mutex->wwm_state == state);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
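
The `XXX errno NetBSD->Linux' dance above is the heart of the return
value audit: NetBSD's cv_wait_sig returns positive errnos (0, EINTR,
or ERESTART), while Linux callers expect a negative errno, and the
drm callers here specifically expect only -EINTR. A standalone sketch
of the same convention, as a hypothetical helper that is not in the
tree:

    /* Map a NetBSD cv_wait_sig() result to the Linux convention. */
    static int
    linux_wait_errno(int error)
    {
        if (error == 0)
            return 0;
        KASSERTMSG((error == EINTR || error == ERESTART),
            "error=%d", error);
        return -EINTR;  /* collapse ERESTART; no restart in drm */
    }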
+/*
+ * ww_mutex_lock_wait(mutex, ctx)
+ *
+ * With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
+ * by another thread with an acquire context, wait to acquire
+ * mutex. While waiting, record ctx in the tree of waiters. Does
+ * not update the mutex state otherwise.
+ *
+ * Caller must not already hold mutex. Caller must hold mutex's
+ * internal lock. Uninterruptible; never fails.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
@@ -291,6 +356,22 @@
rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}
+/*
+ * ww_mutex_lock_wait_sig(mutex, ctx)
+ *
+ * With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
+ * by another thread with an acquire context, wait to acquire
+ * mutex and return 0, or return -EINTR if interrupted by a
+ * signal. While waiting, record ctx in the tree of waiters.
+ * Does not update the mutex state otherwise.
+ *
+ * Caller must not already hold mutex. Caller must hold mutex's
+ * internal lock.
+ *
+ * May sleep.
+ *
+ * Internal subroutine.
+ */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
@@ -318,18 +399,31 @@
do {
/* XXX errno NetBSD->Linux */
ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
- if (ret == -ERESTART)
- ret = -ERESTARTSYS;
- if (ret)
+ if (ret) {
+ KASSERTMSG((ret == -EINTR || ret == -ERESTART),
+ "ret=%d", ret);
+ ret = -EINTR;
goto out;
+ }
} while (!(((mutex->wwm_state == WW_CTX) ||
(mutex->wwm_state == WW_WANTOWN)) &&
(mutex->wwm_u.ctx == ctx)));
out: rb_tree_remove_node(&mutex->wwm_waiters, ctx);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
+/*
+ * ww_mutex_lock_noctx(mutex)
+ *
+ * Acquire mutex without an acquire context. Caller must not
+ * already hold the mutex. Uninterruptible; never fails.
+ *
+ * May sleep.
+ *
+ * Internal subroutine, implementing ww_mutex_lock(..., NULL).
+ */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{
@@ -364,6 +458,18 @@
mutex_exit(&mutex->wwm_lock);
}
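
Caller-side sketch of this ctx-less path (hypothetical code): with a
null acquire context the lock behaves like an ordinary sleepable
mutex, so there is no error to check:

    ww_mutex_lock(&obj_lock, NULL);     /* never fails; may sleep */
    /* ... critical section ... */
    ww_mutex_unlock(&obj_lock);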
+/*
+ * ww_mutex_lock_noctx_sig(mutex)
+ *
+ * Acquire mutex without an acquire context and return 0, or fail
+ * and return -EINTR if interrupted by a signal. Caller must not
+ * already hold the mutex.
+ *
+ * May sleep.
+ *
+ * Internal subroutine, implementing
+ * ww_mutex_lock_interruptible(..., NULL).
+ */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
@@ -379,8 +485,10 @@
KASSERTMSG((mutex->wwm_u.owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
- if (ret)
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
goto out;
+ }
goto retry;
case WW_CTX:
KASSERT(mutex->wwm_u.ctx != NULL);
@@ -390,8 +498,10 @@
KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
"locking %p against myself: %p", mutex, curlwp);
ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
- if (ret)
+ if (ret) {
+ KASSERTMSG(ret == -EINTR, "ret=%d", ret);
goto out;
+ }
goto retry;
default:
panic("wait/wound mutex %p in bad state: %d",
@@ -402,12 +512,29 @@
WW_LOCKED(mutex);
ret = 0;
out: mutex_exit(&mutex->wwm_lock);
+ KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
return ret;
}
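
And the interruptible counterpart, where after this audit -EINTR is
the only failure a caller must handle (hypothetical code):

    int ret;

    ret = ww_mutex_lock_interruptible(&obj_lock, NULL);
    if (ret) {
        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
        return ret;     /* interrupted by a signal */
    }
    /* ... critical section ... */
    ww_mutex_unlock(&obj_lock);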
+/*
+ * ww_mutex_lock(mutex, ctx)
+ *
+ * Lock the mutex and return 0, or fail if impossible.
+ *
+ * - If ctx is null, caller must not hold mutex, and ww_mutex_lock
+ * always succeeds and returns 0.
+ *
+ * - If ctx is nonnull, then:
+ * . Fail with -EALREADY if caller already holds mutex.
+ * . Fail with -EDEADLK if someone else holds mutex but there is
+ * a cycle.
+ *
+ * May sleep.
+ */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
+ int ret;
/*
* We do not WW_WANTLOCK at the beginning because we may
@@ -419,7 +546,8 @@
if (ctx == NULL) {
WW_WANTLOCK(mutex);
ww_mutex_lock_noctx(mutex);
- return 0;
+ ret = 0;
+ goto out;
}
KASSERTMSG((ctx->wwx_owner == curlwp),
@@ -466,8 +594,8 @@
* for objects whose locking order is determined by
* userland.
*/
- mutex_exit(&mutex->wwm_lock);
- return -EALREADY;
+ ret = -EALREADY;
+ goto out_unlock;
}
/*
@@ -484,8 +612,8 @@
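
For readers new to wait/wound mutexes, here is a hedged sketch of the
deadlock-avoidance protocol that the -EDEADLK return value exists to
support. All names are hypothetical; ww_mutex_lock_slow is assumed to
have its usual Linux semantics (uninterruptible acquire after a
-EDEADLK back-off, cannot fail). This illustrates the protocol and is
not code from the tree:

    /* Acquire two ww_mutexes of one class without deadlocking. */
    static void
    lock_two(struct ww_mutex *a, struct ww_mutex *b,
        struct ww_class *class)
    {
        struct ww_acquire_ctx ctx;
        struct ww_mutex *held = NULL, *want = a;
        int ret;

        ww_acquire_init(&ctx, class);
        for (;;) {
            ret = ww_mutex_lock(want, &ctx);
            if (ret == -EDEADLK) {
                /*
                 * Lost the ticket tie-break: back off
                 * completely, then sleep for the contended
                 * lock (guaranteed once we hold nothing) and
                 * retry the other lock with this one held.
                 */
                if (held != NULL)
                    ww_mutex_unlock(held);
                ww_mutex_lock_slow(want, &ctx);
                held = want;
                want = (want == a ? b : a);
                continue;
            }
            /* -EALREADY would mean a caller bug. */
            KASSERTMSG(ret == 0, "ret=%d", ret);
            if (held != NULL)
                break;      /* both locks now held */
            held = want;
            want = b;
        }
        ww_acquire_done(&ctx);
        /*
         * Caller uses the objects, then releases with
         * ww_mutex_unlock on each and ww_acquire_fini(&ctx).
         */
    }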