Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/common/linux Rework linux_tasklet.c a little.
details: https://anonhg.NetBSD.org/src/rev/fa06ebaaa5d6
branches: trunk
changeset: 1028438:fa06ebaaa5d6
user: riastradh <riastradh%NetBSD.org@localhost>
date: Sun Dec 19 11:03:17 2021 +0000
description:
Rework linux_tasklet.c a little.
Provide some more hacks for i915's grubby paws.
diffstat:
sys/external/bsd/common/include/linux/tasklet.h | 31 ++-
sys/external/bsd/common/linux/linux_tasklet.c | 247 ++++++++++++++++-------
2 files changed, 189 insertions(+), 89 deletions(-)
diffs (truncated from 464 to 300 lines):
diff -r e90e8610ea30 -r fa06ebaaa5d6 sys/external/bsd/common/include/linux/tasklet.h
--- a/sys/external/bsd/common/include/linux/tasklet.h Sun Dec 19 11:03:09 2021 +0000
+++ b/sys/external/bsd/common/include/linux/tasklet.h Sun Dec 19 11:03:17 2021 +0000
@@ -1,7 +1,7 @@
-/* $NetBSD: tasklet.h,v 1.4 2021/12/19 01:17:46 riastradh Exp $ */
+/* $NetBSD: tasklet.h,v 1.5 2021/12/19 11:03:17 riastradh Exp $ */
/*-
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -33,16 +33,22 @@
#define _LINUX_TASKLET_H_
/* namespace */
+#define __tasklet_disable_sync_once linux___tasklet_disable_sync_once
+#define __tasklet_enable linux___tasklet_enable
+#define __tasklet_enable_sync_once linux___tasklet_enable_sync_once
+#define __tasklet_is_enabled linux___tasklet_is_enabled
+#define __tasklet_is_scheduled linux___tasklet_is_scheduled
#define tasklet_disable linux_tasklet_disable
-#define tasklet_disable_sync_once linux_tasklet_disable_sync_once
#define tasklet_enable linux_tasklet_enable
-#define tasklet_enable_sync_once linux_tasklet_enable_sync_once
#define tasklet_hi_schedule linux_tasklet_hi_schedule
#define tasklet_init linux_tasklet_init
-#define tasklet_is_enabled linux_tasklet_is_enabled
+#define tasklet_is_locked linux_tasklet_is_locked
#define tasklet_kill linux_tasklet_kill
#define tasklet_schedule linux_tasklet_schedule
#define tasklet_struct linux_tasklet_struct
+#define tasklet_trylock linux_tasklet_trylock
+#define tasklet_unlock linux_tasklet_unlock
+#define tasklet_unlock_wait linux_tasklet_unlock_wait
struct tasklet_struct {
SIMPLEQ_ENTRY(tasklet_struct) tl_entry;
@@ -81,9 +87,16 @@
void tasklet_hi_schedule(struct tasklet_struct *);
void tasklet_kill(struct tasklet_struct *);
-/* i915drmkms hack */
-void tasklet_disable_sync_once(struct tasklet_struct *);
-void tasklet_enable_sync_once(struct tasklet_struct *);
-bool tasklet_is_enabled(const struct tasklet_struct *);
+bool tasklet_is_locked(const struct tasklet_struct *);
+bool tasklet_trylock(struct tasklet_struct *);
+void tasklet_unlock(struct tasklet_struct *);
+void tasklet_unlock_wait(const struct tasklet_struct *);
+
+/* i915 hacks */
+void __tasklet_disable_sync_once(struct tasklet_struct *);
+void __tasklet_enable_sync_once(struct tasklet_struct *);
+bool __tasklet_is_enabled(const struct tasklet_struct *);
+bool __tasklet_is_scheduled(const struct tasklet_struct *);
+bool __tasklet_enable(struct tasklet_struct *);
#endif /* _LINUX_TASKLET_H_ */
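
As context for the new declarations above: tasklet_trylock and
tasklet_unlock treat the TASKLET_RUNNING bit as a try-lock, and
tasklet_unlock_wait spins until that bit is clear.  A minimal
caller-side sketch, assuming only the prototypes above (the function
name drain_example is hypothetical, not part of this change):

	static void
	drain_example(struct tasklet_struct *t)
	{

		if (tasklet_trylock(t)) {
			/* We set RUNNING; t is not running anywhere now. */
			tasklet_unlock(t);	/* clear RUNNING (release) */
		} else {
			/* Another CPU holds RUNNING; wait for it to clear. */
			tasklet_unlock_wait(t);
		}
	}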
diff -r e90e8610ea30 -r fa06ebaaa5d6 sys/external/bsd/common/linux/linux_tasklet.c
--- a/sys/external/bsd/common/linux/linux_tasklet.c Sun Dec 19 11:03:09 2021 +0000
+++ b/sys/external/bsd/common/linux/linux_tasklet.c Sun Dec 19 11:03:17 2021 +0000
@@ -1,7 +1,7 @@
-/* $NetBSD: linux_tasklet.c,v 1.4 2021/12/19 01:46:01 riastradh Exp $ */
+/* $NetBSD: linux_tasklet.c,v 1.5 2021/12/19 11:03:18 riastradh Exp $ */
/*-
- * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * Copyright (c) 2018, 2020 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.4 2021/12/19 01:46:01 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.5 2021/12/19 11:03:18 riastradh Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
@@ -195,27 +195,20 @@
/* Go through the queue of tasklets we grabbed. */
while (!SIMPLEQ_EMPTY(&th)) {
struct tasklet_struct *tasklet;
- unsigned state;
/* Remove the first tasklet from the queue. */
tasklet = SIMPLEQ_FIRST(&th);
SIMPLEQ_REMOVE_HEAD(&th, tl_entry);
+ KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
+ TASKLET_SCHEDULED);
+
/*
* Test and set RUNNING, in case it is already running
* on another CPU and got scheduled again on this one
* before it completed.
*/
- do {
- state = tasklet->tl_state;
- /* It had better be scheduled. */
- KASSERT(state & TASKLET_SCHEDULED);
- if (state & TASKLET_RUNNING)
- break;
- } while (atomic_cas_uint(&tasklet->tl_state, state,
- state | TASKLET_RUNNING) != state);
-
- if (state & TASKLET_RUNNING) {
+ if (!tasklet_trylock(tasklet)) {
/*
* Put it back on the queue to run it again in
* a sort of busy-wait, and move on to the next
@@ -225,35 +218,30 @@
continue;
}
- /* Wait for last runner's side effects. */
- membar_enter();
-
- /* Check whether it's currently disabled. */
- if (tasklet->tl_disablecount) {
+ /*
+ * Check whether it's currently disabled.
+ *
+ * Pairs with membar_exit in __tasklet_enable.
+ */
+ if (atomic_load_acquire(&tasklet->tl_disablecount)) {
/*
* Disabled: clear the RUNNING bit and requeue
* it, but keep it SCHEDULED.
*/
- KASSERT(tasklet->tl_state & TASKLET_RUNNING);
- atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
+ tasklet_unlock(tasklet);
tasklet_queue_enqueue(tq, tasklet);
continue;
}
/* Not disabled. Clear SCHEDULED and call func. */
- KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);
+ KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
+ TASKLET_SCHEDULED);
atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);
(*tasklet->func)(tasklet->data);
- /*
- * Guarantee all caller-relevant reads or writes in
- * func have completed before clearing RUNNING bit.
- */
- membar_exit();
-
/* Clear RUNNING to notify tasklet_disable. */
- atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
+ tasklet_unlock(tasklet);
}
}
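
The acquire load of tl_disablecount above pairs with a release
operation on the enable path.  The body of __tasklet_enable lies
beyond the truncation of this diff, so the following is only a sketch
of that release side, assuming it keeps the membar_exit and
atomic_dec_uint_nv sequence that the removed tasklet_enable body
(further down in this diff) used:

	/* Hypothetical sketch, not the committed code. */
	bool
	__tasklet_enable(struct tasklet_struct *tasklet)
	{
		unsigned int disablecount;

		/*
		 * Pairs with atomic_load_acquire of tl_disablecount in
		 * the softint handler: order this CPU's prior writes
		 * before the decrement is observed.
		 */
		membar_exit();
		disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
		KASSERT(disablecount != UINT_MAX);

		return disablecount == 0;
	}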
@@ -271,7 +259,7 @@
/* Test and set the SCHEDULED bit. If already set, we're done. */
do {
- ostate = tasklet->tl_state;
+ ostate = atomic_load_relaxed(&tasklet->tl_state);
if (ostate & TASKLET_SCHEDULED)
return;
nstate = ostate | TASKLET_SCHEDULED;
@@ -297,7 +285,7 @@
struct tasklet_cpu *tc;
int s;
- KASSERT(tasklet->tl_state & TASKLET_SCHEDULED);
+ KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);
/*
* Insert on the current CPU's queue while all interrupts are
@@ -326,8 +314,8 @@
unsigned long data)
{
- tasklet->tl_state = 0;
- tasklet->tl_disablecount = 0;
+ atomic_store_relaxed(&tasklet->tl_state, 0);
+ atomic_store_relaxed(&tasklet->tl_disablecount, 0);
tasklet->func = func;
tasklet->data = data;
}
@@ -372,6 +360,8 @@
* If tasklet is guaranteed not to be scheduled, e.g. if you have
* just invoked tasklet_kill, then tasklet_disable serves to wait
* for it to complete in case it might already be running.
+ *
+ * Load-acquire semantics.
*/
void
tasklet_disable(struct tasklet_struct *tasklet)
@@ -384,21 +374,7 @@
KASSERT(disablecount != 0);
/* Wait for it to finish running, if it was running. */
- while (tasklet->tl_state & TASKLET_RUNNING)
- SPINLOCK_BACKOFF_HOOK;
-
- /*
- * Guarantee any side effects of running are visible to us
- * before we return.
- *
- * XXX membar_sync is overkill here. It is tempting to issue
- * membar_enter, but it only orders stores | loads, stores;
- * what we really want here is load_acquire(&tasklet->tl_state)
- * above, i.e. to witness all side effects preceding the store
- * whose value we loaded. Absent that, membar_sync is the best
- * we can do.
- */
- membar_sync();
+ tasklet_unlock_wait(tasklet);
}
/*
@@ -406,22 +382,14 @@
*
* Decrement tasklet's disable count. If it was previously
* scheduled to run, it may now run.
+ *
+ * Store-release semantics.
*/
void
tasklet_enable(struct tasklet_struct *tasklet)
{
- unsigned int disablecount __diagused;
- /*
- * Guarantee all caller-relevant reads or writes have completed
- * before potentially allowing tasklet to run again by
- * decrementing the disable count.
- */
- membar_exit();
-
- /* Decrement the disable count. */
- disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
- KASSERT(disablecount != UINT_MAX);
+ (void)__tasklet_enable(tasklet);
}
/*
@@ -439,7 +407,7 @@
"deadlock: soft interrupts are blocked in interrupt context");
/* Wait for it to be removed from the queue. */
- while (tasklet->tl_state & TASKLET_SCHEDULED)
+ while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
SPINLOCK_BACKOFF_HOOK;
/*
@@ -459,19 +427,94 @@
*/
/* Wait for it to finish running. */
- while (tasklet->tl_state & TASKLET_RUNNING)
- SPINLOCK_BACKOFF_HOOK;
+ tasklet_unlock_wait(tasklet);
+}
+
+/*
+ * tasklet_is_locked(tasklet)
+ *
+ * True if tasklet is currently locked. Caller must use it only
+ * for positive assertions.
+ */
+bool
+tasklet_is_locked(const struct tasklet_struct *tasklet)
+{
+
+ return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
+}
- /*
- * Wait for any side effects running. Again, membar_sync is
- * overkill; we really want load_acquire(&tasklet->tl_state)
- * here.
- */
- membar_sync();
+/*
+ * tasklet_trylock(tasklet)
+ *
+ * Try to lock tasklet, i.e., set TASKLET_RUNNING. Return true if
+ * we locked it, false if already locked.
+ *
+ * Load-acquire semantics.
+ */
+bool
+tasklet_trylock(struct tasklet_struct *tasklet)
+{
+ unsigned state;
+
+ do {
+ /* Pairs with membar_exit in tasklet_unlock. */
+ state = atomic_load_acquire(&tasklet->tl_state);
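
The diff is cut off here, inside tasklet_trylock, by the 300-line
truncation noted above.  Purely as an illustration (not the remainder
of the commit), a CAS-based trylock over the TASKLET_RUNNING bit
typically completes along these lines:

	do {
		/* Pairs with membar_exit in tasklet_unlock. */
		state = atomic_load_acquire(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;	/* already locked elsewhere */
	} while (atomic_cas_uint(&tasklet->tl_state, state,
	    state | TASKLET_RUNNING) != state);

	return true;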