Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/common/linux linux/workqueue: Draft queue_rcu_work
details: https://anonhg.NetBSD.org/src/rev/9b06ac60eafe
branches: trunk
changeset: 1028679:9b06ac60eafe
user: riastradh <riastradh@NetBSD.org>
date: Sun Dec 19 11:40:13 2021 +0000
description:
linux/workqueue: Draft queue_rcu_work.
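
For context, a caller-side sketch of how the new interface is meant to be used: deferring the free of an RCU-protected object to a workqueue thread after a grace period. The struct foo, foo_free_work, and foo_defer_free names are invented for illustration and are not part of this change; only INIT_RCU_WORK, queue_rcu_work, and the rcu_work member layout come from the diff below, and the include list is approximate.

#include <sys/kmem.h>

#include <linux/kernel.h>	/* container_of */
#include <linux/workqueue.h>

struct foo {
	struct rcu_work	f_rcu_work;	/* embedded RCU work record */
	/* ... other members, possibly still visible to RCU readers ... */
};

static void
foo_free_work(struct work_struct *work)
{
	struct rcu_work *rw = container_of(work, struct rcu_work, work);
	struct foo *f = container_of(rw, struct foo, f_rcu_work);

	/* Runs in a workqueue thread, after an RCU grace period.  */
	kmem_free(f, sizeof(*f));
}

static void
foo_defer_free(struct workqueue_struct *wq, struct foo *f)
{

	INIT_RCU_WORK(&f->f_rcu_work, foo_free_work);
	queue_rcu_work(wq, &f->f_rcu_work);
}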
diffstat:
sys/external/bsd/common/include/linux/workqueue.h | 13 +++-
sys/external/bsd/common/linux/linux_work.c | 66 ++++++++++++++++++++++-
2 files changed, 74 insertions(+), 5 deletions(-)
diffs (177 lines):
diff -r 7438032e63fe -r 9b06ac60eafe sys/external/bsd/common/include/linux/workqueue.h
--- a/sys/external/bsd/common/include/linux/workqueue.h Sun Dec 19 11:40:05 2021 +0000
+++ b/sys/external/bsd/common/include/linux/workqueue.h Sun Dec 19 11:40:13 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: workqueue.h,v 1.24 2021/12/19 11:38:03 riastradh Exp $ */
+/* $NetBSD: workqueue.h,v 1.25 2021/12/19 11:40:13 riastradh Exp $ */
/*-
* Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
@@ -39,9 +39,10 @@
#include <linux/stringify.h>
#define INIT_DELAYED_WORK linux_INIT_DELAYED_WORK
+#define INIT_RCU_WORK linux_INIT_RCU_WORK
#define INIT_WORK linux_INIT_WORK
+#define alloc_ordered_workqueue linux_alloc_ordered_workqueue
#define alloc_workqueue linux_alloc_workqueue
-#define alloc_ordered_workqueue linux_alloc_ordered_workqueue
#define cancel_delayed_work linux_cancel_delayed_work
#define cancel_delayed_work_sync linux_cancel_delayed_work_sync
#define cancel_work linux_cancel_work
@@ -54,8 +55,9 @@
#define flush_scheduled_work linux_flush_scheduled_work
#define flush_work linux_flush_work
#define flush_workqueue linux_flush_workqueue
+#define mod_delayed_work linux_mod_delayed_work
#define queue_delayed_work linux_queue_delayed_work
-#define mod_delayed_work linux_mod_delayed_work
+#define queue_rcu_work linux_queue_rcu_work
#define queue_work linux_queue_work
#define schedule_delayed_work linux_schedule_delayed_work
#define schedule_work linux_schedule_work
@@ -89,6 +91,8 @@
};
struct rcu_work {
+ struct work_struct work; /* Linux API name */
+ struct rcu_head rw_rcu;
};
#define WQ_FREEZABLE __BIT(0)
@@ -145,6 +149,9 @@
bool flush_delayed_work(struct delayed_work *);
bool delayed_work_pending(const struct delayed_work *);
+void INIT_RCU_WORK(struct rcu_work *, void (*fn)(struct work_struct *));
+void queue_rcu_work(struct workqueue_struct *, struct rcu_work *);
+
struct work_struct *
current_work(void);
diff -r 7438032e63fe -r 9b06ac60eafe sys/external/bsd/common/linux/linux_work.c
--- a/sys/external/bsd/common/linux/linux_work.c Sun Dec 19 11:40:05 2021 +0000
+++ b/sys/external/bsd/common/linux/linux_work.c Sun Dec 19 11:40:13 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: linux_work.c,v 1.54 2021/12/19 11:40:05 riastradh Exp $ */
+/* $NetBSD: linux_work.c,v 1.55 2021/12/19 11:40:14 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.54 2021/12/19 11:40:05 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.55 2021/12/19 11:40:14 riastradh Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
@@ -56,6 +56,7 @@
kmutex_t wq_lock;
kcondvar_t wq_cv;
struct dwork_head wq_delayed; /* delayed work scheduled */
+ struct work_head wq_rcu; /* RCU work scheduled */
struct work_head wq_queue; /* work to run */
struct work_head wq_dqueue; /* delayed work to run now */
struct work_struct *wq_current_work;
@@ -91,6 +92,8 @@
"struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, queue,
"struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
+SDT_PROBE_DEFINE2(sdt, linux, work, rcu,
+ "struct rcu_work *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, cancel,
"struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE3(sdt, linux, work, schedule,
@@ -260,6 +263,7 @@
mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
cv_init(&wq->wq_cv, name);
TAILQ_INIT(&wq->wq_delayed);
+ TAILQ_INIT(&wq->wq_rcu);
TAILQ_INIT(&wq->wq_queue);
TAILQ_INIT(&wq->wq_dqueue);
wq->wq_current_work = NULL;
@@ -279,6 +283,7 @@
fail0: KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
KASSERT(TAILQ_EMPTY(&wq->wq_queue));
+ KASSERT(TAILQ_EMPTY(&wq->wq_rcu));
KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
cv_destroy(&wq->wq_cv);
mutex_destroy(&wq->wq_lock);
@@ -346,6 +351,12 @@
}
mutex_exit(&wq->wq_lock);
+ /* Wait for all scheduled RCU work to complete. */
+ mutex_enter(&wq->wq_lock);
+ while (!TAILQ_EMPTY(&wq->wq_rcu))
+ cv_wait(&wq->wq_cv, &wq->wq_lock);
+ mutex_exit(&wq->wq_lock);
+
/*
* At this point, no new work can be put on the queue.
*/
@@ -364,6 +375,7 @@
KASSERT(wq->wq_current_work == NULL);
KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
KASSERT(TAILQ_EMPTY(&wq->wq_queue));
+ KASSERT(TAILQ_EMPTY(&wq->wq_rcu));
KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
cv_destroy(&wq->wq_cv);
mutex_destroy(&wq->wq_lock);
@@ -1613,3 +1625,53 @@
return work_pending(&dw->work);
}
+
+/*
+ * INIT_RCU_WORK(rw, fn)
+ *
+ * Initialize rw for use with a workqueue to call fn in a worker
+ * thread after an RCU grace period. There is no corresponding
+ * destruction operation.
+ */
+void
+INIT_RCU_WORK(struct rcu_work *rw, void (*fn)(struct work_struct *))
+{
+
+ INIT_WORK(&rw->work, fn);
+}
+
+static void
+queue_rcu_work_cb(struct rcu_head *r)
+{
+ struct rcu_work *rw = container_of(r, struct rcu_work, rw_rcu);
+ struct workqueue_struct *wq = work_queue(&rw->work);
+
+ mutex_enter(&wq->wq_lock);
+ KASSERT(work_pending(&rw->work));
+ KASSERT(work_queue(&rw->work) == wq);
+ destroy_rcu_head(&rw->rw_rcu);
+ TAILQ_REMOVE(&wq->wq_rcu, &rw->work, work_entry);
+ TAILQ_INSERT_TAIL(&wq->wq_queue, &rw->work, work_entry);
+ cv_broadcast(&wq->wq_cv);
+ SDT_PROBE2(sdt, linux, work, queue, &rw->work, wq);
+ mutex_exit(&wq->wq_lock);
+}
+
+/*
+ * queue_rcu_work(wq, rw)
+ *
+ * Schedule rw to run on wq after an RCU grace period.
+ */
+void
+queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rw)
+{
+
+ mutex_enter(&wq->wq_lock);
+ if (acquire_work(&rw->work, wq)) {
+ init_rcu_head(&rw->rw_rcu);
+ SDT_PROBE2(sdt, linux, work, rcu, rw, wq);
+ TAILQ_INSERT_TAIL(&wq->wq_rcu, &rw->work, work_entry);
+ call_rcu(&rw->rw_rcu, &queue_rcu_work_cb);
+ }
+ mutex_exit(&wq->wq_lock);
+}
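
The wq_rcu bookkeeping above also affects teardown: destroy_workqueue now waits for RCU work that is still in its grace period before draining and destroying the queue. A sketch of the resulting ordering, continuing the hypothetical foo example from above (foo_shutdown and the foo_* names are illustrative, not part of this change):

static void
foo_shutdown(struct workqueue_struct *wq, struct foo *f)
{

	foo_defer_free(wq, f);	/* queue_rcu_work under the hood */

	/*
	 * destroy_workqueue first waits for the grace period to end
	 * (queue_rcu_work_cb moves the work from wq_rcu to wq_queue),
	 * then lets the worker drain wq_queue, so foo_free_work should
	 * have completed by the time this returns.
	 */
	destroy_workqueue(wq);
}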