Source-Changes-HG archive
[src/trunk]: src/sys/external/bsd/common/linux Document workqueue semantics as I understand it.
details: https://anonhg.NetBSD.org/src/rev/45891f306829
branches: trunk
changeset: 366366:45891f306829
user: riastradh <riastradh%NetBSD.org@localhost>
date: Mon Aug 27 15:05:16 2018 +0000
description:
Document workqueue semantics as I understand it.
diffstat:
sys/external/bsd/common/linux/linux_work.c | 199 ++++++++++++++++++++++++++++-
1 files changed, 195 insertions(+), 4 deletions(-)
diffs (truncated from 388 to 300 lines):
diff -r 3a57d5a5f130 -r 45891f306829 sys/external/bsd/common/linux/linux_work.c
--- a/sys/external/bsd/common/linux/linux_work.c Mon Aug 27 15:05:01 2018 +0000
+++ b/sys/external/bsd/common/linux/linux_work.c Mon Aug 27 15:05:16 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: linux_work.c,v 1.35 2018/08/27 15:05:01 riastradh Exp $ */
+/* $NetBSD: linux_work.c,v 1.36 2018/08/27 15:05:16 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.35 2018/08/27 15:05:01 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.36 2018/08/27 15:05:16 riastradh Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
@@ -80,6 +80,12 @@
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;
+/*
+ * linux_workqueue_init()
+ *
+ * Initialize the Linux workqueue subsystem. Return 0 on success,
+ * NetBSD error on failure.
+ */
int
linux_workqueue_init(void)
{
@@ -118,6 +124,11 @@
return error;
}
+/*
+ * linux_workqueue_fini()
+ *
+ * Destroy the Linux workqueue subsystem. Never fails.
+ */
void
linux_workqueue_fini(void)
{
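For illustration, the intended pairing of these two is something like the following (hypothetical module hooks, not part of this change):

static int
foo_module_init(void)
{
    int error;

    error = linux_workqueue_init();
    if (error)
        return error;    /* NetBSD errno on failure */
    /* ... initialize the rest of the module ... */
    return 0;
}

static void
foo_module_fini(void)
{
    /* ... quiesce all users of the workqueues first ... */
    linux_workqueue_fini();    /* never fails */
}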
@@ -132,6 +143,13 @@
* Workqueues
*/
+/*
+ * alloc_ordered_workqueue(name, flags)
+ *
+ * Create a workqueue of the given name. No flags are currently
+ * defined. Return NULL on failure, pointer to struct
+ * workqueue_struct object on success.
+ */
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
@@ -169,6 +187,14 @@
return NULL;
}
+/*
+ * destroy_workqueue(wq)
+ *
+ * Destroy a workqueue created with alloc_ordered_workqueue.
+ * Cancel any pending delayed work. Wait for all queued work to
+ * complete.
+ *
+ * May sleep.
+ */
void
destroy_workqueue(struct workqueue_struct *wq)
{
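A sketch of the lifecycle these two comments describe (the workqueue name and the attach/detach hooks are hypothetical):

static struct workqueue_struct *foo_wq;

static int
foo_attach(void)
{
    foo_wq = alloc_ordered_workqueue("foowq", 0);    /* no flags defined */
    if (foo_wq == NULL)
        return ENOMEM;    /* assumption: report failure as ENOMEM */
    return 0;
}

static void
foo_detach(void)
{
    /* Cancels pending delayed work, then waits for queued work.  */
    destroy_workqueue(foo_wq);    /* may sleep */
    foo_wq = NULL;
}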
@@ -237,6 +263,14 @@
* Work thread and callout
*/
+/*
+ * linux_workqueue_thread(cookie)
+ *
+ * Main function for a workqueue's worker thread. Waits until
+ * there is work queued, grabs a batch of work off the queue,
+ * executes it all, bumps the generation number, and repeats,
+ * until dying.
+ */
static void __dead
linux_workqueue_thread(void *cookie)
{
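In code form, the loop described above is roughly the following simplified sketch; apart from wq_lock, the field names are guesses, and the generation bookkeeping is reduced to a comment:

for (;;) {
    struct work_head batch;    /* assumed TAILQ head type */

    mutex_enter(&wq->wq_lock);
    while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
        cv_wait(&wq->wq_cv, &wq->wq_lock);
    if (wq->wq_dying && TAILQ_EMPTY(&wq->wq_queue)) {
        mutex_exit(&wq->wq_lock);
        break;
    }
    /* Take the whole batch, so later arrivals wait a generation.  */
    TAILQ_INIT(&batch);
    TAILQ_CONCAT(&batch, &wq->wq_queue, work_entry);
    mutex_exit(&wq->wq_lock);

    /* ... run each item's func, then bump the generation number
     * and wake anyone waiting in flush_workqueue ... */
}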
@@ -295,6 +329,16 @@
kthread_exit(0);
}
+/*
+ * linux_workqueue_timeout(cookie)
+ *
+ * Delayed work timeout callback.
+ *
+ * - If scheduled, queue it.
+ * - If rescheduled, callout_schedule ourselves again.
+ * - If cancelled, destroy the callout and release the work from
+ * the workqueue.
+ */
static void
linux_workqueue_timeout(void *cookie)
{
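The three bullets above, rendered as a sketch; the state and field names here are assumptions for illustration, not necessarily the file's actual identifiers:

mutex_enter(&wq->wq_lock);
switch (dw->dw_state) {
case DELAYED_WORK_SCHEDULED:        /* timer fired normally */
    dw->dw_state = DELAYED_WORK_IDLE;
    TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
    cv_broadcast(&wq->wq_cv);
    break;
case DELAYED_WORK_RESCHEDULED:      /* retarget the callout */
    dw->dw_state = DELAYED_WORK_SCHEDULED;
    callout_schedule(&dw->dw_callout, dw->dw_newticks);
    break;
case DELAYED_WORK_CANCELLED:        /* tear it all down */
    callout_destroy(&dw->dw_callout);
    release_work(&dw->work, wq);
    break;
}
mutex_exit(&wq->wq_lock);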
@@ -331,6 +375,12 @@
out: mutex_exit(&wq->wq_lock);
}
+/*
+ * current_work()
+ *
+ * If in a workqueue worker thread, return the work it is
+ * currently executing. Otherwise return NULL.
+ */
struct work_struct *
current_work(void)
{
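This is mainly useful for assertions, e.g. (hypothetical softc and work member):

/* We must be running as sc's worker, not in arbitrary context.  */
KASSERT(current_work() == &sc->sc_task);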
@@ -352,6 +402,12 @@
* Work
*/
+/*
+ * INIT_WORK(work, fn)
+ *
+ * Initialize work for use with a workqueue to call fn in a worker
+ * thread. There is no corresponding destruction operation.
+ */
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{
@@ -360,6 +416,16 @@
work->func = fn;
}
+/*
+ * acquire_work(work, wq)
+ *
+ * Try to associate work with wq. If work is already on a
+ * workqueue, return that workqueue. Otherwise, set work's queue
+ * to wq, issue a memory barrier to match any prior release_work,
+ * and return NULL.
+ *
+ * Caller must hold wq's lock.
+ */
static struct workqueue_struct *
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
@@ -376,6 +442,14 @@
return wq0;
}
+/*
+ * release_work(work, wq)
+ *
+ * Issue a memory barrier to match any subsequent acquire_work and
+ * dissociate work from wq.
+ *
+ * Caller must hold wq's lock and work must be associated with wq.
+ */
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{
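The two barriers pair up in the usual release/acquire pattern; schematically (a generic illustration of the protocol, not the file's exact code):

/* release_work side: all prior stores to the work item become
 * visible before the queue pointer is cleared.  */
membar_exit();
work->work_queue = NULL;

/* acquire_work side: if we observe the pointer NULL and claim it,
 * the matching barrier guarantees we also see those stores.  */
if (atomic_cas_ptr(&work->work_queue, NULL, wq) == NULL)
    membar_enter();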
@@ -387,6 +461,17 @@
work->work_queue = NULL;
}
+/*
+ * schedule_work(work)
+ *
+ * If work is not already queued on system_wq, queue it to be run
+ * by system_wq's worker thread when it next can. Return true if
+ * it was newly queued, false if it was already queued. If the
+ * work was already running, queue it to run again.
+ *
+ * Caller must ensure work is not queued to run on a different
+ * workqueue.
+ */
bool
schedule_work(struct work_struct *work)
{
@@ -394,6 +479,17 @@
return queue_work(system_wq, work);
}
+/*
+ * queue_work(wq, work)
+ *
+ * If work is not already queued on wq, queue it to be run by wq's
+ * worker thread when it next can. Return true if it was newly
+ * queued, false if it was already queued. If the work was already
+ * running, queue it to run again.
+ *
+ * Caller must ensure work is not queued to run on a different
+ * workqueue.
+ */
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
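Taken together with INIT_WORK above, typical usage looks like this (the softc, task function, and interrupt context are hypothetical):

struct foo_softc {
    struct work_struct sc_task;
    /* ... */
};

static void
foo_task(struct work_struct *work)
{
    struct foo_softc *sc = container_of(work, struct foo_softc, sc_task);

    /* Runs in the workqueue's worker thread.  */
}

/* Once, at attach time: */
INIT_WORK(&sc->sc_task, foo_task);

/* Later, e.g. from an interrupt handler: */
if (!schedule_work(&sc->sc_task)) {
    /* Already queued; it will still run.  */
}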
@@ -443,6 +539,15 @@
return newly_queued;
}
+/*
+ * cancel_work(work)
+ *
+ * If work was queued, remove it from the queue and return true.
+ * If work was not queued, return false. Note that work may
+ * already be running; if it hasn't been requeued, then
+ * cancel_work will return false, and either way, cancel_work will
+ * NOT wait for the work to complete.
+ */
bool
cancel_work(struct work_struct *work)
{
@@ -480,6 +585,17 @@
out: return cancelled_p;
}
+/*
+ * cancel_work_sync(work)
+ *
+ * If work was queued, remove it from the queue and return true.
+ * If work was not queued, return false. Note that work may
+ * already be running; if it hasn't been requeued, then
+ * cancel_work_sync will return false; either way, if work was
+ * currently running, wait for it to complete.
+ *
+ * May sleep.
+ */
bool
cancel_work_sync(struct work_struct *work)
{
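The difference between the two matters at detach time; a sketch (hypothetical caller, continuing the foo_task example above):

/*
 * cancel_work alone could return with foo_task still running, so
 * use the _sync variant before freeing anything the task touches.
 */
(void)cancel_work_sync(&sc->sc_task);    /* may sleep */
/* Now it is safe to free sc.  */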
@@ -543,6 +659,13 @@
* Delayed work
*/
+/*
+ * INIT_DELAYED_WORK(dw, fn)
+ *
+ * Initialize dw for use with a workqueue to call fn in a worker
+ * thread after a delay. There is no corresponding destruction
+ * operation.
+ */
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{
@@ -559,6 +682,17 @@
*/
}
+/*
+ * schedule_delayed_work(dw, ticks)
+ *
+ * If it is not currently scheduled, schedule dw to run after
+ * ticks on system_wq. If currently executing and not already
+ * rescheduled, reschedule it. Return true if it was newly
+ * scheduled, false if it was already scheduled.
+ *
+ * If ticks == 0, queue it to run as soon as the worker can,
+ * without waiting for the next callout tick to run.
+ */
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{
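For comparison with plain schedule_work, a delayed-work caller might look like this (hypothetical names; queue_delayed_work below is the same with an explicit wq argument):

INIT_DELAYED_WORK(&sc->sc_tick, foo_tick);

/* Run foo_tick on system_wq after roughly one second.  */
(void)schedule_delayed_work(&sc->sc_tick, mstohz(1000));

/* ticks == 0: hand it straight to the worker, no callout wait.  */
(void)schedule_delayed_work(&sc->sc_tick, 0);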
@@ -631,8 +765,11 @@
* queue_delayed_work(wq, dw, ticks)
*
* If it is not currently scheduled, schedule dw to run after
- * ticks. If currently executing and not already rescheduled,
- * reschedule it. If ticks == 0, run without delay.
+ * ticks on wq. If currently executing and not already
+ * rescheduled, reschedule it.
+ *
+ * If ticks == 0, queue it to run as soon as the worker can,
+ * without waiting for the next callout tick to run.
*/
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
@@ -756,6 +893,9 @@
* Schedule dw to run after ticks. If currently scheduled,
* reschedule it. If currently executing, reschedule it. If
* ticks == 0, run without delay.
+ *
+ * Return true if it modified the timer of an already scheduled
+ * work, false if it newly scheduled the work.
*/
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
@@ -950,6 +1090,16 @@
return timer_modified;
}
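Because mod_delayed_work retargets a timer that is already pending, it is the natural "pat the watchdog" primitive; a sketch (hypothetical softc member):

bool modified;

/* Push the watchdog out another five seconds, whether or not it
 * was already ticking.  */
modified = mod_delayed_work(system_wq, &sc->sc_watchdog, 5*hz);
if (!modified) {
    /* It was idle and has now been newly scheduled.  */
}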
+/*
+ * cancel_delayed_work(dw)
+ *
+ * If work was scheduled or queued, remove it from the schedule or
+ * queue and return true. If work was not scheduled or queued,
+ * return false. Note that work may already be running; if it
+ * hasn't been rescheduled or requeued, then cancel_delayed_work
+ * will return false, and either way, cancel_delayed_work will NOT