Source-Changes-HG archive
[src/trunk]: src/sys/netinet wqinput: avoid having struct wqinput_worklist di...
details: https://anonhg.NetBSD.org/src/rev/b92ce62d28ba
branches: trunk
changeset: 459632:b92ce62d28ba
user: ozaki-r <ozaki-r%NetBSD.org@localhost>
date: Thu Sep 19 04:09:34 2019 +0000
description:
wqinput: avoid having struct wqinput_worklist directly in a percpu storage
percpu(9) maintains a memory storage area for each CPU and hands pieces of it out to users. If the storage runs short, percpu(9) enlarges it by allocating new, larger memory areas, replacing the old ones with them, and destroying the old ones.
A percpu storage area referenced by a pointer obtained via percpu_getref can therefore be destroyed by this mechanism once the running thread sleeps, even if percpu_putref has not been called yet.
Input handlers of wqinput normally involve sleepable operations, so we must avoid dereferencing percpu data (struct wqinput_worklist) after executing an input handler. Address this by storing only a pointer to the data in the percpu storage instead.
Reviewed by knakahara@ and yamaguchi@
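For context, here is a minimal standalone sketch (not part of the commit) of the pattern the diff below introduces: the percpu slot holds only a pointer, and the real per-CPU structure is allocated separately with kmem_zalloc(9), so it stays at a stable address even if percpu(9) reallocates its backing storage while the owning thread sleeps. The struct and function names (example_pcpu, example_alloc, and so on) are hypothetical.

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/percpu.h>

struct example_pcpu {
	uint64_t	ep_count;
};

static void
example_init_cpu(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct example_pcpu **epp = p;

	/* Allocate the real per-CPU object outside the percpu storage. */
	*epp = kmem_zalloc(sizeof(**epp), KM_SLEEP);
}

static percpu_t *
example_alloc(void)
{
	percpu_t *pc;

	/* The percpu storage itself holds only a pointer per CPU ... */
	pc = percpu_alloc(sizeof(struct example_pcpu *));
	/* ... and each CPU's slot is filled with a stable allocation. */
	percpu_foreach(pc, example_init_cpu, NULL);
	return pc;
}

static struct example_pcpu *
example_getref(percpu_t *pc)
{

	/* Dereference the slot; the pointed-to object is never moved by percpu(9). */
	return *(struct example_pcpu **)percpu_getref(pc);
}

static void
example_putref(percpu_t *pc)
{

	percpu_putref(pc);
}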
diffstat:
sys/netinet/wqinput.c | 38 +++++++++++++++++++++++++++++++-------
1 files changed, 31 insertions(+), 7 deletions(-)
diffs (92 lines):
diff -r a4b1b3b07dc0 -r b92ce62d28ba sys/netinet/wqinput.c
--- a/sys/netinet/wqinput.c Thu Sep 19 04:08:29 2019 +0000
+++ b/sys/netinet/wqinput.c Thu Sep 19 04:09:34 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: wqinput.c,v 1.5 2018/08/10 07:20:59 msaitoh Exp $ */
+/* $NetBSD: wqinput.c,v 1.6 2019/09/19 04:09:34 ozaki-r Exp $ */
/*-
* Copyright (c) 2017 Internet Initiative Japan Inc.
@@ -80,7 +80,8 @@
static void
wqinput_drops(void *p, void *arg, struct cpu_info *ci __unused)
{
- struct wqinput_worklist *const wwl = p;
+ struct wqinput_worklist **const wwlp = p;
+ struct wqinput_worklist *const wwl = *wwlp;
uint64_t *sum = arg;
*sum += wwl->wwl_dropped;
@@ -148,6 +149,28 @@
return;
}
+static struct wqinput_worklist *
+wqinput_percpu_getref(percpu_t *pc)
+{
+
+ return *(struct wqinput_worklist **)percpu_getref(pc);
+}
+
+static void
+wqinput_percpu_putref(percpu_t *pc)
+{
+
+ percpu_putref(pc);
+}
+
+static void
+wqinput_percpu_init_cpu(void *p, void *arg __unused, struct cpu_info *ci __unused)
+{
+ struct wqinput_worklist **wwlp = p;
+
+ *wwlp = kmem_zalloc(sizeof(**wwlp), KM_SLEEP);
+}
+
struct wqinput *
wqinput_create(const char *name, void (*func)(struct mbuf *, int, int))
{
@@ -165,7 +188,8 @@
panic("%s: workqueue_create failed (%d)\n", __func__, error);
pool_init(&wqi->wqi_work_pool, sizeof(struct wqinput_work), 0, 0, 0,
name, NULL, IPL_SOFTNET);
- wqi->wqi_worklists = percpu_alloc(sizeof(struct wqinput_worklist));
+ wqi->wqi_worklists = percpu_alloc(sizeof(struct wqinput_worklist *));
+ percpu_foreach(wqi->wqi_worklists, wqinput_percpu_init_cpu, NULL);
wqi->wqi_input = func;
wqinput_sysctl_setup(name, wqi);
@@ -207,7 +231,7 @@
/* Users expect to run at IPL_SOFTNET */
s = splsoftnet();
/* This also prevents LWP migrations between CPUs */
- wwl = percpu_getref(wqi->wqi_worklists);
+ wwl = wqinput_percpu_getref(wqi->wqi_worklists);
/* We can allow enqueuing another work at this point */
wwl->wwl_wq_is_active = false;
@@ -222,7 +246,7 @@
pool_put(&wqi->wqi_work_pool, work);
}
- percpu_putref(wqi->wqi_worklists);
+ wqinput_percpu_putref(wqi->wqi_worklists);
splx(s);
}
@@ -245,7 +269,7 @@
struct wqinput_work *work;
struct wqinput_worklist *wwl;
- wwl = percpu_getref(wqi->wqi_worklists);
+ wwl = wqinput_percpu_getref(wqi->wqi_worklists);
/* Prevent too much work and mbuf from being queued */
if (wwl->wwl_len >= WQINPUT_LIST_MAXLEN) {
@@ -274,5 +298,5 @@
workqueue_enqueue(wqi->wqi_wq, &wwl->wwl_work, NULL);
out:
- percpu_putref(wqi->wqi_worklists);
+ wqinput_percpu_putref(wqi->wqi_worklists);
}