Source-Changes-HG archive
[src/trunk]: src/sys/net Make bpf dynamically loadable.
details: https://anonhg.NetBSD.org/src/rev/b9b4eb7a553f
branches: trunk
changeset: 751138:b9b4eb7a553f
user: pooka <pooka@NetBSD.org>
date: Mon Jan 25 22:18:17 2010 +0000
description:
Make bpf dynamically loadable.
diffstat:
sys/net/bpf.c | 69 +++++++++++++++++---
sys/net/bpf.h | 5 +-
sys/net/bpf_stub.c | 169 +++++++++++++++++++++++++++++++++++++++++++++-------
3 files changed, 207 insertions(+), 36 deletions(-)
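
For context, this change uses the standard NetBSD kernel module framework: a driver module declares itself with the MODULE() macro and supplies a <name>_modcmd handler that the kernel invokes on load and unload. A minimal sketch of that skeleton, with placeholder names ("example") rather than anything from this commit:

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/module.h>

    MODULE(MODULE_CLASS_DRIVER, example, NULL);

    static int
    example_modcmd(modcmd_t cmd, void *arg)
    {
            int error = 0;

            switch (cmd) {
            case MODULE_CMD_INIT:
                    /* one-time setup: attach devsw, install ops, ... */
                    break;
            case MODULE_CMD_FINI:
                    /* undo INIT, or refuse if unloading is unsafe */
                    error = EOPNOTSUPP;
                    break;
            default:
                    error = ENOTTY;
                    break;
            }
            return error;
    }

The bpf.c diff below instantiates exactly this shape as bpf_modcmd().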
diffs (truncated from 331 to 300 lines):
diff -r ff1e3139c3b4 -r b9b4eb7a553f sys/net/bpf.c
--- a/sys/net/bpf.c Mon Jan 25 20:33:57 2010 +0000
+++ b/sys/net/bpf.c Mon Jan 25 22:18:17 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $ */
+/* $NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $ */
/*
* Copyright (c) 1990, 1991, 1993
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $");
#if defined(_KERNEL_OPT)
#include "opt_bpf.h"
@@ -58,6 +58,8 @@
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/stat.h>
+#include <sys/module.h>
+#include <sys/once.h>
#include <sys/file.h>
#include <sys/filedesc.h>
@@ -359,13 +361,8 @@
d->bd_bif = 0;
}
-
-/*
- * bpfilterattach() is called at boot time.
- */
-/* ARGSUSED */
-void
-bpfilterattach(int n)
+static int
+doinit(void)
{
mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
@@ -375,6 +372,20 @@
bpf_gstats.bs_recv = 0;
bpf_gstats.bs_drop = 0;
bpf_gstats.bs_capt = 0;
+
+ return 0;
+}
+
+/*
+ * bpfilterattach() is called at boot time.
+ */
+/* ARGSUSED */
+void
+bpfilterattach(int n)
+{
+ static ONCE_DECL(control);
+
+ RUN_ONCE(&control, doinit);
}
/*
@@ -1910,9 +1921,43 @@
.bpf_mtap_sl_out = bpf_mtap_sl_out,
};
-void
-bpf_setops()
+MODULE(MODULE_CLASS_DRIVER, bpf, NULL);
+
+static int
+bpf_modcmd(modcmd_t cmd, void *arg)
{
+ devmajor_t bmajor, cmajor;
+ int error;
- bpf_ops = &bpf_ops_kernel;
+ bmajor = cmajor = NODEVMAJOR;
+
+ switch (cmd) {
+ case MODULE_CMD_INIT:
+ bpfilterattach(0);
+ error = devsw_attach("bpf", NULL, &bmajor,
+ &bpf_cdevsw, &cmajor);
+ if (error == EEXIST)
+ error = 0; /* maybe built-in ... improve eventually */
+ if (error)
+ break;
+
+ bpf_ops_handover_enter(&bpf_ops_kernel);
+ atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
+ bpf_ops_handover_exit();
+ break;
+
+ case MODULE_CMD_FINI:
+ /*
+ * bpf_ops is not (yet) referenced in the callers before
+ * attach. maybe other issues too. "safety first".
+ */
+ error = EOPNOTSUPP;
+ break;
+
+ default:
+ error = ENOTTY;
+ break;
+ }
+
+ return error;
}
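
The ONCE_DECL/RUN_ONCE change above is what makes the two entry paths safe: bpfilterattach() can now be reached both from boot-time pseudo-device attach and from bpf_modcmd(), and the body in doinit() runs exactly once no matter which caller arrives first. The pattern in isolation (illustrative names, not from this commit):

    #include <sys/param.h>
    #include <sys/once.h>

    static int
    example_init(void)
    {
            /* one-time setup: mutexes, statistics, ... */
            return 0;
    }

    void
    example_attach(int n)
    {
            static ONCE_DECL(control);

            /* first caller runs example_init(); later callers return at once */
            RUN_ONCE(&control, example_init);
    }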
diff -r ff1e3139c3b4 -r b9b4eb7a553f sys/net/bpf.h
--- a/sys/net/bpf.h Mon Jan 25 20:33:57 2010 +0000
+++ b/sys/net/bpf.h Mon Jan 25 22:18:17 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bpf.h,v 1.52 2010/01/19 22:08:00 pooka Exp $ */
+/* $NetBSD: bpf.h,v 1.53 2010/01/25 22:18:17 pooka Exp $ */
/*
* Copyright (c) 1990, 1991, 1993
@@ -276,6 +276,9 @@
extern struct bpf_ops *bpf_ops;
void bpf_setops(void);
+void bpf_ops_handover_enter(struct bpf_ops *);
+void bpf_ops_handover_exit(void);
+
void bpfilterattach(int);
int bpf_validate(struct bpf_insn *, int);
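
Callers reach bpf exclusively through the bpf_ops vector declared here, which is what makes the stub/real split work: the same call site runs against bpf_ops_stub before the module loads and against bpf_ops_kernel afterwards. A hypothetical consumer, for illustration only (the real call sites live in the interface drivers):

    #include <sys/param.h>
    #include <net/if.h>
    #include <net/bpf.h>

    static void
    example_ifattach(struct ifnet *ifp)
    {
            /* DLT_EN10MB/14: the usual Ethernet link type and header length */
            bpf_ops->bpf_attach(ifp, DLT_EN10MB, 14, &ifp->if_bpf);
    }

The two new handover functions bracket the atomic_swap_ptr() seen in bpf_modcmd() above, so a consumer calling through the vector either completes entirely on the old ops or waits and completes on the new ones.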
diff -r ff1e3139c3b4 -r b9b4eb7a553f sys/net/bpf_stub.c
--- a/sys/net/bpf_stub.c Mon Jan 25 20:33:57 2010 +0000
+++ b/sys/net/bpf_stub.c Mon Jan 25 22:18:17 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bpf_stub.c,v 1.3 2010/01/19 23:11:10 pooka Exp $ */
+/* $NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $ */
/*
* Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -27,18 +27,119 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.3 2010/01/19 23:11:10 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpf_stub.c,v 1.4 2010/01/25 22:18:17 pooka Exp $");
#include <sys/param.h>
+#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <net/bpf.h>
+struct laglist {
+ struct ifnet *lag_ifp;
+ u_int lag_dlt;
+ u_int lag_hlen;
+ struct bpf_if **lag_drvp;
+
+ TAILQ_ENTRY(laglist) lag_entries;
+};
+
+static TAILQ_HEAD(, laglist) lagdrvs = TAILQ_HEAD_INITIALIZER(lagdrvs);
+
+static void bpf_stub_attach(struct ifnet *, u_int, u_int, struct bpf_if **);
+static void bpf_stub_detach(struct ifnet *);
+
+static void bpf_stub_null(void);
+static void bpf_stub_warn(void);
+
+static kmutex_t handovermtx;
+static kcondvar_t handovercv;
+static bool handover;
+
+struct bpf_ops bpf_ops_stub = {
+ .bpf_attach = bpf_stub_attach,
+ .bpf_detach = bpf_stub_detach,
+ .bpf_change_type = (void *)bpf_stub_null,
+
+ .bpf_tap = (void *)bpf_stub_warn,
+ .bpf_mtap = (void *)bpf_stub_warn,
+ .bpf_mtap2 = (void *)bpf_stub_warn,
+ .bpf_mtap_af = (void *)bpf_stub_warn,
+ .bpf_mtap_et = (void *)bpf_stub_warn,
+ .bpf_mtap_sl_in = (void *)bpf_stub_warn,
+ .bpf_mtap_sl_out = (void *)bpf_stub_warn,
+};
+struct bpf_ops *bpf_ops;
+
static void
-bpf_stub_attach(struct ifnet *ipf, u_int dlt, u_int hlen, struct bpf_if **drvp)
+bpf_stub_attach(struct ifnet *ifp, u_int dlt, u_int hlen, struct bpf_if **drvp)
{
+ struct laglist *lag;
+ bool storeattach = true;
- *drvp = NULL;
+ lag = kmem_alloc(sizeof(*lag), KM_SLEEP);
+ lag->lag_ifp = ifp;
+ lag->lag_dlt = dlt;
+ lag->lag_hlen = hlen;
+ lag->lag_drvp = drvp;
+
+ mutex_enter(&handovermtx);
+ /*
+ * If handover is in progress, wait for it to finish and complete
+ * attach after that. Otherwise record ourselves.
+ */
+ while (handover) {
+ storeattach = false;
+ cv_wait(&handovercv, &handovermtx);
+ }
+
+ if (storeattach == false) {
+ mutex_exit(&handovermtx);
+ kmem_free(lag, sizeof(*lag));
+ KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
+ bpf_ops->bpf_attach(ifp, dlt, hlen, drvp);
+ } else {
+ *drvp = NULL;
+ TAILQ_INSERT_TAIL(&lagdrvs, lag, lag_entries);
+ mutex_exit(&handovermtx);
+ }
+}
+
+static void
+bpf_stub_detach(struct ifnet *ifp)
+{
+ TAILQ_HEAD(, laglist) rmlist;
+ struct laglist *lag, *lag_next;
+ bool didhand;
+
+ TAILQ_INIT(&rmlist);
+
+ didhand = false;
+ mutex_enter(&handovermtx);
+ while (handover) {
+ didhand = true;
+ cv_wait(&handovercv, &handovermtx);
+ }
+
+ if (didhand == false) {
+ /* atomically remove all */
+ for (lag = TAILQ_FIRST(&lagdrvs); lag; lag = lag_next) {
+ lag_next = TAILQ_NEXT(lag, lag_entries);
+ if (lag->lag_ifp == ifp) {
+ TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
+ TAILQ_INSERT_HEAD(&rmlist, lag, lag_entries);
+ }
+ }
+ mutex_exit(&handovermtx);
+ while ((lag = TAILQ_FIRST(&rmlist)) != NULL) {
+ TAILQ_REMOVE(&rmlist, lag, lag_entries);
+ kmem_free(lag, sizeof(*lag));
+ }
+ } else {
+ mutex_exit(&handovermtx);
+ KASSERT(bpf_ops != &bpf_ops_stub); /* revisit when unloadable */
+ bpf_ops->bpf_detach(ifp);
+ }
}
static void
@@ -59,27 +160,49 @@
#endif
}
-struct bpf_ops bpf_ops_stub = {
- .bpf_attach = bpf_stub_attach,
- .bpf_detach = (void *)bpf_stub_null,
- .bpf_change_type = (void *)bpf_stub_null,
-
- .bpf_tap = (void *)bpf_stub_warn,
- .bpf_mtap = (void *)bpf_stub_warn,
- .bpf_mtap2 = (void *)bpf_stub_warn,
- .bpf_mtap_af = (void *)bpf_stub_warn,
- .bpf_mtap_et = (void *)bpf_stub_warn,
- .bpf_mtap_sl_in = (void *)bpf_stub_warn,
- .bpf_mtap_sl_out = (void *)bpf_stub_warn,
-};
-
-struct bpf_ops *bpf_ops;
-
-void bpf_setops_stub(void);
void
-bpf_setops_stub()
+bpf_setops()
{
+ mutex_init(&handovermtx, MUTEX_DEFAULT, IPL_NONE);
+ cv_init(&handovercv, "bpfops");
bpf_ops = &bpf_ops_stub;
}
-__weak_alias(bpf_setops,bpf_setops_stub);
+
+/*
+ * Party's over, prepare for handover.
+ * It needs to happen *before* bpf_ops is set to make it atomic
+ * to callers (see also stub implementations, which wait if
+ * called during handover). The likelihood of seeing a full
+ * attach-detach *during* handover comes close to astronomical,
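
The diff is cut off at this point (truncated from 331 to 300 lines), so the bodies of bpf_ops_handover_enter() and bpf_ops_handover_exit() are not shown. Using the handover/handovermtx/handovercv declarations from the stub above, a gate of roughly this shape would provide what the stub functions wait on; this is a sketch of the pattern, not the committed code:

    void
    bpf_ops_handover_enter(struct bpf_ops *newops)
    {
            struct laglist *lag;

            mutex_enter(&handovermtx);
            handover = true;        /* stub attach/detach now block in cv_wait */

            /* replay attaches the stub deferred while no real bpf existed */
            while ((lag = TAILQ_FIRST(&lagdrvs)) != NULL) {
                    TAILQ_REMOVE(&lagdrvs, lag, lag_entries);
                    mutex_exit(&handovermtx);
                    newops->bpf_attach(lag->lag_ifp, lag->lag_dlt,
                        lag->lag_hlen, lag->lag_drvp);
                    kmem_free(lag, sizeof(*lag));
                    mutex_enter(&handovermtx);
            }
            mutex_exit(&handovermtx);
    }

    void
    bpf_ops_handover_exit(void)
    {

            mutex_enter(&handovermtx);
            handover = false;
            cv_broadcast(&handovercv);      /* release blocked stub callers */
            mutex_exit(&handovermtx);
    }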