Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/x86/pci vmx(4) can select workqueue for packet processing like ixg(4).
details: https://anonhg.NetBSD.org/src/rev/e0f2ffa254b9
branches: trunk
changeset: 452991:e0f2ffa254b9
user: knakahara <knakahara%NetBSD.org@localhost>
date: Tue Jul 30 11:16:15 2019 +0000
description:
vmx(4) can select workqueue for packet processing like ixg(4).
diffstat:
sys/arch/x86/pci/if_vmx.c | 60 +++++++++++++++++++++++++++++++++++++++++-----
1 files changed, 53 insertions(+), 7 deletions(-)
diffs (177 lines):
diff -r 29fc002a2ae4 -r e0f2ffa254b9 sys/arch/x86/pci/if_vmx.c
--- a/sys/arch/x86/pci/if_vmx.c Tue Jul 30 11:11:15 2019 +0000
+++ b/sys/arch/x86/pci/if_vmx.c Tue Jul 30 11:16:15 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vmx.c,v 1.44 2019/07/29 10:28:57 knakahara Exp $ */
+/* $NetBSD: if_vmx.c,v 1.45 2019/07/30 11:16:15 knakahara Exp $ */
/* $OpenBSD: if_vmx.c,v 1.16 2014/01/22 06:04:17 brad Exp $ */
/*
@@ -19,7 +19,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.44 2019/07/29 10:28:57 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vmx.c,v 1.45 2019/07/30 11:16:15 knakahara Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -30,6 +30,7 @@
#include <sys/mbuf.h>
#include <sys/sockio.h>
#include <sys/pcq.h>
+#include <sys/workqueue.h>
#include <net/bpf.h>
#include <net/if.h>
@@ -96,6 +97,8 @@
#define VMXNET3_RX_PROCESS_LIMIT 256
#define VMXNET3_TX_PROCESS_LIMIT 256
+#define VMXNET3_WORKQUEUE_PRI PRI_SOFTNET
+
/*
* IP protocols that we can perform Tx checksum offloading of.
*/
@@ -225,6 +228,8 @@
struct vmxnet3_rxqueue vxq_rxqueue;
void *vxq_si;
+ bool vxq_workqueue;
+ struct work vxq_wq_cookie;
};
struct vmxnet3_statistics {
@@ -291,8 +296,10 @@
u_int vmx_tx_intr_process_limit;
u_int vmx_rx_process_limit;
u_int vmx_tx_process_limit;
-
struct sysctllog *vmx_sysctllog;
+
+ bool vmx_txrx_workqueue;
+ struct workqueue *vmx_queue_wq;
};
#define VMXNET3_STAT
@@ -378,6 +385,7 @@
int vmxnet3_legacy_intr(void *);
int vmxnet3_txrxq_intr(void *);
void vmxnet3_handle_queue(void *);
+void vmxnet3_handle_queue_work(struct work *, void *);
int vmxnet3_event_intr(void *);
void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
@@ -849,6 +857,7 @@
pci_chipset_tag_t pc = sc->vmx_pc;
int i;
+ workqueue_destroy(sc->vmx_queue_wq);
for (i = 0; i < sc->vmx_nintrs; i++) {
struct vmxnet3_queue *vmxq = &sc->vmx_queue[i];
@@ -866,7 +875,7 @@
struct vmxnet3_queue *vmxq;
pci_intr_handle_t *intr;
void **ihs;
- int intr_idx, i, use_queues;
+ int intr_idx, i, use_queues, error;
const char *intrstr;
char intrbuf[PCI_INTRSTR_LEN];
char xnamebuf[32];
@@ -904,6 +913,15 @@
vmxq->vxq_intr_idx = intr_idx;
}
+ snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(sc->vmx_dev));
+ error = workqueue_create(&sc->vmx_queue_wq, xnamebuf,
+ vmxnet3_handle_queue_work, sc, VMXNET3_WORKQUEUE_PRI, IPL_NET,
+ WQ_PERCPU | WQ_MPSAFE);
+ if (error) {
+ aprint_error_dev(sc->vmx_dev, "workqueue_create failed\n");
+ return (-1);
+ }
+ sc->vmx_txrx_workqueue = false;
intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
@@ -1839,6 +1857,12 @@
NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
if (error)
goto out;
+ error = sysctl_createv(log, 0, &rnode, NULL,
+ CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
+ SYSCTL_DESCR("Use workqueue for packet processing"),
+ NULL, 0, &sc->vmx_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
+ if (error)
+ goto out;
error = sysctl_createv(log, 0, &rnode, &rxnode,
0, CTLTYPE_NODE, "rx",
@@ -2305,6 +2329,18 @@
return more;
}
+static inline void
+vmxnet3_sched_handle_queue(struct vmxnet3_softc *sc, struct vmxnet3_queue *vmxq)
+{
+
+ if (vmxq->vxq_workqueue) {
+ workqueue_enqueue(sc->vmx_queue_wq, &vmxq->vxq_wq_cookie,
+ curcpu());
+ } else {
+ softint_schedule(vmxq->vxq_si);
+ }
+}
+
int
vmxnet3_legacy_intr(void *xsc)
{
@@ -2339,7 +2375,7 @@
VMXNET3_TXQ_UNLOCK(txq);
if (txmore || rxmore) {
- softint_schedule(sc->vmx_queue[0].vxq_si);
+ vmxnet3_sched_handle_queue(sc, &sc->vmx_queue[0]);
} else {
if_schedule_deferred_start(&sc->vmx_ethercom.ec_if);
vmxnet3_enable_all_intrs(sc);
@@ -2363,6 +2399,7 @@
sc = txq->vxtxq_sc;
txlimit = sc->vmx_tx_intr_process_limit;
rxlimit = sc->vmx_rx_intr_process_limit;
+ vmxq->vxq_workqueue = sc->vmx_txrx_workqueue;
if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(sc, vmxq->vxq_intr_idx);
@@ -2376,7 +2413,7 @@
VMXNET3_RXQ_UNLOCK(rxq);
if (txmore || rxmore) {
- softint_schedule(vmxq->vxq_si);
+ vmxnet3_sched_handle_queue(sc, vmxq);
} else {
/* for ALTQ */
if (vmxq->vxq_id == 0)
@@ -2419,11 +2456,20 @@
VMXNET3_RXQ_UNLOCK(rxq);
if (txmore || rxmore)
- softint_schedule(vmxq->vxq_si);
+ vmxnet3_sched_handle_queue(sc, vmxq);
else
vmxnet3_enable_intr(sc, vmxq->vxq_intr_idx);
}
+void
+vmxnet3_handle_queue_work(struct work *wk, void *context)
+{
+ struct vmxnet3_queue *vmxq;
+
+ vmxq = container_of(wk, struct vmxnet3_queue, vxq_wq_cookie);
+ vmxnet3_handle_queue(vmxq);
+}
+
int
vmxnet3_event_intr(void *xsc)
{
Home |
Main Index |
Thread Index |
Old Index