Source-Changes-HG archive
[src/trunk]: src/sys/dev/pci vioif(4): added new data structure for network q...
details: https://anonhg.NetBSD.org/src/rev/0e78da987f00
branches: trunk
changeset: 373967:0e78da987f00
user: yamaguchi <yamaguchi%NetBSD.org@localhost>
date: Thu Mar 23 02:42:49 2023 +0000
description:
vioif(4): added a new data structure for network queues
and moved the parameters common to vioif_txqueue and
vioif_rxqueue into the new structure
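In outline, the patch folds the fields common to vioif_txqueue and
vioif_rxqueue into a single struct vioif_netqueue, and hangs the
direction-specific leftovers off an opaque netq_ctx pointer as small
per-direction context structures. A condensed sketch of the resulting
layout (abridged; the full definitions appear in the diff below):

struct vioif_netqueue {
	kmutex_t		 netq_lock;	/* one spin mutex per queue */
	struct virtqueue	*netq_vq;
	bool			 netq_stopping;
	bool			 netq_running_handle;
	/* ... kva, maps, softint, work, and event counters elided ... */
	void			*netq_ctx;	/* points at the tx or rx context */
};

struct vioif_tx_context {	/* TX-only state, reached via netq_ctx */
	bool		 txc_link_active;
	pcq_t		*txc_intrq;
	void		*txc_deferred_transmit;
	struct evcnt	 txc_defrag_failed;
};

struct vioif_rx_context {	/* RX-only state, reached via netq_ctx */
	struct evcnt	 rxc_mbuf_enobufs;
};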
diffstat:
sys/dev/pci/if_vioif.c | 1140 ++++++++++++++++++++++++-----------------------
1 files changed, 585 insertions(+), 555 deletions(-)
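The patch also rewrites the locking notes: the per-direction
txq_lock/rxq_lock pair collapses into a single netq_lock per network
queue, which additionally covers the tx/rx context structures and may
not be held together with ctrlq_wait_lock. A minimal sketch of that
discipline, assuming standard mutex(9) calls and that sc_lock and
ctrlq_wait_lock are plain kmutex_t fields (their declarations lie
outside the truncated diff); vioif_example_op() is hypothetical, not a
function in the driver:

static void
vioif_example_op(struct vioif_softc *sc, struct vioif_netqueue *netq)
{

	/* sc_lock (an adaptive mutex) is taken before any other lock. */
	mutex_enter(&sc->sc_lock);

	/* netq_lock protects the vioif_netqueue and its tx/rx context. */
	mutex_enter(&netq->netq_lock);
	netq->netq_stopping = false;
	mutex_exit(&netq->netq_lock);

	/*
	 * netq_lock cannot be held along with ctrlq_wait_lock, so it
	 * is dropped above before the control queue is touched.
	 */
	mutex_enter(&sc->sc_ctrlq.ctrlq_wait_lock);
	/* ... claim ctrlq_inuse, then operate on the control queue ... */
	mutex_exit(&sc->sc_ctrlq.ctrlq_wait_lock);

	mutex_exit(&sc->sc_lock);
}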
diffs (truncated from 1816 to 300 lines):
diff -r ed7206b501db -r 0e78da987f00 sys/dev/pci/if_vioif.c
--- a/sys/dev/pci/if_vioif.c Thu Mar 23 02:33:34 2023 +0000
+++ b/sys/dev/pci/if_vioif.c Thu Mar 23 02:42:49 2023 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vioif.c,v 1.97 2023/03/23 02:33:34 yamaguchi Exp $ */
+/* $NetBSD: if_vioif.c,v 1.98 2023/03/23 02:42:49 yamaguchi Exp $ */
/*
* Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.97 2023/03/23 02:33:34 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.98 2023/03/23 02:42:49 yamaguchi Exp $");
#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
@@ -205,12 +205,13 @@
/*
* Locking notes:
- * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
- * a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
+ * + a field in vioif_netqueue is protected by netq_lock (a spin mutex)
* - more than one lock cannot be held at once
+ * + a field in vioif_tx_context and vioif_rx_context is also protected
+ * by netq_lock.
* + ctrlq_inuse is protected by ctrlq_wait_lock.
* - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
- * - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
+ * - netq_lock cannot be held along with ctrlq_wait_lock
* + fields in vioif_softc except queues are protected by
* sc->sc_lock (an adaptive mutex)
* - the lock is held before acquisition of other locks
@@ -236,49 +237,44 @@
bus_dmamap_t vnm_mbuf_map;
};
-struct vioif_txqueue {
- kmutex_t *txq_lock; /* lock for tx operations */
-
- struct virtqueue *txq_vq;
- bool txq_stopping;
- bool txq_link_active;
- pcq_t *txq_intrq;
-
- void *txq_maps_kva;
- struct vioif_net_map *txq_maps;
-
- void *txq_deferred_transmit;
- void *txq_handle_si;
- struct vioif_work txq_work;
- bool txq_workqueue;
- bool txq_running_handle;
-
- char txq_evgroup[16];
- struct evcnt txq_defrag_failed;
- struct evcnt txq_mbuf_load_failed;
- struct evcnt txq_enqueue_reserve_failed;
+#define VIOIF_NETQ_RX 0
+#define VIOIF_NETQ_TX 1
+#define VIOIF_NETQ_IDX 2
+#define VIOIF_NETQ_DIR(n) ((n) % VIOIF_NETQ_IDX)
+#define VIOIF_NETQ_PAIRIDX(n) ((n) / VIOIF_NETQ_IDX)
+#define VIOIF_NETQ_RXQID(n) ((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_RX)
+#define VIOIF_NETQ_TXQID(n) ((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_TX)
+
+struct vioif_netqueue {
+ kmutex_t netq_lock;
+ struct virtqueue *netq_vq;
+ bool netq_stopping;
+ bool netq_running_handle;
+ void *netq_maps_kva;
+ struct vioif_net_map *netq_maps;
+
+ void *netq_softint;
+ struct vioif_work netq_work;
+ bool netq_workqueue;
+
+ char netq_evgroup[32];
+ struct evcnt netq_mbuf_load_failed;
+ struct evcnt netq_enqueue_reserve_failed;
+
+ void *netq_ctx;
};
-struct vioif_rxqueue {
- kmutex_t *rxq_lock; /* lock for rx operations */
-
- struct virtqueue *rxq_vq;
- bool rxq_stopping;
-
- void *rxq_maps_kva;
- struct vioif_net_map *rxq_maps;
-
- void *rxq_handle_si;
- struct vioif_work rxq_work;
- bool rxq_workqueue;
- bool rxq_running_handle;
-
- char rxq_evgroup[16];
- struct evcnt rxq_mbuf_enobufs;
- struct evcnt rxq_mbuf_load_failed;
- struct evcnt rxq_enqueue_reserve_failed;
+struct vioif_tx_context {
+ bool txc_link_active;
+ pcq_t *txc_intrq;
+ void *txc_deferred_transmit;
+
+ struct evcnt txc_defrag_failed;
};
+struct vioif_rx_context {
+ struct evcnt rxc_mbuf_enobufs;
+};
struct vioif_ctrlqueue {
struct virtqueue *ctrlq_vq;
enum {
@@ -325,8 +321,7 @@
struct ethercom sc_ethercom;
int sc_link_state;
- struct vioif_txqueue *sc_txq;
- struct vioif_rxqueue *sc_rxq;
+ struct vioif_netqueue *sc_netqs;
bool sc_has_ctrl;
struct vioif_ctrlqueue sc_ctrlq;
@@ -365,34 +360,34 @@
static int vioif_init(struct ifnet *);
static void vioif_stop(struct ifnet *, int);
static void vioif_start(struct ifnet *);
-static void vioif_start_locked(struct ifnet *, struct vioif_txqueue *);
+static void vioif_start_locked(struct ifnet *, struct vioif_netqueue *);
static int vioif_transmit(struct ifnet *, struct mbuf *);
-static void vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *);
+static void vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *);
static int vioif_ioctl(struct ifnet *, u_long, void *);
static void vioif_watchdog(struct ifnet *);
static int vioif_ifflags_cb(struct ethercom *);
+/* tx & rx */
+static void vioif_net_sched_handle(struct vioif_softc *,
+ struct vioif_netqueue *);
+
/* rx */
static void vioif_populate_rx_mbufs_locked(struct vioif_softc *,
- struct vioif_rxqueue *);
+ struct vioif_netqueue *);
static void vioif_rx_queue_clear(struct vioif_softc *, struct virtio_softc *,
- struct vioif_rxqueue *);
+ struct vioif_netqueue *);
static bool vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
- struct vioif_rxqueue *, u_int, size_t *);
+ struct vioif_netqueue *, u_int, size_t *);
static int vioif_rx_intr(void *);
static void vioif_rx_handle(void *);
-static void vioif_rx_sched_handle(struct vioif_softc *,
- struct vioif_rxqueue *);
/* tx */
static int vioif_tx_intr(void *);
static void vioif_tx_handle(void *);
-static void vioif_tx_sched_handle(struct vioif_softc *,
- struct vioif_txqueue *);
static void vioif_tx_queue_clear(struct vioif_softc *, struct virtio_softc *,
- struct vioif_txqueue *);
+ struct vioif_netqueue *);
static bool vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
- struct vioif_txqueue *, u_int);
+ struct vioif_netqueue *, u_int);
static void vioif_deferred_transmit(void *);
/* workqueue */
@@ -501,60 +496,199 @@
vioif_alloc_queues(struct vioif_softc *sc)
{
int nvq_pairs = sc->sc_max_nvq_pairs;
- int nvqs = nvq_pairs * 2;
- int i;
+ size_t nvqs, netq_num;
KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
- sc->sc_rxq = kmem_zalloc(sizeof(sc->sc_rxq[0]) * nvq_pairs,
- KM_SLEEP);
- sc->sc_txq = kmem_zalloc(sizeof(sc->sc_txq[0]) * nvq_pairs,
- KM_SLEEP);
-
+ nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
if (sc->sc_has_ctrl)
nvqs++;
sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
- nvqs = 0;
- for (i = 0; i < nvq_pairs; i++) {
- sc->sc_rxq[i].rxq_vq = &sc->sc_vqs[nvqs++];
- sc->sc_txq[i].txq_vq = &sc->sc_vqs[nvqs++];
- }
-
- if (sc->sc_has_ctrl)
- sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[nvqs++];
+ sc->sc_netqs = kmem_zalloc(sizeof(sc->sc_netqs[0]) * netq_num,
+ KM_SLEEP);
}
static void
vioif_free_queues(struct vioif_softc *sc)
{
- int nvq_pairs = sc->sc_max_nvq_pairs;
- int nvqs = nvq_pairs * 2;
-
+ size_t nvqs, netq_num;
+
+ nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
if (sc->sc_ctrlq.ctrlq_vq)
nvqs++;
- if (sc->sc_txq) {
- kmem_free(sc->sc_txq, sizeof(sc->sc_txq[0]) * nvq_pairs);
- sc->sc_txq = NULL;
+ kmem_free(sc->sc_netqs, sizeof(sc->sc_netqs[0]) * netq_num);
+ kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
+ sc->sc_netqs = NULL;
+ sc->sc_vqs = NULL;
+}
+
+static int
+vioif_netqueue_init(struct vioif_softc *sc, struct virtio_softc *vsc,
+ size_t qid, u_int softint_flags)
+{
+ static const struct {
+ const char *dirname;
+ int segsize;
+ int nsegs;
+ int (*intrhand)(void *);
+ void (*sihand)(void *);
+ } params[VIOIF_NETQ_IDX] = {
+ [VIOIF_NETQ_RX] = {
+ .dirname = "rx",
+ .segsize = MCLBYTES,
+ .nsegs = 2,
+ .intrhand = vioif_rx_intr,
+ .sihand = vioif_rx_handle,
+ },
+ [VIOIF_NETQ_TX] = {
+ .dirname = "tx",
+ .segsize = ETHER_MAX_LEN - ETHER_HDR_LEN,
+ .nsegs = 2,
+ .intrhand = vioif_tx_intr,
+ .sihand = vioif_tx_handle,
+ }
+ };
+
+ struct virtqueue *vq;
+ struct vioif_netqueue *netq;
+ struct vioif_tx_context *txc;
+ struct vioif_rx_context *rxc;
+ char qname[32];
+ int r, dir;
+
+ txc = NULL;
+ rxc = NULL;
+ netq = &sc->sc_netqs[qid];
+ vq = &sc->sc_vqs[qid];
+ dir = VIOIF_NETQ_DIR(qid);
+
+ netq->netq_vq = &sc->sc_vqs[qid];
+ netq->netq_stopping = false;
+ netq->netq_running_handle = false;
+
+ snprintf(qname, sizeof(qname), "%s%zu",
+ params[dir].dirname, VIOIF_NETQ_PAIRIDX(qid));
+ snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
+ "%s-%s", device_xname(sc->sc_dev), qname);
+
+ mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
+ r = virtio_alloc_vq(vsc, vq, qid,
+ params[dir].segsize + sc->sc_hdr_size,
+ params[dir].nsegs, qname);
+ if (r != 0)
+ goto err;
+ netq->netq_vq = vq;
+
+ netq->netq_vq->vq_intrhand = params[dir].intrhand;
+ netq->netq_vq->vq_intrhand_arg = netq;
+ netq->netq_softint = softint_establish(softint_flags,
+ params[dir].sihand, netq);
+ if (netq->netq_softint == NULL) {
+ aprint_error_dev(sc->sc_dev,
+ "couldn't establish %s softint\n",
+ params[dir].dirname);
+ goto err;
}
-
- if (sc->sc_rxq) {
- kmem_free(sc->sc_rxq, sizeof(sc->sc_rxq[0]) * nvq_pairs);
- sc->sc_rxq = NULL;
+ vioif_work_set(&netq->netq_work, params[dir].sihand, netq);
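The diff is truncated at this point, but the index mapping established
by the new VIOIF_NETQ_* macros is worth spelling out: with
VIOIF_NETQ_IDX == 2, the RX queue of pair n occupies slot 2n and the TX
queue slot 2n + 1 of both sc_vqs and sc_netqs. An illustrative walk
over the queue pairs (the driver's real call sites of
vioif_netqueue_init() lie beyond the truncation, and this helper is
hypothetical):

/*
 * Illustrative only: how the index macros tie a queue pair to its
 * two vioif_netqueue slots.
 */
static int
vioif_setup_netqueues(struct vioif_softc *sc, struct virtio_softc *vsc,
    u_int softint_flags)
{
	int i, r;

	for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
		/* RX of pair i lives at qid 2 * i ... */
		r = vioif_netqueue_init(sc, vsc, VIOIF_NETQ_RXQID(i),
		    softint_flags);
		if (r != 0)
			return r;

		/* ... and TX of pair i at qid 2 * i + 1. */
		r = vioif_netqueue_init(sc, vsc, VIOIF_NETQ_TXQID(i),
		    softint_flags);
		if (r != 0)
			return r;
	}

	return 0;
}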