Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/dev/pci Divide some elements of vioif_softc into txq, rx...
details: https://anonhg.NetBSD.org/src/rev/f5c79eb5ef40
branches: trunk
changeset: 447508:f5c79eb5ef40
user: yamaguchi <yamaguchi%NetBSD.org@localhost>
date: Mon Jan 14 14:35:52 2019 +0000
description:
Divide some elements of vioif_softc into txq, rxq, and ctrlq
diffstat:
sys/dev/pci/if_vioif.c | 687 +++++++++++++++++++++++++++---------------------
1 files changed, 392 insertions(+), 295 deletions(-)
diffs (truncated from 1326 to 300 lines):
diff -r a0e832e9f093 -r f5c79eb5ef40 sys/dev/pci/if_vioif.c
--- a/sys/dev/pci/if_vioif.c Mon Jan 14 14:32:13 2019 +0000
+++ b/sys/dev/pci/if_vioif.c Mon Jan 14 14:35:52 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vioif.c,v 1.42 2019/01/14 14:32:13 yamaguchi Exp $ */
+/* $NetBSD: if_vioif.c,v 1.43 2019/01/14 14:35:52 yamaguchi Exp $ */
/*
* Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.42 2019/01/14 14:32:13 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.43 2019/01/14 14:35:52 yamaguchi Exp $");
#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
@@ -175,6 +175,67 @@
/*
* if_vioifvar.h:
*/
+
+/*
+ * Locking notes:
+ * + fields in vioif_txqueue are protected by txq_lock (a spin mutex), and
+ * fields in vioif_rxqueue are protected by rxq_lock (a spin mutex).
+ * - more than one lock cannot be held at once
+ * + ctrlq_inuse is protected by ctrlq_wait_lock.
+ * - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
+ * - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
+ */
+
+struct vioif_txqueue {
+ kmutex_t *txq_lock; /* lock for tx operations */
+
+ struct virtqueue *txq_vq;
+ bool txq_stopping;
+ bool txq_link_active;
+
+ struct virtio_net_hdr *txq_hdrs;
+ bus_dmamap_t *txq_hdr_dmamaps;
+
+ struct mbuf **txq_mbufs;
+ bus_dmamap_t *txq_dmamaps;
+};
+
+struct vioif_rxqueue {
+ kmutex_t *rxq_lock; /* lock for rx operations */
+
+ struct virtqueue *rxq_vq;
+ bool rxq_stopping;
+
+ struct virtio_net_hdr *rxq_hdrs;
+ bus_dmamap_t *rxq_hdr_dmamaps;
+
+ struct mbuf **rxq_mbufs;
+ bus_dmamap_t *rxq_dmamaps;
+
+ void *rxq_softint;
+};
+
+struct vioif_ctrlqueue {
+ struct virtqueue *ctrlq_vq;
+ enum {
+ FREE, INUSE, DONE
+ } ctrlq_inuse;
+ kcondvar_t ctrlq_wait;
+ kmutex_t ctrlq_wait_lock;
+
+ struct virtio_net_ctrl_cmd *ctrlq_cmd;
+ struct virtio_net_ctrl_status *ctrlq_status;
+ struct virtio_net_ctrl_rx *ctrlq_rx;
+ struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_uc;
+ struct virtio_net_ctrl_mac_tbl *ctrlq_mac_tbl_mc;
+
+ bus_dmamap_t ctrlq_cmd_dmamap;
+ bus_dmamap_t ctrlq_status_dmamap;
+ bus_dmamap_t ctrlq_rx_dmamap;
+ bus_dmamap_t ctrlq_tbl_uc_dmamap;
+ bus_dmamap_t ctrlq_tbl_mc_dmamap;
+};
+
struct vioif_softc {
device_t sc_dev;
@@ -189,55 +250,28 @@
short sc_deferred_init_done;
bool sc_link_active;
- /* bus_dmamem */
- bus_dma_segment_t sc_hdr_segs[1];
- struct virtio_net_hdr *sc_hdrs;
-#define sc_rx_hdrs sc_hdrs
- struct virtio_net_hdr *sc_tx_hdrs;
- struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
- struct virtio_net_ctrl_status *sc_ctrl_status;
- struct virtio_net_ctrl_rx *sc_ctrl_rx;
- struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
- struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;
-
- /* kmem */
- bus_dmamap_t *sc_arrays;
-#define sc_rxhdr_dmamaps sc_arrays
- bus_dmamap_t *sc_txhdr_dmamaps;
- bus_dmamap_t *sc_rx_dmamaps;
- bus_dmamap_t *sc_tx_dmamaps;
- struct mbuf **sc_rx_mbufs;
- struct mbuf **sc_tx_mbufs;
-
- bus_dmamap_t sc_ctrl_cmd_dmamap;
- bus_dmamap_t sc_ctrl_status_dmamap;
- bus_dmamap_t sc_ctrl_rx_dmamap;
- bus_dmamap_t sc_ctrl_tbl_uc_dmamap;
- bus_dmamap_t sc_ctrl_tbl_mc_dmamap;
-
- void *sc_rx_softint;
- void *sc_ctl_softint;
-
- enum {
- FREE, INUSE, DONE
- } sc_ctrl_inuse;
- kcondvar_t sc_ctrl_wait;
- kmutex_t sc_ctrl_wait_lock;
- kmutex_t sc_tx_lock;
- kmutex_t sc_rx_lock;
- bool sc_stopping;
+ struct vioif_txqueue sc_txq;
+ struct vioif_rxqueue sc_rxq;
bool sc_has_ctrl;
+ struct vioif_ctrlqueue sc_ctrlq;
+
+ bus_dma_segment_t sc_hdr_segs[1];
+ void *sc_dmamem;
+ void *sc_kmem;
+
+ void *sc_ctl_softint;
};
#define VIRTIO_NET_TX_MAXNSEGS (16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) /* XXX */
-#define VIOIF_TX_LOCK(_sc) mutex_enter(&(_sc)->sc_tx_lock)
-#define VIOIF_TX_UNLOCK(_sc) mutex_exit(&(_sc)->sc_tx_lock)
-#define VIOIF_TX_LOCKED(_sc) mutex_owned(&(_sc)->sc_tx_lock)
-#define VIOIF_RX_LOCK(_sc) mutex_enter(&(_sc)->sc_rx_lock)
-#define VIOIF_RX_UNLOCK(_sc) mutex_exit(&(_sc)->sc_rx_lock)
-#define VIOIF_RX_LOCKED(_sc) mutex_owned(&(_sc)->sc_rx_lock)
+#define VIOIF_TXQ_LOCK(_q) mutex_enter((_q)->txq_lock)
+#define VIOIF_TXQ_UNLOCK(_q) mutex_exit((_q)->txq_lock)
+#define VIOIF_TXQ_LOCKED(_q) mutex_owned((_q)->txq_lock)
+
+#define VIOIF_RXQ_LOCK(_q) mutex_enter((_q)->rxq_lock)
+#define VIOIF_RXQ_UNLOCK(_q) mutex_exit((_q)->rxq_lock)
+#define VIOIF_RXQ_LOCKED(_q) mutex_owned((_q)->rxq_lock)
/* cfattach interface functions */
static int vioif_match(device_t, cfdata_t, void *);
@@ -296,39 +330,42 @@
/* allocate memory */
/*
* dma memory is used for:
- * sc_rx_hdrs[slot]: metadata array for received frames (READ)
- * sc_tx_hdrs[slot]: metadata array for frames to be sent (WRITE)
- * sc_ctrl_cmd: command to be sent via ctrl vq (WRITE)
- * sc_ctrl_status: return value for a command via ctrl vq (READ)
- * sc_ctrl_rx: parameter for a VIRTIO_NET_CTRL_RX class command
+ * rxq_hdrs[slot]: metadata array for received frames (READ)
+ * txq_hdrs[slot]: metadata array for frames to be sent (WRITE)
+ * ctrlq_cmd: command to be sent via ctrl vq (WRITE)
+ * ctrlq_status: return value for a command via ctrl vq (READ)
+ * ctrlq_rx: parameter for a VIRTIO_NET_CTRL_RX class command
* (WRITE)
- * sc_ctrl_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
+ * ctrlq_mac_tbl_uc: unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
* class command (WRITE)
- * sc_ctrl_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
+ * ctrlq_mac_tbl_mc: multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
* class command (WRITE)
- * sc_ctrl_* structures are allocated only one each; they are protected by
- * sc_ctrl_inuse variable and sc_ctrl_wait condvar.
+ * ctrlq_* structures are allocated only one each; they are protected by
+ * ctrlq_inuse variable and ctrlq_wait condvar.
*/
/*
* dynamically allocated memory is used for:
- * sc_rxhdr_dmamaps[slot]: bus_dmamap_t array for sc_rx_hdrs[slot]
- * sc_txhdr_dmamaps[slot]: bus_dmamap_t array for sc_tx_hdrs[slot]
- * sc_rx_dmamaps[slot]: bus_dmamap_t array for received payload
- * sc_tx_dmamaps[slot]: bus_dmamap_t array for sent payload
- * sc_rx_mbufs[slot]: mbuf pointer array for received frames
- * sc_tx_mbufs[slot]: mbuf pointer array for sent frames
+ * rxq_hdr_dmamaps[slot]: bus_dmamap_t array for rxq_hdrs[slot]
+ * txq_hdr_dmamaps[slot]: bus_dmamap_t array for txq_hdrs[slot]
+ * rxq_dmamaps[slot]: bus_dmamap_t array for received payload
+ * txq_dmamaps[slot]: bus_dmamap_t array for sent payload
+ * rxq_mbufs[slot]: mbuf pointer array for received frames
+ * txq_mbufs[slot]: mbuf pointer array for sent frames
*/
static int
vioif_alloc_mems(struct vioif_softc *sc)
{
struct virtio_softc *vsc = sc->sc_virtio;
+ struct vioif_txqueue *txq = &sc->sc_txq;
+ struct vioif_rxqueue *rxq = &sc->sc_rxq;
+ struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
int allocsize, allocsize2, r, rsegs, i;
void *vaddr;
intptr_t p;
int rxqsize, txqsize;
- rxqsize = sc->sc_vq[VQ_RX].vq_num;
- txqsize = sc->sc_vq[VQ_TX].vq_num;
+ rxqsize = rxq->rxq_vq->vq_num;
+ txqsize = txq->txq_vq->vq_num;
allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
allocsize += sizeof(struct virtio_net_hdr) * txqsize;
@@ -361,17 +398,18 @@
#define P(p, p0, p0size) do { p0 = (void *) p; \
p += p0size; } while (0)
memset(vaddr, 0, allocsize);
+ sc->sc_dmamem = vaddr;
p = (intptr_t) vaddr;
- P(p, sc->sc_rx_hdrs, sizeof(sc->sc_rx_hdrs[0]) * rxqsize);
- P(p, sc->sc_tx_hdrs, sizeof(sc->sc_tx_hdrs[0]) * txqsize);
+ P(p, rxq->rxq_hdrs, sizeof(rxq->rxq_hdrs[0]) * rxqsize);
+ P(p, txq->txq_hdrs, sizeof(txq->txq_hdrs[0]) * txqsize);
if (sc->sc_has_ctrl) {
- P(p, sc->sc_ctrl_cmd, sizeof(*sc->sc_ctrl_cmd));
- P(p, sc->sc_ctrl_status, sizeof(*sc->sc_ctrl_status));
- P(p, sc->sc_ctrl_rx, sizeof(*sc->sc_ctrl_rx));
- P(p, sc->sc_ctrl_mac_tbl_uc, sizeof(*sc->sc_ctrl_mac_tbl_uc) + 0);
- P(p, sc->sc_ctrl_mac_tbl_mc,
- (sizeof(*sc->sc_ctrl_mac_tbl_mc)
+ P(p, ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd));
+ P(p, ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status));
+ P(p, ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx));
+ P(p, ctrlq->ctrlq_mac_tbl_uc, sizeof(*ctrlq->ctrlq_mac_tbl_uc) + 0);
+ P(p, ctrlq->ctrlq_mac_tbl_mc,
+ (sizeof(*ctrlq->ctrlq_mac_tbl_mc)
+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES));
}
@@ -379,14 +417,15 @@
allocsize2 += sizeof(bus_dmamap_t) * (rxqsize + txqsize);
allocsize2 += sizeof(struct mbuf*) * (rxqsize + txqsize);
vaddr = kmem_zalloc(allocsize2, KM_SLEEP);
+ sc->sc_kmem = vaddr;
p = (intptr_t) vaddr;
- P(p, sc->sc_rxhdr_dmamaps, sizeof(sc->sc_rxhdr_dmamaps[0]) * rxqsize);
- P(p, sc->sc_txhdr_dmamaps, sizeof(sc->sc_txhdr_dmamaps[0]) * txqsize);
- P(p, sc->sc_rx_dmamaps, sizeof(sc->sc_rx_dmamaps[0]) * rxqsize);
- P(p, sc->sc_tx_dmamaps, sizeof(sc->sc_tx_dmamaps[0]) * txqsize);
- P(p, sc->sc_rx_mbufs, sizeof(sc->sc_rx_mbufs[0]) * rxqsize);
- P(p, sc->sc_tx_mbufs, sizeof(sc->sc_tx_mbufs[0]) * txqsize);
+ P(p, rxq->rxq_hdr_dmamaps, sizeof(rxq->rxq_hdr_dmamaps[0]) * rxqsize);
+ P(p, txq->txq_hdr_dmamaps, sizeof(txq->txq_hdr_dmamaps[0]) * txqsize);
+ P(p, rxq->rxq_dmamaps, sizeof(rxq->rxq_dmamaps[0]) * rxqsize);
+ P(p, txq->txq_dmamaps, sizeof(txq->txq_dmamaps[0]) * txqsize);
+ P(p, rxq->rxq_mbufs, sizeof(rxq->rxq_mbufs[0]) * rxqsize);
+ P(p, txq->txq_mbufs, sizeof(txq->txq_mbufs[0]) * txqsize);
#undef P
#define C(map, size, nsegs, usage) \
@@ -415,36 +454,40 @@
} \
} while (0)
for (i = 0; i < rxqsize; i++) {
- C_L(sc->sc_rxhdr_dmamaps[i], &sc->sc_rx_hdrs[i], sizeof(sc->sc_rx_hdrs[0]), 1,
+ C_L(rxq->rxq_hdr_dmamaps[i], &rxq->rxq_hdrs[i], sizeof(rxq->rxq_hdrs[0]), 1,
BUS_DMA_READ, "rx header");
- C(sc->sc_rx_dmamaps[i], MCLBYTES, 1, "rx payload");
+ C(rxq->rxq_dmamaps[i], MCLBYTES, 1, "rx payload");
}
for (i = 0; i < txqsize; i++) {
- C_L(sc->sc_txhdr_dmamaps[i], &sc->sc_tx_hdrs[i], sizeof(sc->sc_tx_hdrs[0]), 1,
+ C_L(txq->txq_hdr_dmamaps[i], &txq->txq_hdrs[i], sizeof(txq->txq_hdrs[0]), 1,
BUS_DMA_READ, "tx header");
- C(sc->sc_tx_dmamaps[i], ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, "tx payload");
+ C(txq->txq_dmamaps[i], ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, "tx payload");
}
if (sc->sc_has_ctrl) {
/* control vq class & command */
- C_L(sc->sc_ctrl_cmd_dmamap, sc->sc_ctrl_cmd, sizeof(*sc->sc_ctrl_cmd), 1,
+ C_L(ctrlq->ctrlq_cmd_dmamap,
+ ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
BUS_DMA_WRITE, "control command");
- C_L(sc->sc_ctrl_status_dmamap, sc->sc_ctrl_status, sizeof(*sc->sc_ctrl_status), 1,
+ C_L(ctrlq->ctrlq_status_dmamap,
+ ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
BUS_DMA_READ, "control status");
/* control vq rx mode command parameter */
- C_L(sc->sc_ctrl_rx_dmamap, sc->sc_ctrl_rx, sizeof(*sc->sc_ctrl_rx), 1,
+ C_L(ctrlq->ctrlq_rx_dmamap,
Home |
Main Index |
Thread Index |
Old Index