Source-Changes-HG archive
[src/netbsd-10]: src/sys/dev/pci Pull up following revision(s) (requested by ...
details: https://anonhg.NetBSD.org/src/rev/5453e004175e
branches: netbsd-10
changeset: 374067:5453e004175e
user: martin <martin%NetBSD.org@localhost>
date: Thu Mar 30 11:36:26 2023 +0000
description:
Pull up following revision(s) (requested by yamaguchi in ticket #128):
sys/dev/pci/if_vioif.c: revision 1.83-1.102,1.105,1.106
vioif(4): remove unnecessary lock release
if_percpuq_enqueue() can be called with rxq->rxq_lock held because the queue is per-CPU.
vioif(4): access txq_active and rxq_active with the lock held
vioif(4): use device reset to stop interrupt completely
vioif(4): rename {txq,rxq}_active to {txq,rxq}_running_handle
vioif(4): stop interrupts before scheduling the handler
vioif(4): adjust receive buffer to ETHER_ALIGN (see the sketch after this change list)
vioif(4): added event counters related to receive processing
vioif(4): fix missing virtio_enqueue_abort for error handling
vioif(4): drain receive buffers when stopping the device
to remove a branch in vioif_populate_rx_mbufs_locked()
vioif(4): divide the receive interrupt handler
into buffer dequeueing and buffer preparation
vioif(4): merge drain into clear of queue
vioif(4): increase output error counter
vioif(4): added a structure to manage variables for packet processing
vioif(4): prepare slot before dequeuing
vioif(4): added __predict_false to error check
vioif(4): added new data structure for network queues
and moved the parameters shared by vioif_txqueue and
vioif_rxqueue into the new structure
vioif(4): added functions to manipulate network queues
vioif(4): rename sc_hdr_segs to sc_segs
vioif(4): reorganize functions
This change only moves and renames functions;
there is no functional change.
vioif(4): divide IFF_OACTIVE into per-queue flags
vioif(4): clear flags when configuration fails
vioif(4): fix wrong memory allocation size
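For readers unfamiliar with the ETHER_ALIGN item above: the usual NetBSD idiom is to trim two bytes from the front of each receive mbuf so that, once the 14-byte Ethernet header has been consumed, the IP header sits on a 4-byte boundary. The sketch below only illustrates that idiom; it is not the code committed in this pull-up, and the helper name example_alloc_rx_mbuf (and the header assumed for ETHER_ALIGN) are hypothetical.

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if_ether.h>	/* ETHER_ALIGN assumed to come from here */

static struct mbuf *
example_alloc_rx_mbuf(void)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* reserve ETHER_ALIGN (2) bytes so the IP header is 4-byte aligned */
	m_adj(m, ETHER_ALIGN);

	return m;
}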
diffstat:
sys/dev/pci/if_vioif.c | 2992 +++++++++++++++++++++++++----------------------
1 files changed, 1584 insertions(+), 1408 deletions(-)
diffs (truncated from 3434 to 300 lines):
diff -r f0129a17ade8 -r 5453e004175e sys/dev/pci/if_vioif.c
--- a/sys/dev/pci/if_vioif.c Thu Mar 23 12:12:08 2023 +0000
+++ b/sys/dev/pci/if_vioif.c Thu Mar 30 11:36:26 2023 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vioif.c,v 1.82 2022/09/12 07:26:04 knakahara Exp $ */
+/* $NetBSD: if_vioif.c,v 1.82.4.1 2023/03/30 11:36:26 martin Exp $ */
/*
* Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.82 2022/09/12 07:26:04 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.82.4.1 2023/03/30 11:36:26 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
@@ -51,6 +51,7 @@
#include <sys/module.h>
#include <sys/pcq.h>
#include <sys/workqueue.h>
+#include <sys/xcall.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>
@@ -204,12 +205,13 @@ struct virtio_net_ctrl_mq {
/*
* Locking notes:
- * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
- * a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
+ * + a field in vioif_netqueue is protected by netq_lock (a spin mutex)
* - more than one lock cannot be held at once
+ * + a field in vioif_tx_context and vioif_rx_context is also protected
+ * by netq_lock.
* + ctrlq_inuse is protected by ctrlq_wait_lock.
* - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
- * - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
+ * - netq_lock cannot be held along with ctrlq_wait_lock
* + fields in vioif_softc except queues are protected by
* sc->sc_lock(an adaptive mutex)
* - the lock is held before acquisition of other locks
@@ -228,53 +230,52 @@ struct vioif_work {
unsigned int added;
};
-struct vioif_txqueue {
- kmutex_t *txq_lock; /* lock for tx operations */
-
- struct virtqueue *txq_vq;
- bool txq_stopping;
- bool txq_link_active;
- pcq_t *txq_intrq;
-
- struct virtio_net_hdr *txq_hdrs;
- bus_dmamap_t *txq_hdr_dmamaps;
-
- struct mbuf **txq_mbufs;
- bus_dmamap_t *txq_dmamaps;
-
- void *txq_deferred_transmit;
- void *txq_handle_si;
- struct vioif_work txq_work;
- bool txq_workqueue;
- bool txq_active;
-
- char txq_evgroup[16];
- struct evcnt txq_defrag_failed;
- struct evcnt txq_mbuf_load_failed;
- struct evcnt txq_enqueue_reserve_failed;
+struct vioif_net_map {
+ struct virtio_net_hdr *vnm_hdr;
+ bus_dmamap_t vnm_hdr_map;
+ struct mbuf *vnm_mbuf;
+ bus_dmamap_t vnm_mbuf_map;
};
-struct vioif_rxqueue {
- kmutex_t *rxq_lock; /* lock for rx operations */
-
- struct virtqueue *rxq_vq;
- bool rxq_stopping;
-
- struct virtio_net_hdr *rxq_hdrs;
- bus_dmamap_t *rxq_hdr_dmamaps;
-
- struct mbuf **rxq_mbufs;
- bus_dmamap_t *rxq_dmamaps;
-
- void *rxq_handle_si;
- struct vioif_work rxq_work;
- bool rxq_workqueue;
- bool rxq_active;
-
- char rxq_evgroup[16];
- struct evcnt rxq_mbuf_add_failed;
+#define VIOIF_NETQ_RX 0
+#define VIOIF_NETQ_TX 1
+#define VIOIF_NETQ_IDX 2
+#define VIOIF_NETQ_DIR(n) ((n) % VIOIF_NETQ_IDX)
+#define VIOIF_NETQ_PAIRIDX(n) ((n) / VIOIF_NETQ_IDX)
+#define VIOIF_NETQ_RXQID(n) ((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_RX)
+#define VIOIF_NETQ_TXQID(n) ((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_TX)
+
+struct vioif_netqueue {
+ kmutex_t netq_lock;
+ struct virtqueue *netq_vq;
+ bool netq_stopping;
+ bool netq_running_handle;
+ void *netq_maps_kva;
+ struct vioif_net_map *netq_maps;
+
+ void *netq_softint;
+ struct vioif_work netq_work;
+ bool netq_workqueue;
+
+ char netq_evgroup[32];
+ struct evcnt netq_mbuf_load_failed;
+ struct evcnt netq_enqueue_failed;
+
+ void *netq_ctx;
};
+struct vioif_tx_context {
+ bool txc_link_active;
+ bool txc_no_free_slots;
+ pcq_t *txc_intrq;
+ void *txc_deferred_transmit;
+
+ struct evcnt txc_defrag_failed;
+};
+
+struct vioif_rx_context {
+ struct evcnt rxc_mbuf_enobufs;
+};
struct vioif_ctrlqueue {
struct virtqueue *ctrlq_vq;
enum {
@@ -321,17 +322,16 @@ struct vioif_softc {
struct ethercom sc_ethercom;
int sc_link_state;
- struct vioif_txqueue *sc_txq;
- struct vioif_rxqueue *sc_rxq;
+ struct vioif_netqueue *sc_netqs;
bool sc_has_ctrl;
struct vioif_ctrlqueue sc_ctrlq;
- bus_dma_segment_t sc_hdr_segs[1];
+ bus_dma_segment_t sc_segs[1];
void *sc_dmamem;
void *sc_kmem;
- void *sc_ctl_softint;
+ void *sc_cfg_softint;
struct workqueue *sc_txrx_workqueue;
bool sc_txrx_workqueue_sysctl;
@@ -361,69 +361,87 @@ static int vioif_finalize_teardown(devic
static int vioif_init(struct ifnet *);
static void vioif_stop(struct ifnet *, int);
static void vioif_start(struct ifnet *);
-static void vioif_start_locked(struct ifnet *, struct vioif_txqueue *);
static int vioif_transmit(struct ifnet *, struct mbuf *);
-static void vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *);
static int vioif_ioctl(struct ifnet *, u_long, void *);
static void vioif_watchdog(struct ifnet *);
+static int vioif_ifflags(struct vioif_softc *);
static int vioif_ifflags_cb(struct ethercom *);
+/* tx & rx */
+static int vioif_netqueue_init(struct vioif_softc *,
+ struct virtio_softc *, size_t, u_int);
+static void vioif_netqueue_teardown(struct vioif_softc *,
+ struct virtio_softc *, size_t);
+static void vioif_net_intr_enable(struct vioif_softc *,
+ struct virtio_softc *);
+static void vioif_net_intr_disable(struct vioif_softc *,
+ struct virtio_softc *);
+static void vioif_net_sched_handle(struct vioif_softc *,
+ struct vioif_netqueue *);
+
/* rx */
-static int vioif_add_rx_mbuf(struct vioif_rxqueue *, int);
-static void vioif_free_rx_mbuf(struct vioif_rxqueue *, int);
static void vioif_populate_rx_mbufs_locked(struct vioif_softc *,
- struct vioif_rxqueue *);
-static void vioif_rx_queue_clear(struct vioif_rxqueue *);
-static bool vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
- struct vioif_rxqueue *, u_int);
+ struct vioif_netqueue *);
static int vioif_rx_intr(void *);
static void vioif_rx_handle(void *);
-static void vioif_rx_sched_handle(struct vioif_softc *,
- struct vioif_rxqueue *);
-static void vioif_rx_drain(struct vioif_rxqueue *);
+static void vioif_rx_queue_clear(struct vioif_softc *,
+ struct virtio_softc *, struct vioif_netqueue *);
/* tx */
+static void vioif_start_locked(struct ifnet *, struct vioif_netqueue *);
+static void vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *);
+static void vioif_deferred_transmit(void *);
static int vioif_tx_intr(void *);
static void vioif_tx_handle(void *);
-static void vioif_tx_sched_handle(struct vioif_softc *,
- struct vioif_txqueue *);
-static void vioif_tx_queue_clear(struct vioif_txqueue *);
-static bool vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
- struct vioif_txqueue *, u_int);
-static void vioif_tx_drain(struct vioif_txqueue *);
-static void vioif_deferred_transmit(void *);
-
-/* workqueue */
-static struct workqueue*
- vioif_workq_create(const char *, pri_t, int, int);
-static void vioif_workq_destroy(struct workqueue *);
-static void vioif_workq_work(struct work *, void *);
-static void vioif_work_set(struct vioif_work *, void(*)(void *), void *);
-static void vioif_work_add(struct workqueue *, struct vioif_work *);
-static void vioif_work_wait(struct workqueue *, struct vioif_work *);
-
-/* other control */
-static int vioif_get_link_status(struct vioif_softc *);
-static void vioif_update_link_status(struct vioif_softc *);
+static void vioif_tx_queue_clear(struct vioif_softc *, struct virtio_softc *,
+ struct vioif_netqueue *);
+
+/* controls */
+static int vioif_ctrl_intr(void *);
static int vioif_ctrl_rx(struct vioif_softc *, int, bool);
static int vioif_set_promisc(struct vioif_softc *, bool);
static int vioif_set_allmulti(struct vioif_softc *, bool);
static int vioif_set_rx_filter(struct vioif_softc *);
static int vioif_rx_filter(struct vioif_softc *);
static int vioif_set_mac_addr(struct vioif_softc *);
-static int vioif_ctrl_intr(void *);
+static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);
+
+/* config interrupt */
static int vioif_config_change(struct virtio_softc *);
-static void vioif_ctl_softint(void *);
-static int vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);
-static void vioif_enable_interrupt_vqpairs(struct vioif_softc *);
-static void vioif_disable_interrupt_vqpairs(struct vioif_softc *);
+static void vioif_cfg_softint(void *);
+static void vioif_update_link_status(struct vioif_softc *);
+
+/* others */
+static void vioif_alloc_queues(struct vioif_softc *);
+static void vioif_free_queues(struct vioif_softc *);
+static int vioif_alloc_mems(struct vioif_softc *);
+static struct workqueue*
+ vioif_workq_create(const char *, pri_t, int, int);
+static void vioif_workq_destroy(struct workqueue *);
+static void vioif_work_set(struct vioif_work *, void(*)(void *), void *);
+static void vioif_work_add(struct workqueue *, struct vioif_work *);
+static void vioif_work_wait(struct workqueue *, struct vioif_work *);
static int vioif_setup_sysctl(struct vioif_softc *);
static void vioif_setup_stats(struct vioif_softc *);
-static int vioif_ifflags(struct vioif_softc *);
CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
vioif_match, vioif_attach, NULL, NULL);
+static void
+vioif_intr_barrier(void)
+{
+
+ /* wait for all interrupt handlers to finish */
+ xc_barrier(0);
+}
+
+static void
+vioif_notify(struct virtio_softc *vsc, struct virtqueue *vq)
+{
+
+ virtio_enqueue_commit(vsc, vq, -1, true);
+}
+
static int
vioif_match(device_t parent, cfdata_t match, void *aux)
{
@@ -435,394 +453,19 @@ vioif_match(device_t parent, cfdata_t ma
return 0;
}
-static int
-vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
- bus_size_t size, int nsegs, const char *usage)
-{
- int r;
-
- r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
- nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);
-
- if (r != 0) {
- aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
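As a reading aid for the VIOIF_NETQ_* macros introduced in the hunk above: queue pair i occupies slot 2*i (receive) and slot 2*i + 1 (transmit) of the single sc_netqs[] array, and VIOIF_NETQ_DIR()/VIOIF_NETQ_PAIRIDX() invert that mapping. The loop below is only a minimal sketch of how such an array is typically walked; example_walk_netqueues and its nvq_pairs parameter are hypothetical and not part of the patch.

static void
example_walk_netqueues(struct vioif_softc *sc, u_int nvq_pairs)
{
	u_int i;

	for (i = 0; i < nvq_pairs; i++) {
		/* even slot: rx queue of pair i, odd slot: tx queue of pair i */
		struct vioif_netqueue *rxq =
		    &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
		struct vioif_netqueue *txq =
		    &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];

		/* per-queue state is protected by the queue's netq_lock */
		(void)rxq;
		(void)txq;
	}
}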
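The new vioif_intr_barrier() (built on xc_barrier(0) from <sys/xcall.h>) supports the "stop interrupts before scheduling the handler" items in the change list: once the virtqueue interrupt has been masked, the barrier waits for handler instances still running on other CPUs. A hedged sketch of that ordering follows; it is not the literal committed stop path (which also resets the device), and example_quiesce_queue is a hypothetical name.

static void
example_quiesce_queue(struct virtio_softc *vsc, struct vioif_netqueue *netq)
{
	/* mask further interrupts from this virtqueue */
	virtio_stop_vq_intr(vsc, netq->netq_vq);

	/* wait for handlers already running on other CPUs to return */
	vioif_intr_barrier();
}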