Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/dev/pci Make if_vioif MPSAFE
details: https://anonhg.NetBSD.org/src/rev/084f8d574fc4
branches: trunk
changeset: 330795:084f8d574fc4
user: ozaki-r <ozaki-r%NetBSD.org@localhost>
date: Tue Jul 22 02:21:50 2014 +0000
description:
Make if_vioif MPSAFE
- Introduce VIOIF_MPSAFE
- It's enabled only when NET_MPSAFE is defined in if.h or the kernel config
- Add tx and rx mutex locks
- Locking them is performance sensitive, so it's not used when !VIOIF_MPSAFE
- Set SOFTINT_MPSAFE to vioif_rx_softint only when VIOIF_MPSAFE
diffstat:
sys/dev/pci/if_vioif.c | 144 +++++++++++++++++++++++++++++++++++++++++++++---
1 files changed, 133 insertions(+), 11 deletions(-)
diffs (truncated from 345 to 300 lines):
diff -r 38a92c721e3f -r 084f8d574fc4 sys/dev/pci/if_vioif.c
--- a/sys/dev/pci/if_vioif.c Tue Jul 22 01:55:54 2014 +0000
+++ b/sys/dev/pci/if_vioif.c Tue Jul 22 02:21:50 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_vioif.c,v 1.6 2014/07/22 01:55:54 ozaki-r Exp $ */
+/* $NetBSD: if_vioif.c,v 1.7 2014/07/22 02:21:50 ozaki-r Exp $ */
/*
* Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.6 2014/07/22 01:55:54 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.7 2014/07/22 02:21:50 ozaki-r Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -53,6 +53,10 @@
#include <net/bpf.h>
+#ifdef NET_MPSAFE
+#define VIOIF_MPSAFE 1
+#endif
+
/*
* if_vioifreg.h:
*/
@@ -186,10 +190,20 @@
} sc_ctrl_inuse;
kcondvar_t sc_ctrl_wait;
kmutex_t sc_ctrl_wait_lock;
+ kmutex_t *sc_tx_lock;
+ kmutex_t *sc_rx_lock;
+ bool sc_stopping;
};
#define VIRTIO_NET_TX_MAXNSEGS (16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES (64) /* XXX */
+#define VIOIF_TX_LOCK(_sc) if ((_sc)->sc_tx_lock) mutex_enter((_sc)->sc_tx_lock)
+#define VIOIF_TX_UNLOCK(_sc) if ((_sc)->sc_tx_lock) mutex_exit((_sc)->sc_tx_lock)
+#define VIOIF_TX_LOCKED(_sc) (!(_sc)->sc_tx_lock || mutex_owned((_sc)->sc_tx_lock))
+#define VIOIF_RX_LOCK(_sc) if ((_sc)->sc_rx_lock) mutex_enter((_sc)->sc_rx_lock)
+#define VIOIF_RX_UNLOCK(_sc) if ((_sc)->sc_rx_lock) mutex_exit((_sc)->sc_rx_lock)
+#define VIOIF_RX_LOCKED(_sc) (!(_sc)->sc_rx_lock || mutex_owned((_sc)->sc_rx_lock))
+
/* cfattach interface functions */
static int vioif_match(device_t, cfdata_t, void *);
static void vioif_attach(device_t, device_t, void *);
@@ -207,12 +221,14 @@
static void vioif_free_rx_mbuf(struct vioif_softc *, int);
static void vioif_populate_rx_mbufs(struct vioif_softc *);
static int vioif_rx_deq(struct vioif_softc *);
+static int vioif_rx_deq_locked(struct vioif_softc *);
static int vioif_rx_vq_done(struct virtqueue *);
static void vioif_rx_softint(void *);
static void vioif_rx_drain(struct vioif_softc *);
/* tx */
static int vioif_tx_vq_done(struct virtqueue *);
+static int vioif_tx_vq_done_locked(struct virtqueue *);
static void vioif_tx_drain(struct vioif_softc *);
/* other control */
@@ -460,6 +476,7 @@
struct virtio_softc *vsc = device_private(parent);
uint32_t features;
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+ u_int flags;
if (vsc->sc_child != NULL) {
aprint_normal(": child already attached for %s; "
@@ -478,6 +495,10 @@
vsc->sc_intrhand = virtio_vq_intr;
vsc->sc_flags = 0;
+#ifdef VIOIF_MPSAFE
+ vsc->sc_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
+#endif
+
features = virtio_negotiate_features(vsc,
(VIRTIO_NET_F_MAC |
VIRTIO_NET_F_STATUS |
@@ -540,6 +561,16 @@
"tx") != 0) {
goto err;
}
+
+#ifdef VIOIF_MPSAFE
+ sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
+ sc->sc_rx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
+#else
+ sc->sc_tx_lock = NULL;
+ sc->sc_rx_lock = NULL;
+#endif
+ sc->sc_stopping = false;
+
vsc->sc_nvqs = 2;
sc->sc_vq[1].vq_done = vioif_tx_vq_done;
virtio_start_vq_intr(vsc, &sc->sc_vq[0]);
@@ -558,8 +589,12 @@
}
}
- sc->sc_rx_softint = softint_establish(SOFTINT_NET,
- vioif_rx_softint, sc);
+#ifdef VIOIF_MPSAFE
+ flags = SOFTINT_NET | SOFTINT_MPSAFE;
+#else
+ flags = SOFTINT_NET;
+#endif
+ sc->sc_rx_softint = softint_establish(flags, vioif_rx_softint, sc);
if (sc->sc_rx_softint == NULL) {
aprint_error_dev(self, "cannot establish softint\n");
goto err;
@@ -586,6 +621,11 @@
return;
err:
+ if (sc->sc_tx_lock)
+ mutex_obj_free(sc->sc_tx_lock);
+ if (sc->sc_rx_lock)
+ mutex_obj_free(sc->sc_rx_lock);
+
if (vsc->sc_nvqs == 3) {
virtio_free_vq(vsc, &sc->sc_vq[2]);
cv_destroy(&sc->sc_ctrl_wait);
@@ -629,7 +669,12 @@
struct vioif_softc *sc = ifp->if_softc;
vioif_stop(ifp, 0);
+
+ /* Have to set false before vioif_populate_rx_mbufs */
+ sc->sc_stopping = false;
+
vioif_populate_rx_mbufs(sc);
+
vioif_updown(sc, true);
ifp->if_flags |= IFF_RUNNING;
ifp->if_flags &= ~IFF_OACTIVE;
@@ -644,6 +689,8 @@
struct vioif_softc *sc = ifp->if_softc;
struct virtio_softc *vsc = sc->sc_virtio;
+ sc->sc_stopping = true;
+
/* only way to stop I/O and DMA is resetting... */
virtio_reset(vsc);
vioif_rx_deq(sc);
@@ -672,20 +719,26 @@
struct mbuf *m;
int queued = 0, retry = 0;
+ VIOIF_TX_LOCK(sc);
+
if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
- return;
+ goto out;
+
+ if (sc->sc_stopping)
+ goto out;
for (;;) {
int slot, r;
- IFQ_POLL(&ifp->if_snd, m);
+ IFQ_DEQUEUE(&ifp->if_snd, m);
+
if (m == NULL)
break;
r = virtio_enqueue_prep(vsc, vq, &slot);
if (r == EAGAIN) {
ifp->if_flags |= IFF_OACTIVE;
- vioif_tx_vq_done(vq);
+ vioif_tx_vq_done_locked(vq);
if (retry++ == 0)
continue;
else
@@ -708,13 +761,13 @@
bus_dmamap_unload(vsc->sc_dmat,
sc->sc_tx_dmamaps[slot]);
ifp->if_flags |= IFF_OACTIVE;
- vioif_tx_vq_done(vq);
+ vioif_tx_vq_done_locked(vq);
if (retry++ == 0)
continue;
else
break;
}
- IFQ_DEQUEUE(&ifp->if_snd, m);
+
sc->sc_tx_mbufs[slot] = m;
memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
@@ -731,10 +784,18 @@
bpf_mtap(ifp, m);
}
+ if (m != NULL) {
+ ifp->if_flags |= IFF_OACTIVE;
+ m_freem(m);
+ }
+
if (queued > 0) {
virtio_enqueue_commit(vsc, vq, -1, true);
ifp->if_timer = 5;
}
+
+out:
+ VIOIF_TX_UNLOCK(sc);
}
static int
@@ -817,6 +878,11 @@
int i, r, ndone = 0;
struct virtqueue *vq = &sc->sc_vq[0]; /* rx vq */
+ VIOIF_RX_LOCK(sc);
+
+ if (sc->sc_stopping)
+ goto out;
+
for (i = 0; i < vq->vq_num; i++) {
int slot;
r = virtio_enqueue_prep(vsc, vq, &slot);
@@ -850,12 +916,30 @@
}
if (ndone > 0)
virtio_enqueue_commit(vsc, vq, -1, true);
+
+out:
+ VIOIF_RX_UNLOCK(sc);
}
/* dequeue received packets */
static int
vioif_rx_deq(struct vioif_softc *sc)
{
+ int r;
+
+ KASSERT(sc->sc_stopping);
+
+ VIOIF_RX_LOCK(sc);
+ r = vioif_rx_deq_locked(sc);
+ VIOIF_RX_UNLOCK(sc);
+
+ return r;
+}
+
+/* dequeue received packets */
+static int
+vioif_rx_deq_locked(struct vioif_softc *sc)
+{
struct virtio_softc *vsc = sc->sc_virtio;
struct virtqueue *vq = &sc->sc_vq[0];
struct ifnet *ifp = &sc->sc_ethercom.ec_if;
@@ -863,6 +947,8 @@
int r = 0;
int slot, len;
+ KASSERT(VIOIF_RX_LOCKED(sc));
+
while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
len -= sizeof(struct virtio_net_hdr);
r = 1;
@@ -881,7 +967,13 @@
m->m_len = m->m_pkthdr.len = len;
ifp->if_ipackets++;
bpf_mtap(ifp, m);
+
+ VIOIF_RX_UNLOCK(sc);
(*ifp->if_input)(ifp, m);
+ VIOIF_RX_LOCK(sc);
+
+ if (sc->sc_stopping)
+ break;
}
return r;
@@ -893,12 +985,19 @@
{
struct virtio_softc *vsc = vq->vq_owner;
struct vioif_softc *sc = device_private(vsc->sc_child);
- int r;
+ int r = 0;
+
+ VIOIF_RX_LOCK(sc);
- r = vioif_rx_deq(sc);
+ if (sc->sc_stopping)
+ goto out;
+
+ r = vioif_rx_deq_locked(sc);
if (r)
softint_schedule(sc->sc_rx_softint);
+out:
+ VIOIF_RX_UNLOCK(sc);
Home |
Main Index |
Thread Index |
Old Index