Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/broadcom Make sure to zero sc_work_flags in ifstop.
details: https://anonhg.NetBSD.org/src/rev/17a313e78a49
branches: trunk
changeset: 782586:17a313e78a49
user: matt <matt@NetBSD.org>
date: Thu Nov 08 00:59:12 2012 +0000
description:
Make sure to zero sc_work_flags in ifstop.
Return immediately in ifstart if IFF_RUNNING is not set.
Make the RCVMAGIC checks conditional on BCMETH_RCVMAGIC being defined.
Release the softc lock when calling if_input.
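As an illustration of the last point, the hand-off pattern the diff below adopts looks roughly like this minimal sketch. It is not the committed code: example_softc, sc_lock, and EXAMPLE_MPSAFE are hypothetical stand-ins for the corresponding bcmeth names in the diff.

static void
example_rx_input(struct example_softc *sc, struct ifnet *ifp, struct mbuf *m)
{
#ifdef EXAMPLE_MPSAFE
	/*
	 * The softc lock must not be held across if_input(): the
	 * stack may call back into the driver (e.g. via ifstart)
	 * and try to take the same lock.
	 */
	mutex_exit(sc->sc_lock);
	(*ifp->if_input)(ifp, m);
	mutex_enter(sc->sc_lock);	/* ring state may have moved meanwhile */
#else
	int s = splnet();		/* classic spl-protected path */
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	splx(s);
#endif
}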
diffstat:
sys/arch/arm/broadcom/bcm53xx_eth.c | 191 ++++++++++++++++++++++++++++++++---
1 files changed, 171 insertions(+), 20 deletions(-)
diffs (truncated from 329 to 300 lines):
diff -r 7d40ec171f80 -r 17a313e78a49 sys/arch/arm/broadcom/bcm53xx_eth.c
--- a/sys/arch/arm/broadcom/bcm53xx_eth.c Thu Nov 08 00:34:37 2012 +0000
+++ b/sys/arch/arm/broadcom/bcm53xx_eth.c Thu Nov 08 00:59:12 2012 +0000
@@ -34,7 +34,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.15 2012/11/01 21:33:12 matt Exp $");
+__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.16 2012/11/08 00:59:12 matt Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@@ -61,7 +61,9 @@
#include <arm/broadcom/bcm53xx_reg.h>
#include <arm/broadcom/bcm53xx_var.h>
-#define BCMETH_RCVOFFSET 6
+//#define BCMETH_MPSAFE
+
+#define BCMETH_RCVOFFSET 10
#define BCMETH_MAXTXMBUFS 128
#define BCMETH_NTXSEGS 30
#define BCMETH_MAXRXMBUFS 255
@@ -69,7 +71,9 @@
#define BCMETH_NRXSEGS 1
#define BCMETH_RINGSIZE PAGE_SIZE
+#if 0
#define BCMETH_RCVMAGIC 0xfeedface
+#endif
static int bcmeth_ccb_match(device_t, cfdata_t, void *);
static void bcmeth_ccb_attach(device_t, device_t, void *);
@@ -204,6 +208,9 @@
struct bcmeth_rxqueue *);
static int bcmeth_intr(void *);
+#ifdef BCMETH_MPSAFETX
+static void bcmeth_soft_txintr(struct bcmeth_softc *);
+#endif
static void bcmeth_soft_intr(void *);
static void bcmeth_worker(struct work *, void *);
@@ -361,6 +368,9 @@
ifp->if_baudrate = IF_Mbps(1000);
ifp->if_capabilities = 0;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+#ifdef BCMETH_MPSAFE
+ ifp->if_flags2 = IFF2_MPSAFE;
+#endif
ifp->if_ioctl = bcmeth_ifioctl;
ifp->if_start = bcmeth_ifstart;
ifp->if_watchdog = bcmeth_ifwatchdog;
@@ -541,6 +551,7 @@
KASSERT(!cpu_intr_p());
sc->sc_soft_flags = 0;
+ sc->sc_work_flags = 0;
/* Disable Rx processing */
bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
@@ -842,12 +853,16 @@
}
KASSERT(((map->_dm_flags ^ sc->sc_dmat->_ranges[0].dr_flags) & _BUS_DMAMAP_COHERENT) == 0);
KASSERT(map->dm_mapsize == MCLBYTES);
+#ifdef BCMETH_RCVMAGIC
*mtod(m, uint32_t *) = BCMETH_RCVMAGIC;
bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
+#else
+ bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
+ BUS_DMASYNC_PREREAD);
+#endif
return m;
}
@@ -955,10 +970,16 @@
/*
* Let's give it to the network subsystem to deal with.
*/
+#ifdef BCMETH_MPSAFE
+ mutex_exit(sc->sc_lock);
+ (*ifp->if_input)(ifp, m);
+ mutex_enter(sc->sc_lock);
+#else
int s = splnet();
bpf_mtap(ifp, m);
(*ifp->if_input)(ifp, m);
splx(s);
+#endif
}
static void
@@ -1006,7 +1027,11 @@
* Get the count of descriptors. Fetch the correct number
* of mbufs.
*/
+#ifdef BCMETH_RCVMAGIC
size_t desc_count = rxsts != BCMETH_RCVMAGIC ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
+#else
+ size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
+#endif
struct mbuf *m = rxq->rxq_mhead;
struct mbuf *m_last = m;
for (size_t i = 1; i < desc_count; i++) {
@@ -1027,6 +1052,7 @@
rxq->rxq_mtail = &rxq->rxq_mhead;
m_last->m_next = NULL;
+#ifdef BCMETH_RCVMAGIC
if (rxsts == BCMETH_RCVMAGIC) {
ifp->if_ierrors++;
if ((m->m_ext.ext_paddr >> 28) == 8) {
@@ -1035,7 +1061,9 @@
sc->sc_ev_rx_badmagic_hi.ev_count++;
}
IF_ENQUEUE(&sc->sc_rx_bufcache, m);
- } else if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
+ } else
+#endif /* BCMETH_RCVMAGIC */
+ if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
consumer - rxq->rxq_first, desc_count, rxsts);
/*
@@ -1059,7 +1087,34 @@
} else {
m_last->m_len = framelen & (MCLBYTES - 1);
}
+
+#ifdef BCMETH_MPSAFE
+ /*
+ * Wrap at the last entry!
+ */
+ if (++consumer == rxq->rxq_last) {
+ KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
+ rxq->rxq_consumer = rxq->rxq_first;
+ } else {
+ rxq->rxq_consumer = consumer;
+ }
+ rxq->rxq_inuse -= rxconsumed;
+#endif /* BCMETH_MPSAFE */
+
+ /*
+ * Receive the packet (which releases our lock)
+ */
bcmeth_rx_input(sc, m, rxsts);
+
+#ifdef BCMETH_MPSAFE
+ /*
+ * Since we had to give up our lock, we need to
+ * refresh these.
+ */
+ consumer = rxq->rxq_consumer;
+ rxconsumed = 0;
+ continue;
+#endif /* BCMETH_MPSAFE */
}
/*
@@ -1362,6 +1417,64 @@
return true;
}
+static struct mbuf *
+bcmeth_copy_packet(struct mbuf *m)
+{
+ struct mbuf *mext = NULL;
+ size_t misalignment = 0;
+ size_t hlen = 0;
+
+ for (mext = m; mext != NULL; mext = mext->m_next) {
+ if (mext->m_flags & M_EXT) {
+ misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
+ break;
+ }
+ hlen += m->m_len;
+ }
+
+ struct mbuf *n = m->m_next;
+ if (m != mext && hlen + misalignment <= MHLEN && false) {
+ KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
+ size_t oldoff = m->m_data - m->m_pktdat;
+ size_t off;
+ if (mext == NULL) {
+ off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
+ } else {
+ off = MHLEN - (hlen + misalignment);
+ }
+ KASSERT(off + hlen + misalignment <= MHLEN);
+ if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
+ memmove(&m->m_pktdat[off], m->m_data, m->m_len);
+ m->m_data = &m->m_pktdat[off];
+ }
+ m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
+ m->m_len = hlen;
+ m->m_next = mext;
+ while (n != mext) {
+ n = m_free(n);
+ }
+ return m;
+ }
+
+ struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
+ if (m0 == NULL) {
+ return NULL;
+ }
+ M_COPY_PKTHDR(m0, m);
+ MCLAIM(m0, m->m_owner);
+ if (m0->m_pkthdr.len > MHLEN) {
+ MCLGET(m0, M_DONTWAIT);
+ if ((m0->m_flags & M_EXT) == 0) {
+ m_freem(m0);
+ return NULL;
+ }
+ }
+ m0->m_len = m->m_pkthdr.len;
+ m_copydata(m, 0, m0->m_len, mtod(m0, void *));
+ m_freem(m);
+ return m0;
+}
+
static bool
bcmeth_txq_enqueue(
struct bcmeth_softc *sc,
@@ -1386,24 +1499,11 @@
* consolidate it into a single mbuf.
*/
if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
- struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
+ struct mbuf *m0 = bcmeth_copy_packet(m);
if (m0 == NULL) {
txq->txq_next = m;
return true;
}
- M_COPY_PKTHDR(m0, m);
- MCLAIM(m0, m->m_owner);
- if (m0->m_pkthdr.len > MHLEN) {
- MCLGET(m0, M_DONTWAIT);
- if ((m0->m_flags & M_EXT) == 0) {
- m_freem(m0);
- txq->txq_next = m;
- return true;
- }
- }
- m0->m_len = m->m_pkthdr.len;
- m_copydata(m, 0, m0->m_len, mtod(m0, void *));
- m_freem(m);
m = m0;
}
int error = bcmeth_txq_map_load(sc, txq, m);
@@ -1577,8 +1677,25 @@
{
struct bcmeth_softc * const sc = ifp->if_softc;
- atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
- softint_schedule(sc->sc_soft_ih);
+ if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
+ return;
+ }
+
+#ifdef BCMETH_MPSAFETX
+ if (cpu_intr_p()) {
+#endif
+ atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
+ softint_schedule(sc->sc_soft_ih);
+#ifdef BCMETH_MPSAFETX
+ } else {
+ /*
+ * Either we are in a softintr thread already or some other
+ * thread, so just borrow it to do the send and save ourselves
+ * the overhead of a fast soft int.
+ */
+ bcmeth_soft_txintr(sc);
+ }
+#endif
}
int
@@ -1693,6 +1810,32 @@
return rv;
}
+#ifdef BCMETH_MPSAFETX
+void
+bcmeth_soft_txintr(struct bcmeth_softc *sc)
+{
+ mutex_enter(sc->sc_lock);
+ /*
+ * Let's do what we came here for. Consume transmitted
+ * packets off the transmit ring.
+ */
+ if (!bcmeth_txq_consume(sc, &sc->sc_txq)
+ || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
+ sc->sc_ev_tx_stall.ev_count++;
+ sc->sc_if.if_flags |= IFF_OACTIVE;
+ } else {
+ sc->sc_if.if_flags &= ~IFF_OACTIVE;
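
For reference, the new dispatch in the transmit-restart path shown in the ifstart and soft-txintr hunks above reduces to the following sketch; the example_* names are hypothetical stand-ins for the bcmeth_* functions, not a public API.

static void
example_txrestart(struct example_softc *sc)
{
	if (cpu_intr_p()) {
		/* Hard interrupt context: defer to the soft interrupt. */
		atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
		softint_schedule(sc->sc_soft_ih);
	} else {
		/*
		 * Thread or softint context: borrow the current thread
		 * to reclaim completed transmits inline and skip the
		 * overhead of scheduling a soft interrupt.
		 */
		example_soft_txintr(sc);	/* takes and drops sc_lock */
	}
}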