Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/dev/pci Don't destroy the dma maps if we're not disablin...
details: https://anonhg.NetBSD.org/src/rev/b2a0208266d7
branches: trunk
changeset: 446366:b2a0208266d7
user: bouyer <bouyer%NetBSD.org@localhost>
date: Sun Dec 02 17:02:04 2018 +0000
description:
Don't destroy the dma maps if we're not disabling the adapter, avoids
a KASSERT() when bus_dmamap_destroy() is called from interrupt
context via bge_watchdog()
Set IFF_OACTIVE only when bge_encap() fails on adapter resource shortage.
Otherwise we may set IFF_OACTIVE while no transmit is in progress, and
nothing will clear it.
If bus_dmamap_load_mbuf() fails with EFBIG, m_defrag() the chain and retry.
Refine the check for the 4GB boundary workaround (a fragment should also
not cross the boundary), and do it only for TSO.
If bge_encap() fails and didn't set IFF_OACTIVE, drop the packet.
Bring in more hardware bug workarounds from freebsd.
With these it seems that a BCM5720 A0 can survive a few hours of internet
load with TSO4 enabled.
diffstat:
sys/dev/pci/if_bge.c | 165 ++++++++++++++++++++++++++++++++++++++---------
sys/dev/pci/if_bgereg.h | 7 +-
2 files changed, 139 insertions(+), 33 deletions(-)
diffs (truncated from 392 to 300 lines):
diff -r 442007f3b5d1 -r b2a0208266d7 sys/dev/pci/if_bge.c
--- a/sys/dev/pci/if_bge.c Sun Dec 02 16:58:13 2018 +0000
+++ b/sys/dev/pci/if_bge.c Sun Dec 02 17:02:04 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_bge.c,v 1.319 2018/11/30 17:53:08 jdolecek Exp $ */
+/* $NetBSD: if_bge.c,v 1.320 2018/12/02 17:02:04 bouyer Exp $ */
/*
* Copyright (c) 2001 Wind River Systems
@@ -79,7 +79,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.319 2018/11/30 17:53:08 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.320 2018/12/02 17:02:04 bouyer Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -233,10 +233,10 @@
bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
-static void bge_free_rx_ring_std(struct bge_softc *);
+static void bge_free_rx_ring_std(struct bge_softc *m, bool);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
-static void bge_free_tx_ring(struct bge_softc *);
+static void bge_free_tx_ring(struct bge_softc *m, bool);
static int bge_init_tx_ring(struct bge_softc *);
static int bge_chipinit(struct bge_softc *);
@@ -1713,6 +1713,9 @@
struct bge_rx_bd *r;
int error;
+ if (dmamap == NULL)
+ dmamap = sc->bge_cdata.bge_rx_std_map[i];
+
if (dmamap == NULL) {
error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
@@ -1852,7 +1855,7 @@
}
static void
-bge_free_rx_ring_std(struct bge_softc *sc)
+bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
{
int i;
@@ -1863,8 +1866,11 @@
if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
sc->bge_cdata.bge_rx_std_chain[i] = NULL;
- bus_dmamap_destroy(sc->bge_dmatag,
- sc->bge_cdata.bge_rx_std_map[i]);
+ if (disable) {
+ bus_dmamap_destroy(sc->bge_dmatag,
+ sc->bge_cdata.bge_rx_std_map[i]);
+ sc->bge_cdata.bge_rx_std_map[i] = NULL;
+ }
}
memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
sizeof(struct bge_rx_bd));
@@ -1920,7 +1926,7 @@
}
static void
-bge_free_tx_ring(struct bge_softc *sc)
+bge_free_tx_ring(struct bge_softc *sc, bool disable)
{
int i;
struct txdmamap_pool_entry *dma;
@@ -1940,12 +1946,17 @@
sizeof(struct bge_tx_bd));
}
- while ((dma = SLIST_FIRST(&sc->txdma_list))) {
- SLIST_REMOVE_HEAD(&sc->txdma_list, link);
- bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
- if (sc->bge_dma64)
- bus_dmamap_destroy(sc->bge_dmatag32, dma->dmamap32);
- free(dma, M_DEVBUF);
+ if (disable) {
+ while ((dma = SLIST_FIRST(&sc->txdma_list))) {
+ SLIST_REMOVE_HEAD(&sc->txdma_list, link);
+ bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
+ if (sc->bge_dma64) {
+ bus_dmamap_destroy(sc->bge_dmatag32,
+ dma->dmamap32);
+ }
+ free(dma, M_DEVBUF);
+ }
+ SLIST_INIT(&sc->txdma_list);
}
sc->bge_flags &= ~BGEF_TXRING_VALID;
@@ -1988,7 +1999,9 @@
else
maxsegsz = ETHER_MAX_LEN_JUMBO;
- SLIST_INIT(&sc->txdma_list);
+ if (SLIST_FIRST(&sc->txdma_list) != NULL)
+ goto alloc_done;
+
for (i = 0; i < BGE_TX_RING_CNT; i++) {
if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
@@ -2021,7 +2034,7 @@
dma->dmamap32 = dmamap32;
SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
}
-
+alloc_done:
sc->bge_flags |= BGEF_TXRING_VALID;
return 0;
@@ -3141,12 +3154,30 @@
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
}
-
/* Turn on read DMA state machine */
CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
/* 5718 step 52 */
delay(40);
+ if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
+ BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
+ for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
+ val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
+ if ((val & 0xFFFF) > BGE_FRAMELEN)
+ break;
+ if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
+ break;
+ }
+ if (i != BGE_NUM_RDMA_CHANNELS / 2) {
+ val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
+ if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
+ val |= BGE_RDMA_TX_LENGTH_WA_5719;
+ else
+ val |= BGE_RDMA_TX_LENGTH_WA_5720;
+ CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
+ }
+ }
+
/* 5718 step 56, 57XX step 84 */
/* Turn on RX data completion state machine */
CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
@@ -3913,6 +3944,7 @@
}
}
}
+ SLIST_INIT(&sc->txdma_list);
DPRINTFN(5, ("bus_dmamem_alloc\n"));
if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
@@ -4592,7 +4624,7 @@
sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
stdcnt++;
dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
- sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
+ sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
if (dmamap == NULL) {
ifp->if_ierrors++;
bge_newbuf_std(sc, sc->bge_std, m, dmamap);
@@ -4940,7 +4972,17 @@
ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
- ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+ /*
+ * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
+ * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
+ * (silicon bug). There's no reliable workaround so just
+ * ignore the counter
+ */
+ if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
+ BGE_ASICREV(sc->bge_chipid) != BGE_CHIPID_BCM5719_A0 &&
+ BGE_ASICREV(sc->bge_chipid) != BGE_CHIPID_BCM5720_A0) {
+ ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+ }
ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
}
@@ -5159,6 +5201,7 @@
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
+ struct ifnet *ifp = &sc->ethercom.ec_if;
struct bge_tx_bd *f, *prev_f;
uint32_t frag, cur;
uint16_t csum_flags = 0;
@@ -5170,6 +5213,7 @@
int use_tso, maxsegsize, error;
bool have_vtag;
uint16_t vtag;
+ bool remap;
if (m_head->m_pkthdr.csum_flags) {
if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
@@ -5193,7 +5237,7 @@
goto check_dma_bug;
if (bge_cksum_pad(m_head) != 0)
- return ENOBUFS;
+ return ENOBUFS;
check_dma_bug:
if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
@@ -5209,8 +5253,10 @@
doit:
dma = SLIST_FIRST(&sc->txdma_list);
- if (dma == NULL)
+ if (dma == NULL) {
+ ifp->if_flags |= IFF_OACTIVE;
return ENOBUFS;
+ }
dmamap = dma->dmamap;
dmatag = sc->bge_dmatag;
dma->is_dma32 = false;
@@ -5366,11 +5412,22 @@
* the fragment pointers. Stop when we run out
* of fragments or hit the end of the mbuf chain.
*/
+ remap = true;
load_again:
error = bus_dmamap_load_mbuf(dmatag, dmamap,
m_head, BUS_DMA_NOWAIT);
- if (error)
- return ENOBUFS;
+ if (__predict_false(error)) {
+ if (error == EFBIG && remap) {
+ struct mbuf *m;
+ remap = false;
+ m = m_defrag(m_head, M_NOWAIT);
+ if (m != NULL) {
+ KASSERT(m == m_head);
+ goto load_again;
+ }
+ }
+ return error;
+ }
/*
* Sanity check: avoid coming within 16 descriptors
* of the end of the ring.
@@ -5393,8 +5450,12 @@
BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
f->bge_len = dmamap->dm_segs[i].ds_len;
- if (dma->is_dma32 == false && prev_f != NULL &&
- prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi) {
+ if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
+ (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
+ ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
+ (prev_f != NULL &&
+ prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
+ ) {
/*
* watchdog timeout issue was observed with TSO,
* limiting DMA address space to 32bits seems to
@@ -5404,6 +5465,7 @@
dmatag = sc->bge_dmatag32;
dmamap = dma->dmamap32;
dma->is_dma32 = true;
+ remap = true;
goto load_again;
}
@@ -5466,6 +5528,7 @@
fail_unload:
bus_dmamap_unload(dmatag, dmamap);
+ ifp->if_flags |= IFF_OACTIVE;
return ENOBUFS;
}
@@ -5479,8 +5542,10 @@
{
struct bge_softc *sc;
struct mbuf *m_head = NULL;
+ struct mbuf *m;
uint32_t prodidx;
int pkts = 0;
+ int error;
sc = ifp->if_softc;
@@ -5518,13 +5583,21 @@
* don't have room, set the OACTIVE flag and wait
* for the NIC to drain the ring.
*/
- if (bge_encap(sc, m_head, &prodidx)) {
- ifp->if_flags |= IFF_OACTIVE;
- break;
+ error = bge_encap(sc, m_head, &prodidx);
+ if (__predict_false(error)) {
+ if (ifp->if_flags & IFF_OACTIVE) {
+ /* just wait for the transmit ring to drain */
Home |
Main Index |
Thread Index |
Old Index