Source-Changes-HG archive
[src/trunk]: src/sys/dev/pci msk(4): rework rx descriptor loading to support multiple segments
details: https://anonhg.NetBSD.org/src/rev/9485d8c7d866
branches: trunk
changeset: 1009650:9485d8c7d866
user: jakllsch <jakllsch@NetBSD.org>
date: Thu Apr 30 01:52:08 2020 +0000
description:
msk(4): rework rx descriptor loading to support multiple segments
This paves the way to replace the driver-internal jumbo frame rx buffer
with other receive buffers (for example MCLGET/MEXTMALLOC) in the future.
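The core of the change is in msk_newbuf(): instead of computing a single
DMA address inside the driver-internal jumbo buffer, it now calls
bus_dmamap_load_mbuf() and walks the resulting segment list, emitting one
PACKET/BUFFER descriptor per segment plus an ADDR64 descriptor whenever
the upper 32 bits of the DMA address change. For context, a minimal
sketch of the cluster-backed allocation this enables; the MCLGET path is
not part of this commit, and the msk_newbuf_cluster() helper below is
purely illustrative:

/*
 * Hypothetical sketch, not part of this commit: allocate a
 * cluster-backed rx mbuf and load it with bus_dmamap_load_mbuf(),
 * which may produce more than one DMA segment -- exactly the case
 * the reworked msk_newbuf() is prepared to handle.
 */
static int
msk_newbuf_cluster(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	uint32_t prod = sc_if->sk_cdata.sk_rx_prod;
	bus_dmamap_t rxmap = sc_if->sk_cdata.sk_rx_chain[prod].sk_dmamap;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);	/* keep the IP header aligned */

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, rxmap, m, BUS_DMA_NOWAIT)) {
		m_freem(m);
		return ENOBUFS;
	}
	/* ... walk rxmap->dm_segs[0 .. dm_nsegs - 1] into the rx ring ... */
	return 0;
}

A single MCLBYTES cluster is physically contiguous, so dm_nsegs will
usually be 1 here; the point of the rework below is that the descriptor
loop no longer assumes it.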
diffstat:
sys/dev/pci/if_msk.c | 174 ++++++++++++++++++++++++++++++++++++++------------
1 files changed, 130 insertions(+), 44 deletions(-)
diffs (268 lines):
diff -r c2de1a529890 -r 9485d8c7d866 sys/dev/pci/if_msk.c
--- a/sys/dev/pci/if_msk.c Thu Apr 30 00:48:10 2020 +0000
+++ b/sys/dev/pci/if_msk.c Thu Apr 30 01:52:08 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_msk.c,v 1.105 2020/04/29 20:03:52 jakllsch Exp $ */
+/* $NetBSD: if_msk.c,v 1.106 2020/04/30 01:52:08 jakllsch Exp $ */
/* $OpenBSD: if_msk.c,v 1.79 2009/10/15 17:54:56 deraadt Exp $ */
/*
@@ -52,7 +52,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.105 2020/04/29 20:03:52 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.106 2020/04/30 01:52:08 jakllsch Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -113,7 +113,7 @@
static void msk_init_yukon(struct sk_if_softc *);
static void msk_stop(struct ifnet *, int);
static void msk_watchdog(struct ifnet *);
-static int msk_newbuf(struct sk_if_softc *, bus_dmamap_t);
+static int msk_newbuf(struct sk_if_softc *);
static int msk_alloc_jumbo_mem(struct sk_if_softc *);
static void *msk_jalloc(struct sk_if_softc *);
static void msk_jfree(struct mbuf *, void *, size_t, void *);
@@ -472,13 +472,18 @@
}
static int
-msk_newbuf(struct sk_if_softc *sc_if, bus_dmamap_t dmamap)
+msk_newbuf(struct sk_if_softc *sc_if)
{
+ struct sk_softc *sc = sc_if->sk_softc;
struct mbuf *m_new = NULL;
struct sk_chain *c;
struct msk_rx_desc *r;
void *buf = NULL;
bus_addr_t addr;
+ bus_dmamap_t rxmap;
+ size_t i;
+ uint32_t rxidx, frag, cur, hiaddr, old_hiaddr, total;
+ uint32_t entries = 0;
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL)
@@ -499,44 +504,99 @@
m_adj(m_new, ETHER_ALIGN);
- addr = dmamap->dm_segs[0].ds_addr +
- ((vaddr_t)m_new->m_data -
- (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf);
-
- if (sc_if->sk_cdata.sk_rx_hiaddr != MSK_ADDR_HI(addr)) {
- c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
- r = &sc_if->sk_rdata->sk_rx_ring[sc_if->sk_cdata.sk_rx_prod];
- c->sk_mbuf = NULL;
- r->sk_addr = htole32(MSK_ADDR_HI(addr));
- r->sk_len = 0;
+ rxidx = frag = cur = sc_if->sk_cdata.sk_rx_prod;
+ rxmap = sc_if->sk_cdata.sk_rx_chain[rxidx].sk_dmamap;
+
+ if (bus_dmamap_load_mbuf(sc->sc_dmatag, rxmap, m_new, BUS_DMA_NOWAIT)) {
+ DPRINTFN(2, ("msk_newbuf: dmamap_load failed\n"));
+ m_freem(m_new);
+ return ENOBUFS;
+ }
+
+ /* Count how many rx descriptors needed. */
+ hiaddr = sc_if->sk_cdata.sk_rx_hiaddr;
+ for (total = i = 0; i < rxmap->dm_nsegs; i++) {
+ if (hiaddr != MSK_ADDR_HI(rxmap->dm_segs[i].ds_addr)) {
+ hiaddr = MSK_ADDR_HI(rxmap->dm_segs[i].ds_addr);
+ total++;
+ }
+ total++;
+ }
+
+ if (total > MSK_RX_RING_CNT - sc_if->sk_cdata.sk_rx_cnt - 1) {
+ DPRINTFN(2, ("msk_newbuf: too few descriptors free\n"));
+ bus_dmamap_unload(sc->sc_dmatag, rxmap);
+ m_freem(m_new);
+ return ENOBUFS;
+ }
+
+ DPRINTFN(2, ("msk_newbuf: dm_nsegs=%d total desc=%u\n",
+ rxmap->dm_nsegs, total));
+
+ /* Sync the DMA map. */
+ bus_dmamap_sync(sc->sc_dmatag, rxmap, 0, rxmap->dm_mapsize,
+ BUS_DMASYNC_PREREAD);
+
+ old_hiaddr = sc_if->sk_cdata.sk_rx_hiaddr;
+ for (i = 0; i < rxmap->dm_nsegs; i++) {
+ addr = rxmap->dm_segs[i].ds_addr;
+ DPRINTFN(2, ("msk_newbuf: addr %llx\n",
+ (unsigned long long)addr));
+ hiaddr = MSK_ADDR_HI(addr);
+
+ if (sc_if->sk_cdata.sk_rx_hiaddr != hiaddr) {
+ c = &sc_if->sk_cdata.sk_rx_chain[frag];
+ c->sk_mbuf = NULL;
+ r = &sc_if->sk_rdata->sk_rx_ring[frag];
+ r->sk_addr = htole32(hiaddr);
+ r->sk_len = 0;
+ r->sk_ctl = 0;
+ if (i == 0)
+ r->sk_opcode = SK_Y2_BMUOPC_ADDR64;
+ else
+ r->sk_opcode = SK_Y2_BMUOPC_ADDR64 |
+ SK_Y2_RXOPC_OWN;
+ sc_if->sk_cdata.sk_rx_hiaddr = hiaddr;
+ MSK_CDRXSYNC(sc_if, frag,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+ SK_INC(frag, MSK_RX_RING_CNT);
+ entries++;
+ DPRINTFN(10, ("%s: rx ADDR64: %#x\n",
+ sc_if->sk_ethercom.ec_if.if_xname, hiaddr));
+ }
+
+ c = &sc_if->sk_cdata.sk_rx_chain[frag];
+ r = &sc_if->sk_rdata->sk_rx_ring[frag];
+ r->sk_addr = htole32(MSK_ADDR_LO(addr));
+ r->sk_len = htole16(rxmap->dm_segs[i].ds_len);
r->sk_ctl = 0;
- r->sk_opcode = SK_Y2_BMUOPC_ADDR64 | SK_Y2_RXOPC_OWN;
- sc_if->sk_cdata.sk_rx_hiaddr = MSK_ADDR_HI(addr);
-
- MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
+ if (i == 0) {
+ if (hiaddr != old_hiaddr)
+ r->sk_opcode = SK_Y2_RXOPC_PACKET |
+ SK_Y2_RXOPC_OWN;
+ else
+ r->sk_opcode = SK_Y2_RXOPC_PACKET;
+ } else
+ r->sk_opcode = SK_Y2_RXOPC_BUFFER | SK_Y2_RXOPC_OWN;
+ MSK_CDRXSYNC(sc_if, frag,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
-
- SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
- sc_if->sk_cdata.sk_rx_cnt++;
-
- DPRINTFN(10, ("%s: rx ADDR64: %#x\n",
- sc_if->sk_ethercom.ec_if.if_xname,
- (unsigned)MSK_ADDR_HI(addr)));
+ cur = frag;
+ SK_INC(frag, MSK_RX_RING_CNT);
+ entries++;
}
-
- c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
- r = &sc_if->sk_rdata->sk_rx_ring[sc_if->sk_cdata.sk_rx_prod];
- c->sk_mbuf = m_new;
- r->sk_addr = htole32(MSK_ADDR_LO(addr));
- r->sk_len = htole16(SK_JLEN);
- r->sk_ctl = 0;
- r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;
-
- MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
+ KASSERTMSG(entries == total, "entries %u total %u", entries, total);
+
+ sc_if->sk_cdata.sk_rx_chain[rxidx].sk_dmamap =
+ sc_if->sk_cdata.sk_rx_chain[cur].sk_dmamap;
+ sc_if->sk_cdata.sk_rx_chain[cur].sk_mbuf = m_new;
+ sc_if->sk_cdata.sk_rx_chain[cur].sk_dmamap = rxmap;
+
+ sc_if->sk_rdata->sk_rx_ring[rxidx].sk_opcode |= SK_Y2_RXOPC_OWN;
+ MSK_CDRXSYNC(sc_if, rxidx,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
- SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
- sc_if->sk_cdata.sk_rx_cnt++;
+ sc_if->sk_cdata.sk_rx_cnt += entries;
+ sc_if->sk_cdata.sk_rx_prod = frag;
return 0;
}
@@ -1189,6 +1249,20 @@
sc_if->sk_cdata.sk_tx_chain[i].sk_dmamap = dmamap;
}
+ for (i = 0; i < MSK_RX_RING_CNT; i++) {
+ sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
+
+ if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN,
+ howmany(SK_JLEN + 1, NBPG),
+ SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap)) {
+ aprint_error_dev(sc_if->sk_dev,
+ "Can't create RX dmamap\n");
+ goto fail_3;
+ }
+
+ sc_if->sk_cdata.sk_rx_chain[i].sk_dmamap = dmamap;
+ }
+
sc_if->sk_rdata = (struct msk_ring_data *)kva;
memset(sc_if->sk_rdata, 0, sizeof(struct msk_ring_data));
@@ -1303,6 +1377,11 @@
sc_if->sk_cdata.sk_tx_chain[i].sk_dmamap);
}
+ for (i = 0; i < MSK_RX_RING_CNT; i++) {
+ bus_dmamap_destroy(sc->sc_dmatag,
+ sc_if->sk_cdata.sk_rx_chain[i].sk_dmamap);
+ }
+
if (--sc->rnd_attached == 0)
rnd_detach_source(&sc->rnd_source);
@@ -2027,13 +2106,13 @@
cur = sc_if->sk_cdata.sk_rx_cons;
prod = sc_if->sk_cdata.sk_rx_prod;
- /* Sync the descriptor */
- MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
DPRINTFN(2, ("msk_rxeof: cur %u prod %u rx_cnt %u\n", cur, prod,
sc_if->sk_cdata.sk_rx_cnt));
while (prod != cur) {
+ MSK_CDRXSYNC(sc_if, cur,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
tail = cur;
SK_INC(cur, MSK_RX_RING_CNT);
@@ -2050,10 +2129,11 @@
if (m == NULL)
return;
- dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;
+ dmamap = sc_if->sk_cdata.sk_rx_chain[tail].sk_dmamap;
bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
- dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ uimin(dmamap->dm_mapsize, total_len), BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmatag, dmamap);
if (total_len < SK_MIN_FRAMELEN ||
total_len > ETHER_MAX_LEN_JUMBO ||
@@ -2132,8 +2212,7 @@
{
/* Make sure to not completely wrap around */
while (sc_if->sk_cdata.sk_rx_cnt < (MSK_RX_RING_CNT - 1)) {
- if (msk_newbuf(sc_if,
- sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
+ if (msk_newbuf(sc_if) == ENOBUFS) {
goto schedretry;
}
}
@@ -2640,6 +2719,13 @@
/* Free RX and TX mbufs still in the queues. */
for (i = 0; i < MSK_RX_RING_CNT; i++) {
if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
+ dmamap = sc_if->sk_cdata.sk_rx_chain[i].sk_dmamap;
+
+ bus_dmamap_sync(sc->sc_dmatag, dmamap, 0,
+ dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+
+ bus_dmamap_unload(sc->sc_dmatag, dmamap);
+
m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
}
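One detail worth calling out in the reworked msk_newbuf(): every
descriptor after the first is written with SK_Y2_RXOPC_OWN already set,
and the OWN bit of the first descriptor (at rxidx) is only OR-ed in, and
synced, after the rest of the chain has been written out, so the chip can
never start consuming a half-built chain. A condensed sketch of that
publish pattern, with the ADDR64 handling omitted and with hypothetical
fill_desc()/sync_desc() stand-ins for the per-segment setup and
MSK_CDRXSYNC():

/*
 * Illustrative only: simplified from msk_newbuf() above.
 * fill_desc() and sync_desc() are hypothetical, not driver functions.
 */
first = frag = sc_if->sk_cdata.sk_rx_prod;
for (i = 0; i < rxmap->dm_nsegs; i++) {
	fill_desc(&ring[frag], &rxmap->dm_segs[i]);
	ring[frag].sk_opcode = (i == 0) ?
	    SK_Y2_RXOPC_PACKET :			/* OWN deferred */
	    (SK_Y2_RXOPC_BUFFER | SK_Y2_RXOPC_OWN);
	sync_desc(frag);
	SK_INC(frag, MSK_RX_RING_CNT);
}
/* The whole chain is now visible to the device... */
ring[first].sk_opcode |= SK_Y2_RXOPC_OWN;	/* ...hand it over last. */
sync_desc(first);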