Source-Changes-HG archive
[src/trunk]: src/sys/dev/pci msk(4): add 64-bit DMA support
details: https://anonhg.NetBSD.org/src/rev/4877b206c762
branches: trunk
changeset: 433434:4877b206c762
user: jakllsch <jakllsch%NetBSD.org@localhost>
date: Fri Sep 14 18:46:47 2018 +0000
description:
msk(4): add 64-bit DMA support
diffstat:
sys/dev/pci/if_msk.c | 219 +++++++++++++++++++++++++++++++++++------------
sys/dev/pci/if_mskvar.h | 16 +-
sys/dev/pci/if_skreg.h | 4 +-
3 files changed, 175 insertions(+), 64 deletions(-)
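The MSK_ADDR_LO/MSK_ADDR_HI macros added near the top of if_msk.c in the diff below split a bus address into the two 32-bit halves the Yukon 2 descriptor format expects: a data descriptor carries only the low word, and an extra ADDR64 descriptor is written whenever the high word differs from the value last programmed into the ring. The following standalone sketch is not part of the commit; it only illustrates that splitting idea, and the sample addresses are made up for illustration.

/*
 * Illustrative sketch only -- not from the commit.  Shows how a 64-bit
 * DMA address is carried in two descriptor writes: the low 32 bits in a
 * regular descriptor, preceded by an ADDR64 descriptor whenever the
 * high 32 bits change.
 */
#include <stdint.h>
#include <stdio.h>

#define MSK_ADDR_LO(x) ((uint64_t)(x) & 0xffffffffUL)
#define MSK_ADDR_HI(x) ((uint64_t)(x) >> 32)

int
main(void)
{
	/* Hypothetical DMA addresses, one below and one above 4 GiB. */
	uint64_t addrs[] = { 0x00000000deadbeefULL, 0x000000012345f000ULL };
	uint32_t hiaddr = 0;	/* high word currently programmed in the ring */

	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		if (hiaddr != MSK_ADDR_HI(addrs[i])) {
			/* The driver would emit an ADDR64 descriptor here. */
			hiaddr = (uint32_t)MSK_ADDR_HI(addrs[i]);
			printf("ADDR64 descriptor: high word %#x\n",
			    (unsigned)hiaddr);
		}
		printf("data descriptor:   low word  %#x\n",
		    (unsigned)MSK_ADDR_LO(addrs[i]));
	}
	return 0;
}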
diffs (truncated from 447 to 300 lines):
diff -r 960a439aa0ed -r 4877b206c762 sys/dev/pci/if_msk.c
--- a/sys/dev/pci/if_msk.c Fri Sep 14 13:47:14 2018 +0000
+++ b/sys/dev/pci/if_msk.c Fri Sep 14 18:46:47 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_msk.c,v 1.77 2018/09/12 13:52:36 jakllsch Exp $ */
+/* $NetBSD: if_msk.c,v 1.78 2018/09/14 18:46:47 jakllsch Exp $ */
/* $OpenBSD: if_msk.c,v 1.79 2009/10/15 17:54:56 deraadt Exp $ */
/*
@@ -52,7 +52,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.77 2018/09/12 13:52:36 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.78 2018/09/14 18:46:47 jakllsch Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -150,6 +150,9 @@
static int msk_sysctl_handler(SYSCTLFN_PROTO);
static int msk_root_num;
+#define MSK_ADDR_LO(x) ((uint64_t) (x) & 0xffffffffUL)
+#define MSK_ADDR_HI(x) ((uint64_t) (x) >> 32)
+
/* supported device vendors */
static const struct msk_product {
pci_vendor_id_t msk_vendor;
@@ -409,6 +412,7 @@
{
struct msk_chain_data *cd = &sc_if->sk_cdata;
struct msk_ring_data *rd = sc_if->sk_rdata;
+ struct msk_rx_desc *r;
int i, nexti;
memset(rd->sk_rx_ring, 0, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
@@ -426,6 +430,18 @@
sc_if->sk_cdata.sk_rx_cons = 0;
sc_if->sk_cdata.sk_rx_cnt = 0;
+ /* Mark the first ring element to initialize the high address. */
+ sc_if->sk_cdata.sk_rx_hiaddr = 0;
+ r = &rd->sk_rx_ring[cd->sk_rx_prod];
+ r->sk_addr = htole32(cd->sk_rx_hiaddr);
+ r->sk_len = 0;
+ r->sk_ctl = 0;
+ r->sk_opcode = SK_Y2_BMUOPC_ADDR64 | SK_Y2_RXOPC_OWN;
+ MSK_CDRXSYNC(sc_if, cd->sk_rx_prod,
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+ SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
+ sc_if->sk_cdata.sk_rx_cnt++;
+
msk_fill_rx_ring(sc_if);
return (0);
}
@@ -436,6 +452,7 @@
struct sk_softc *sc = sc_if->sk_softc;
struct msk_chain_data *cd = &sc_if->sk_cdata;
struct msk_ring_data *rd = sc_if->sk_rdata;
+ struct msk_tx_desc *t;
bus_dmamap_t dmamap;
struct sk_txmap_entry *entry;
int i, nexti;
@@ -468,8 +485,17 @@
sc_if->sk_cdata.sk_tx_cons = 0;
sc_if->sk_cdata.sk_tx_cnt = 0;
+ /* Mark the first ring element to initialize the high address. */
+ sc_if->sk_cdata.sk_tx_hiaddr = 0;
+ t = &rd->sk_tx_ring[cd->sk_tx_prod];
+ t->sk_addr = htole32(cd->sk_tx_hiaddr);
+ t->sk_len = 0;
+ t->sk_ctl = 0;
+ t->sk_opcode = SK_Y2_BMUOPC_ADDR64 | SK_Y2_TXOPC_OWN;
MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ SK_INC(sc_if->sk_cdata.sk_tx_prod, MSK_TX_RING_CNT);
+ sc_if->sk_cdata.sk_tx_cnt++;
return (0);
}
@@ -480,7 +506,8 @@
struct mbuf *m_new = NULL;
struct sk_chain *c;
struct msk_rx_desc *r;
- void *buf = NULL;
+ void *buf = NULL;
+ bus_addr_t addr;
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL)
@@ -501,12 +528,34 @@
m_adj(m_new, ETHER_ALIGN);
+ addr = dmamap->dm_segs[0].ds_addr +
+ ((vaddr_t)m_new->m_data -
+ (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf);
+
+ if (sc_if->sk_cdata.sk_rx_hiaddr != MSK_ADDR_HI(addr)) {
+ c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
+ r = c->sk_le;
+ c->sk_mbuf = NULL;
+ r->sk_addr = htole32(MSK_ADDR_HI(addr));
+ r->sk_len = 0;
+ r->sk_ctl = 0;
+ r->sk_opcode = SK_Y2_BMUOPC_ADDR64 | SK_Y2_RXOPC_OWN;
+ sc_if->sk_cdata.sk_rx_hiaddr = MSK_ADDR_HI(addr);
+
+ MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
+ BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+
+ SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
+ sc_if->sk_cdata.sk_rx_cnt++;
+
+ DPRINTFN(10, ("%s: rx ADDR64: %#x\n",
+ sc_if->sk_ethercom.ec_if.if_xname, (unsigned)MSK_ADDR_HI(addr)));
+ }
+
c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
r = c->sk_le;
c->sk_mbuf = m_new;
- r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr +
- (((vaddr_t)m_new->m_data
- - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
+ r->sk_addr = htole32(MSK_ADDR_LO(addr));
r->sk_len = htole16(SK_JLEN);
r->sk_ctl = 0;
r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;
@@ -702,7 +751,7 @@
s = splnet();
- DPRINTFN(2, ("msk_ioctl ETHER\n"));
+ DPRINTFN(2, ("msk_ioctl ETHER cmd %lx\n", cmd));
switch (cmd) {
case SIOCSIFFLAGS:
if ((error = ifioctl_common(ifp, cmd, data)) != 0)
@@ -1101,17 +1150,19 @@
* give the receiver 2/3 of the memory (rounded down), and the
* transmitter whatever remains.
*/
- chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff;
- sc_if->sk_rx_ramstart = 0;
- sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1;
- chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk;
- sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1;
- sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1;
-
- DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
- " tx_ramstart=%#x tx_ramend=%#x\n",
- sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
- sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
+ if (sc->sk_ramsize) {
+ chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff;
+ sc_if->sk_rx_ramstart = 0;
+ sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1;
+ chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk;
+ sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1;
+ sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1;
+
+ DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
+ " tx_ramstart=%#x tx_ramend=%#x\n",
+ sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
+ sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
+ }
/* Allocate the descriptor queues. */
if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
@@ -1360,7 +1411,10 @@
return;
}
- sc->sc_dmatag = pa->pa_dmat;
+ if (pci_dma64_available(pa))
+ sc->sc_dmatag = pa->pa_dmat64;
+ else
+ sc->sc_dmatag = pa->pa_dmat;
command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
command |= PCI_COMMAND_MASTER_ENABLE;
@@ -1714,10 +1768,12 @@
{
struct sk_softc *sc = sc_if->sk_softc;
struct msk_tx_desc *f = NULL;
- u_int32_t frag, cur;
- int i;
+ u_int32_t frag, cur, hiaddr, old_hiaddr, total;
+ u_int32_t entries = 0;
+ size_t i;
struct sk_txmap_entry *entry;
bus_dmamap_t txmap;
+ bus_addr_t addr;
DPRINTFN(2, ("msk_encap\n"));
@@ -1746,30 +1802,68 @@
return (ENOBUFS);
}
- if (txmap->dm_nsegs > (MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
+ /* Count how many tx descriptors needed. */
+ hiaddr = sc_if->sk_cdata.sk_tx_hiaddr;
+ for (total = i = 0; i < txmap->dm_nsegs; i++) {
+ if (hiaddr != MSK_ADDR_HI(txmap->dm_segs[i].ds_addr)) {
+ hiaddr = MSK_ADDR_HI(txmap->dm_segs[i].ds_addr);
+ total++;
+ }
+ total++;
+ }
+
+ if (total > MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2) {
DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
bus_dmamap_unload(sc->sc_dmatag, txmap);
return (ENOBUFS);
}
- DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));
+ DPRINTFN(2, ("msk_encap: dm_nsegs=%d total desc=%u\n",
+ txmap->dm_nsegs, total));
/* Sync the DMA map. */
bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
BUS_DMASYNC_PREWRITE);
+ old_hiaddr = sc_if->sk_cdata.sk_tx_hiaddr;
for (i = 0; i < txmap->dm_nsegs; i++) {
+ addr = txmap->dm_segs[i].ds_addr;
+ DPRINTFN(2, ("msk_encap: addr %llx\n",
+ (unsigned long long)addr));
+ hiaddr = MSK_ADDR_HI(addr);
+
+ if (sc_if->sk_cdata.sk_tx_hiaddr != hiaddr) {
+ f = &sc_if->sk_rdata->sk_tx_ring[frag];
+ f->sk_addr = htole32(hiaddr);
+ f->sk_len = 0;
+ f->sk_ctl = 0;
+ if (i == 0)
+ f->sk_opcode = SK_Y2_BMUOPC_ADDR64;
+ else
+ f->sk_opcode = SK_Y2_BMUOPC_ADDR64 | SK_Y2_TXOPC_OWN;
+ sc_if->sk_cdata.sk_tx_hiaddr = hiaddr;
+ SK_INC(frag, MSK_TX_RING_CNT);
+ entries++;
+ DPRINTFN(10, ("%s: tx ADDR64: %#x\n",
+ sc_if->sk_ethercom.ec_if.if_xname, hiaddr));
+ }
+
f = &sc_if->sk_rdata->sk_tx_ring[frag];
- f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
+ f->sk_addr = htole32(MSK_ADDR_LO(addr));
f->sk_len = htole16(txmap->dm_segs[i].ds_len);
f->sk_ctl = 0;
- if (i == 0)
- f->sk_opcode = SK_Y2_TXOPC_PACKET;
- else
+ if (i == 0) {
+ if (hiaddr != old_hiaddr)
+ f->sk_opcode = SK_Y2_TXOPC_PACKET | SK_Y2_TXOPC_OWN;
+ else
+ f->sk_opcode = SK_Y2_TXOPC_PACKET;
+ } else
f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
cur = frag;
SK_INC(frag, MSK_TX_RING_CNT);
+ entries++;
}
+ KASSERTMSG(entries == total, "entries %u total %u", entries, total);
sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
@@ -1778,7 +1872,7 @@
sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;
/* Sync descriptors before handing to chip */
- MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
+ MSK_CDTXSYNC(sc_if, *txidx, entries,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;
@@ -1787,7 +1881,7 @@
MSK_CDTXSYNC(sc_if, *txidx, 1,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
- sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;
+ sc_if->sk_cdata.sk_tx_cnt += entries;
#ifdef MSK_DEBUG
if (mskdebug >= 2) {
@@ -1922,31 +2016,38 @@
struct sk_softc *sc = sc_if->sk_softc;
struct ifnet *ifp = &sc_if->sk_ethercom.ec_if;
struct mbuf *m;
- struct sk_chain *cur_rx;
- int cur, total_len = len;
+ unsigned cur, prod, tail, total_len = len;
bus_dmamap_t dmamap;
- DPRINTFN(2, ("msk_rxeof\n"));
-
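The msk_encap() hunk above also budgets ring slots before filling them: each time the high 32 bits of a segment's DMA address differ from the previously programmed value, one extra ADDR64 descriptor is needed in addition to the data descriptor, and the packet is rejected with ENOBUFS if the total does not fit. A minimal sketch of that counting step follows; it is illustrative only, and the helper name and the seg_addr/nsegs parameters are placeholders rather than driver fields (the driver walks txmap->dm_segs[]).

/*
 * Illustrative sketch only -- not from the commit.  Counts how many tx
 * descriptors a set of DMA segments will consume, charging one extra
 * slot for every change of the high 32 address bits (an ADDR64
 * descriptor) on top of one data descriptor per segment.
 */
#include <stdint.h>
#include <stddef.h>

#define MSK_ADDR_HI(x) ((uint64_t)(x) >> 32)

static unsigned
count_tx_descriptors(const uint64_t *seg_addr, size_t nsegs, uint32_t hiaddr)
{
	unsigned total = 0;

	for (size_t i = 0; i < nsegs; i++) {
		if (hiaddr != MSK_ADDR_HI(seg_addr[i])) {
			hiaddr = (uint32_t)MSK_ADDR_HI(seg_addr[i]);
			total++;	/* ADDR64 descriptor */
		}
		total++;		/* data descriptor */
	}
	/* Caller compares total against the free ring slots. */
	return total;
}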