Source-Changes-HG archive
[src/trunk]: src/sys/dev/pci Update comments from 57XX-PG105-R.pdf and 5718-P...
details: https://anonhg.NetBSD.org/src/rev/7d95185dc6dd
branches: trunk
changeset: 786007:7d95185dc6dd
user: msaitoh <msaitoh%NetBSD.org@localhost>
date: Thu Apr 11 11:24:07 2013 +0000
description:
Update comments from 57XX-PG105-R.pdf and 5718-PG106-R.pdf.
No functional change.
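For context, this change tags each hardware access in the bge(4) initialization
path with the step number from the matching Broadcom programmer's guide
(57XX-PG105-R for the 5700 family, 5718-PG106-R for the 5717/5718 family), so
the code can be audited against either document. Below is a minimal,
self-contained sketch of that comment convention and of the write-then-poll
idiom the init sequence uses; the register names, step numbers and the
csr_write4()/csr_read4() helpers are invented for illustration and are not
part of if_bge.c.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the NIC's CSR space (the driver uses bus_space). */
static uint32_t csr_space[0x100];

static void
csr_write4(uint32_t reg, uint32_t val)
{
	csr_space[reg / 4] = val;
}

static uint32_t
csr_read4(uint32_t reg)
{
	return csr_space[reg / 4];
}

/* Placeholder offset and bit; the real definitions live in if_bgereg.h. */
#define EXAMPLE_BMAN_MODE	0x10
#define EXAMPLE_BMANMODE_ENABLE	0x01

int
main(void)
{
	int i;

	/* 5718 step 13, 57XX step 38 */
	/* Enable buffer manager */
	csr_write4(EXAMPLE_BMAN_MODE, EXAMPLE_BMANMODE_ENABLE);

	/* 57XX step 39 */
	/* Poll for buffer manager start indication */
	for (i = 0; i < 100; i++) {
		if (csr_read4(EXAMPLE_BMAN_MODE) & EXAMPLE_BMANMODE_ENABLE)
			break;
	}
	if (i == 100) {
		printf("buffer manager failed to start\n");
		return 1;
	}
	printf("buffer manager started after %d poll(s)\n", i + 1);
	return 0;
}

In the driver itself the equivalent accesses go through CSR_WRITE_4() and
CSR_READ_4() with a bounded DELAY() between polls, and the init routine
returns ENXIO if the poll times out, as the diff below shows.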
diffstat:
sys/dev/pci/if_bge.c | 122 +++++++++++++++++++++++++++++++++++++++++---------
1 files changed, 99 insertions(+), 23 deletions(-)
diffs (truncated from 535 to 300 lines):
diff -r 83866183e9e7 -r 7d95185dc6dd sys/dev/pci/if_bge.c
--- a/sys/dev/pci/if_bge.c Thu Apr 11 10:12:48 2013 +0000
+++ b/sys/dev/pci/if_bge.c Thu Apr 11 11:24:07 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_bge.c,v 1.235 2013/04/08 15:55:58 msaitoh Exp $ */
+/* $NetBSD: if_bge.c,v 1.236 2013/04/11 11:24:07 msaitoh Exp $ */
/*
* Copyright (c) 2001 Wind River Systems
@@ -79,7 +79,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.235 2013/04/08 15:55:58 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.236 2013/04/11 11:24:07 msaitoh Exp $");
#include "vlan.h"
@@ -2491,8 +2491,9 @@
*/
pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
- /* Step 33: Configure mbuf memory pool */
if (!BGE_IS_5705_PLUS(sc)) {
+ /* 57XX step 33 */
+ /* Configure mbuf memory pool */
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
BGE_BUFFPOOL_1);
@@ -2501,14 +2502,18 @@
else
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
+ /* 57XX step 34 */
/* Configure DMA resource pool */
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
BGE_DMA_DESCRIPTORS);
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
}
- /* Step 35: Configure mbuf pool watermarks */
- /* new broadcom docs strongly recommend these: */
+ /* 5718 step 11, 57XX step 35 */
+ /*
+ * Configure mbuf pool watermarks. New broadcom docs strongly
+ * recommend these.
+ */
if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
@@ -2529,11 +2534,13 @@
CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
}
- /* Step 36: Configure DMA resource watermarks */
+ /* 57XX step 36 */
+ /* Configure DMA resource watermarks */
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
- /* Step 38: Enable buffer manager */
+ /* 5718 step 13, 57XX step 38 */
+ /* Enable buffer manager */
val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN;
/*
* Change the arbitration algorithm of TXMBUF read request to
@@ -2549,7 +2556,8 @@
val |= BGE_BMANMODE_LOMBUF_ATTN;
CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
- /* Step 39: Poll for buffer manager start indication */
+ /* 57XX step 39 */
+ /* Poll for buffer manager start indication */
for (i = 0; i < BGE_TIMEOUT * 2; i++) {
DELAY(10);
if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
@@ -2562,7 +2570,8 @@
return ENXIO;
}
- /* Step 40: Enable flow-through queues */
+ /* 57XX step 40 */
+ /* Enable flow-through queues */
CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
@@ -2610,9 +2619,11 @@
* the controller support multiple send rings.
*/
- /* Step 41: Initialize the standard RX ring control block */
+ /* 5718 step 15, 57XX step 41 */
+ /* Initialize the standard RX ring control block */
rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
+ /* 5718 step 16 */
if (BGE_IS_5717_PLUS(sc)) {
/*
* Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
@@ -2656,8 +2667,9 @@
/* Reset the standard receive producer ring producer index. */
bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
+ /* 57XX step 42 */
/*
- * Step 42: Initialize the jumbo RX ring control block
+ * Initialize the jumbo RX ring control block
* We set the 'ring disabled' bit in the flags
* field until we're actually ready to start
* using this ring (i.e. once we set the MTU
@@ -2687,6 +2699,7 @@
bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
}
+ /* 57XX step 43 */
/* Disable the mini receive producer ring RCB. */
if (BGE_IS_5700_FAMILY(sc)) {
/* Set up dummy disabled mini ring RCB */
@@ -2712,6 +2725,7 @@
CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
(CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
}
+ /* 5718 step 14, 57XX step 44 */
/*
* The BD ring replenish thresholds control how often the
* hardware fetches new BD's from the producer rings in host
@@ -2729,11 +2743,13 @@
if (BGE_IS_JUMBO_CAPABLE(sc))
CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
+ /* 5718 step 18 */
if (BGE_IS_5717_PLUS(sc)) {
CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
}
+ /* 57XX step 45 */
/*
* Disable all send rings by setting the 'ring disabled' bit
* in the flags field of all the TX send ring control blocks,
@@ -2752,6 +2768,7 @@
rcb_addr += sizeof(struct bge_rcb);
}
+ /* 57XX step 46 and 47 */
/* Configure send ring RCB 0 (we use only the first ring) */
rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
@@ -2767,6 +2784,7 @@
RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
+ /* 57XX step 48 */
/*
* Disable all receive return rings by setting the
* 'ring disabled' bit in the flags field of all the receive
@@ -2798,6 +2816,7 @@
rcb_addr += sizeof(struct bge_rcb);
}
+ /* 57XX step 49 */
/*
* Set up receive return ring 0. Note that the NIC address
* for RX return rings is 0x0. The return rings live entirely
@@ -2811,6 +2830,7 @@
RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
+ /* 5718 step 24, 57XX step 53 */
/* Set random backoff seed for TX */
CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
(CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
@@ -2818,6 +2838,7 @@
CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
BGE_TX_BACKOFF_SEED_MASK);
+ /* 5718 step 26, 57XX step 55 */
/* Set inter-packet gap */
val = 0x2620;
if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
@@ -2825,25 +2846,31 @@
(BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
+ /* 5718 step 27, 57XX step 56 */
/*
* Specify which ring to use for packets that don't match
* any RX rules.
*/
CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
+ /* 5718 step 28, 57XX step 57 */
/*
* Configure number of RX lists. One interrupt distribution
* list, sixteen active lists, one bad frames class.
*/
CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
+ /* 5718 step 29, 57XX step 58 */
/* Initialize RX list placement stats mask. */
CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
+ /* 5718 step 30, 57XX step 59 */
CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
+ /* 5718 step 33, 57XX step 62 */
/* Disable host coalescing until we get it set up */
CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
+ /* 5718 step 34, 57XX step 63 */
/* Poll to make sure it's shut down. */
for (i = 0; i < BGE_TIMEOUT * 2; i++) {
DELAY(10);
@@ -2857,6 +2884,7 @@
return ENXIO;
}
+ /* 5718 step 35, 36, 37 */
/* Set up host coalescing defaults */
CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
@@ -2878,6 +2906,7 @@
CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
}
+ /* 5718 step 38 */
/* Set up address of status block */
BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
@@ -2896,16 +2925,20 @@
bzero(&sc->bge_rdata->bge_status_block, 32);
}
+ /* 5718 step 39, 57XX step 73 */
/* Turn on host coalescing state machine */
CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
+ /* 5718 step 40, 57XX step 74 */
/* Turn on RX BD completion state machine and enable attentions */
CSR_WRITE_4(sc, BGE_RBDC_MODE,
BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
+ /* 5718 step 41, 57XX step 75 */
/* Turn on RX list placement state machine */
CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
+ /* 57XX step 76 */
/* Turn on RX list selector state machine. */
if (!(BGE_IS_5705_PLUS(sc)))
CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
@@ -2922,27 +2955,34 @@
else
val |= BGE_PORTMODE_MII;
+ /* 5718 step 42 and 43, 57XX step 77 and 78 */
/* Allow APE to send/receive frames. */
if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
/* Turn on DMA, clear stats */
CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
+ /* 5718 step 44 */
DELAY(40);
+ /* 5718 step 45, 57XX step 79 */
/* Set misc. local control, enable interrupts on attentions */
CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
if (BGE_IS_5717_PLUS(sc)) {
CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
+ /* 5718 step 46 */
DELAY(100);
}
+ /* 57XX step 81 */
/* Turn on DMA completion state machine */
if (!(BGE_IS_5705_PLUS(sc)))
CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
+ /* 5718 step 47, 57XX step 82 */
val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
+ /* 5718 step 48 */
/* Enable host coalescing bug fix. */
if (BGE_IS_5755_PLUS(sc))
val |= BGE_WDMAMODE_STATUS_TAG_FIX;
@@ -2952,6 +2992,7 @@
/* Turn on write DMA state machine */
CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
+ /* 5718 step 49 */
DELAY(40);
val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
@@ -3026,30 +3067,37 @@
/* Turn on read DMA state machine */
CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
+ /* 5718 step 52 */
delay(40);
+ /* 5718 step 56, 57XX step 84 */
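Several of the steps above program ring control blocks (RCBs) by splitting a
64-bit host DMA address into separate high and low 32-bit register writes (the
BGE_HOSTADDR()/RCB_WRITE_4() pairs in the diff). A minimal sketch of that
split, using invented names rather than the driver's own structures, is:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's hi/lo host address pair. */
struct example_hostaddr {
	uint32_t addr_hi;
	uint32_t addr_lo;
};

/* Split a 64-bit bus address into the halves an RCB register pair expects. */
static void
example_set_hostaddr(struct example_hostaddr *ha, uint64_t busaddr)
{
	ha->addr_hi = (uint32_t)(busaddr >> 32);
	ha->addr_lo = (uint32_t)(busaddr & 0xffffffffU);
}

int
main(void)
{
	struct example_hostaddr ha;

	example_set_hostaddr(&ha, 0x123456789abcdef0ULL);
	printf("hi=0x%08" PRIx32 " lo=0x%08" PRIx32 "\n", ha.addr_hi, ha.addr_lo);
	return 0;
}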