NetBSD-Bugs archive
Re: kern/58584: bge(4) locking issues
The following reply was made to PR kern/58584; it has been noted by GNATS.
From: Taylor R Campbell <riastradh%NetBSD.org@localhost>
To: gnats-bugs%NetBSD.org@localhost, netbsd-bugs%NetBSD.org@localhost
Cc:
Subject: Re: kern/58584: bge(4) locking issues
Date: Mon, 12 Aug 2024 18:34:46 +0000
The attached patch series (pr58584.patch) and giant diff (pr58584.diff)
aim to address these problems, but I don't have any bge(4) hardware to
test with.
Attachment: pr58584.patch (text/plain, quoted-printable)
# HG changeset patch
# User Taylor R Campbell <riastradh%NetBSD.org@localhost>
# Date 1723483765 0
# Mon Aug 12 17:29:25 2024 +0000
# Branch trunk
# Node ID 3b02c14478dd88afdd6539071e42f2d3025358d3
# Parent 26b25ed27d2f6bf0ad5e4df6a9ae9713f10ee32e
# EXP-Topic riastradh-pr58584-bgelocking
bge(4): Avoid freeing never-allocated resources if attach failed.
PR kern/58584
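In general terms this is the usual attach-completed flag. A minimal
sketch of the pattern, with placeholder names (xc_attach, xc_detach,
xc_attached are illustrative, not the bge(4) identifiers): attach sets
the flag only after everything has been allocated, and detach bails out
early if attach never got that far, so it never frees resources that
were never allocated.

static void
xc_attach(device_t parent, device_t self, void *aux)
{
	struct xc_softc *sc = device_private(self);

	/* ... map registers, allocate rings, locks, callouts ... */
	/* on any failure above: undo what was done so far and return */

	sc->xc_attached = true;	/* reached only on complete success */
}

static int
xc_detach(device_t self, int flags)
{
	struct xc_softc *sc = device_private(self);

	if (!sc->xc_attached)
		return 0;	/* attach failed partway; nothing to undo */

	/* ... stop the interface, destroy callouts, free locks ... */
	return 0;
}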
diff -r 26b25ed27d2f -r 3b02c14478dd sys/dev/pci/if_bge.c
--- a/sys/dev/pci/if_bge.c Sun Aug 11 18:33:13 2024 +0000
+++ b/sys/dev/pci/if_bge.c Mon Aug 12 17:29:25 2024 +0000
@@ -4078,6 +4078,8 @@ again:
#ifdef BGE_DEBUG
bge_debug_info(sc);
#endif
+
+ sc->bge_attached =3D true;
}
=20
/*
@@ -4090,6 +4092,9 @@ bge_detach(device_t self, int flags __un
struct bge_softc * const sc =3D device_private(self);
struct ifnet * const ifp =3D &sc->ethercom.ec_if;
=20
+ if (!sc->bge_attached)
+ return;
+
IFNET_LOCK(ifp);
=20
/* Stop the interface. Callouts are stopped in it. */
diff -r 26b25ed27d2f -r 3b02c14478dd sys/dev/pci/if_bgevar.h
--- a/sys/dev/pci/if_bgevar.h Sun Aug 11 18:33:13 2024 +0000
+++ b/sys/dev/pci/if_bgevar.h Mon Aug 12 17:29:25 2024 +0000
@@ -349,6 +349,7 @@ struct bge_softc {
int bge_txcnt;
struct callout bge_timeout;
bool bge_pending_rxintr_change;
+ bool bge_attached;
bool bge_detaching;
SLIST_HEAD(, txdmamap_pool_entry) txdma_list;
struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT];
# HG changeset patch
# User Taylor R Campbell <riastradh%NetBSD.org@localhost>
# Date 1723485119 0
# Mon Aug 12 17:51:59 2024 +0000
# Branch trunk
# Node ID acdf8ea65621599e86692e4caa30dc52d6d61865
# Parent 3b02c14478dd88afdd6539071e42f2d3025358d3
# EXP-Topic riastradh-pr58584-bgelocking
bge(4): Fix various locking issues.
1. Add missing mutex_obj_free.
2. Add missing callout_destroy.
3. Nix core lock. Instead, new mcast lock serializes
SIOCADDMULTI/SIOCDELMULTI and multicast filter updates.
4. Use sc_intr_lock for SIOCSIFMEDIA, to exclude concurrent ifmedia
business, not sc_core_lock, which doesn't exclude that.
5. Nix bge_stopping. Already handled by callout_halt.
(Note: PR says bge_detaching can be nixed too but I think that's
wrong; after our final if_stop, nothing else prevents if_init
again, which we must prevent before freeing resources.)
6. Serialize access to the coal ticks business, even during
if_init/stop, to exclude concurrent sysctl.
Sprinkle locking notes while here.
PR kern/58584
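Roughly the discipline the list above describes, sketched with
placeholder names (xc_softc, xc_setmulti, xc_ifmedia_upd, sc_if_flags
are illustrative, not the patch itself): multicast filter updates
serialize on the new mcast lock, and ifmedia/MII state is only touched
under the interrupt lock.

static int
xc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct xc_softc *sc = ifp->if_softc;
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* MII/ifmedia state lives under the interrupt lock. */
		mutex_enter(sc->sc_intr_lock);
		error = xc_ifmedia_upd(ifp);
		mutex_exit(sc->sc_intr_lock);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET &&
		    (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)) {
			/* The mcast lock alone serializes filter updates. */
			mutex_enter(sc->sc_mcast_lock);
			if (sc->sc_if_flags & IFF_RUNNING)
				xc_setmulti(sc);
			mutex_exit(sc->sc_mcast_lock);
			error = 0;
		}
		break;
	}
	return error;
}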
diff -r 3b02c14478dd -r acdf8ea65621 sys/dev/pci/if_bge.c
--- a/sys/dev/pci/if_bge.c Mon Aug 12 17:29:25 2024 +0000
+++ b/sys/dev/pci/if_bge.c Mon Aug 12 17:51:59 2024 +0000
@@ -203,9 +203,7 @@ static void bge_start_locked(struct ifne
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
-static int bge_init_locked(struct ifnet *);
static void bge_stop(struct ifnet *, int);
-static void bge_stop_locked(struct ifnet *, bool);
static bool bge_watchdog_tick(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
@@ -1091,6 +1089,8 @@ bge_miibus_readreg(device_t dev, int phy
int rv =3D 0;
int i;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
if (bge_ape_lock(sc, sc->bge_phy_ape_lock) !=3D 0)
return -1;
=20
@@ -1144,6 +1144,8 @@ bge_miibus_writereg(device_t dev, int ph
int rv =3D 0;
int i;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
if (BGE_ASICREV(sc->bge_chipid) =3D=3D BGE_ASICREV_BCM5906 &&
(reg =3D=3D MII_GTCR || reg =3D=3D BRGPHY_MII_AUXCTL))
return 0;
@@ -1198,6 +1200,8 @@ bge_miibus_statchg(struct ifnet *ifp)
struct mii_data *mii =3D &sc->bge_mii;
uint32_t mac_mode, rx_mode, tx_mode;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
/*
* Get flow control negotiation result.
*/
@@ -1838,7 +1842,7 @@ bge_setmulti(struct bge_softc *sc)
uint32_t h;
int i;
=20
- KASSERT(mutex_owned(sc->sc_core_lock));
+ KASSERT(mutex_owned(sc->sc_mcast_lock));
if (sc->bge_if_flags & IFF_PROMISC)
goto allmulti;
=20
@@ -2766,10 +2770,16 @@ bge_blockinit(struct bge_softc *sc)
=20
/* 5718 step 35, 36, 37 */
/* Set up host coalescing defaults */
- CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
- CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
- CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
- CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
+ mutex_enter(sc->sc_intr_lock);
+ const uint32_t rx_coal_ticks =3D sc->bge_rx_coal_ticks;
+ const uint32_t tx_coal_ticks =3D sc->bge_tx_coal_ticks;
+ const uint32_t rx_max_coal_bds =3D sc->bge_rx_max_coal_bds;
+ const uint32_t tx_max_coal_bds =3D sc->bge_tx_max_coal_bds;
+ mutex_exit(sc->sc_intr_lock);
+ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_coal_ticks);
+ CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, tx_coal_ticks);
+ CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_max_coal_bds);
+ CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_max_coal_bds);
if (!(BGE_IS_5705_PLUS(sc))) {
CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
@@ -3295,7 +3305,6 @@ bge_attach(device_t parent, device_t sel
return;
}
=20
- sc->bge_stopping =3D false;
sc->bge_txrx_stopping =3D false;
=20
/* Save various chip information. */
@@ -3869,7 +3878,7 @@ bge_attach(device_t parent, device_t sel
else
sc->bge_return_ring_cnt =3D BGE_RETURN_RING_CNT;
=20
- sc->sc_core_lock =3D mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+ sc->sc_mcast_lock =3D mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
sc->sc_intr_lock =3D mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
=20
/* Set up ifnet structure */
@@ -4124,6 +4133,8 @@ bge_release_resources(struct bge_softc *
if (sc->bge_log !=3D NULL)
sysctl_teardown(&sc->bge_log);
=20
+ callout_destroy(&sc->bge_timeout);
+
#ifdef BGE_EVENT_COUNTERS
/* Detach event counters. */
evcnt_detach(&sc->bge_ev_intr);
@@ -4137,6 +4148,9 @@ bge_release_resources(struct bge_softc *
evcnt_detach(&sc->bge_ev_xoffentered);
#endif /* BGE_EVENT_COUNTERS */
=20
+ mutex_obj_free(sc->sc_intr_lock);
+ mutex_obj_free(sc->sc_mcast_lock);
+
/* Disestablish the interrupt handler */
if (sc->bge_intrhand !=3D NULL) {
pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
@@ -4466,6 +4480,8 @@ bge_rxeof(struct bge_softc *sc)
bus_size_t tlen;
int tosync;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
offsetof(struct bge_ring_data, bge_status_block),
sizeof(struct bge_status_block),
@@ -4636,6 +4652,8 @@ bge_txeof(struct bge_softc *sc)
int tosync;
struct mbuf *m;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
offsetof(struct bge_ring_data, bge_status_block),
sizeof(struct bge_status_block),
@@ -4833,11 +4851,7 @@ bge_tick(void *xsc)
struct ifnet * const ifp =3D &sc->ethercom.ec_if;
struct mii_data * const mii =3D &sc->bge_mii;
=20
- mutex_enter(sc->sc_core_lock);
- if (sc->bge_stopping) {
- mutex_exit(sc->sc_core_lock);
- return;
- }
+ mutex_enter(sc->sc_intr_lock);
=20
if (BGE_IS_5705_PLUS(sc))
bge_stats_update_regs(sc);
@@ -4859,9 +4873,7 @@ bge_tick(void *xsc)
* (extra input errors was reported for bcm5701 & bcm5704).
*/
if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
- mutex_enter(sc->sc_intr_lock);
mii_tick(mii);
- mutex_exit(sc->sc_intr_lock);
}
}
=20
@@ -4870,8 +4882,7 @@ bge_tick(void *xsc)
const bool ok =3D bge_watchdog_tick(ifp);
if (ok)
callout_schedule(&sc->bge_timeout, hz);
-
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_intr_lock);
}
=20
static void
@@ -5155,6 +5166,8 @@ bge_encap(struct bge_softc *sc, struct m
uint16_t vtag;
bool remap;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
if (m_head->m_pkthdr.csum_flags) {
if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
csum_flags |=3D BGE_TXBDFLAG_IP_CSUM;
@@ -5568,35 +5581,19 @@ static int
bge_init(struct ifnet *ifp)
{
struct bge_softc * const sc =3D ifp->if_softc;
-
- KASSERT(IFNET_LOCKED(ifp));
-
- if (sc->bge_detaching)
- return ENXIO;
-
- mutex_enter(sc->sc_core_lock);
- int ret =3D bge_init_locked(ifp);
- mutex_exit(sc->sc_core_lock);
-
- return ret;
-}
-
-
-static int
-bge_init_locked(struct ifnet *ifp)
-{
- struct bge_softc * const sc =3D ifp->if_softc;
const uint16_t *m;
uint32_t mode, reg;
int error =3D 0;
=20
ASSERT_SLEEPABLE();
KASSERT(IFNET_LOCKED(ifp));
- KASSERT(mutex_owned(sc->sc_core_lock));
KASSERT(ifp =3D=3D &sc->ethercom.ec_if);
=20
+ if (sc->bge_detaching)
+ return ENXIO;
+
/* Cancel pending I/O and flush buffers. */
- bge_stop_locked(ifp, false);
+ bge_stop(ifp, 0);
=20
bge_stop_fw(sc);
bge_sig_pre_reset(sc, BGE_RESET_START);
@@ -5768,7 +5765,6 @@ bge_init_locked(struct ifnet *ifp)
=20
mutex_enter(sc->sc_intr_lock);
if ((error =3D bge_ifmedia_upd(ifp)) =3D=3D 0) {
- sc->bge_stopping =3D false;
sc->bge_txrx_stopping =3D false;
=20
/* IFNET_LOCKED asserted above */
@@ -5778,7 +5774,9 @@ bge_init_locked(struct ifnet *ifp)
}
mutex_exit(sc->sc_intr_lock);
=20
+ mutex_enter(sc->sc_mcast_lock);
sc->bge_if_flags =3D ifp->if_flags;
+ mutex_exit(sc->sc_mcast_lock);
=20
return error;
}
@@ -5924,7 +5922,7 @@ bge_ifflags_cb(struct ethercom *ec)
int ret =3D 0;
=20
KASSERT(IFNET_LOCKED(ifp));
- mutex_enter(sc->sc_core_lock);
+ mutex_enter(sc->sc_mcast_lock);
=20
u_short change =3D ifp->if_flags ^ sc->bge_if_flags;
=20
@@ -5940,7 +5938,7 @@ bge_ifflags_cb(struct ethercom *ec)
}
=20
sc->bge_if_flags =3D ifp->if_flags;
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_mcast_lock);
=20
return ret;
}
@@ -5964,7 +5962,7 @@ bge_ioctl(struct ifnet *ifp, u_long comm
=20
switch (command) {
case SIOCSIFMEDIA:
- mutex_enter(sc->sc_core_lock);
+ mutex_enter(sc->sc_intr_lock);
/* XXX Flow control is not supported for 1000BASE-SX */
if (sc->bge_flags & BGEF_FIBER_TBI) {
ifr->ifr_media &=3D ~IFM_ETH_FMASK;
@@ -5984,7 +5982,7 @@ bge_ioctl(struct ifnet *ifp, u_long comm
}
sc->bge_flowflags =3D ifr->ifr_media & IFM_ETH_FMASK;
}
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_intr_lock);
=20
if (sc->bge_flags & BGEF_FIBER_TBI) {
error =3D ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
@@ -6002,11 +6000,11 @@ bge_ioctl(struct ifnet *ifp, u_long comm
error =3D 0;
=20
if (command =3D=3D SIOCADDMULTI || command =3D=3D SIOCDELMULTI) {
- mutex_enter(sc->sc_core_lock);
+ mutex_enter(sc->sc_mcast_lock);
if (sc->bge_if_flags & IFF_RUNNING) {
bge_setmulti(sc);
}
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_mcast_lock);
}
break;
}
@@ -6020,7 +6018,7 @@ static bool
bge_watchdog_check(struct bge_softc * const sc)
{
=20
- KASSERT(mutex_owned(sc->sc_core_lock));
+ KASSERT(mutex_owned(sc->sc_intr_lock));
=20
if (!sc->bge_tx_sending)
return true;
@@ -6063,7 +6061,7 @@ bge_watchdog_tick(struct ifnet *ifp)
{
struct bge_softc * const sc =3D ifp->if_softc;
=20
- KASSERT(mutex_owned(sc->sc_core_lock));
+ KASSERT(mutex_owned(sc->sc_intr_lock));
=20
if (!sc->sc_trigger_reset && bge_watchdog_check(sc))
return true;
@@ -6126,7 +6124,10 @@ bge_stop_block(struct bge_softc *sc, bus
(u_long)reg, bit);
}
=20
-
+/*
+ * Stop the adapter and free any mbufs allocated to the
+ * RX and TX lists.
+ */
static void
bge_stop(struct ifnet *ifp, int disable)
{
@@ -6135,31 +6136,11 @@ bge_stop(struct ifnet *ifp, int disable)
ASSERT_SLEEPABLE();
KASSERT(IFNET_LOCKED(ifp));
=20
- mutex_enter(sc->sc_core_lock);
- bge_stop_locked(ifp, disable ? true : false);
- mutex_exit(sc->sc_core_lock);
-}
-
-/*
- * Stop the adapter and free any mbufs allocated to the
- * RX and TX lists.
- */
-static void
-bge_stop_locked(struct ifnet *ifp, bool disable)
-{
- struct bge_softc * const sc =3D ifp->if_softc;
-
- ASSERT_SLEEPABLE();
- KASSERT(IFNET_LOCKED(ifp));
- KASSERT(mutex_owned(sc->sc_core_lock));
-
- sc->bge_stopping =3D true;
-
mutex_enter(sc->sc_intr_lock);
sc->bge_txrx_stopping =3D true;
mutex_exit(sc->sc_intr_lock);
=20
- callout_halt(&sc->bge_timeout, sc->sc_core_lock);
+ callout_halt(&sc->bge_timeout, NULL);
=20
/* Disable host interrupts. */
BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
@@ -6257,7 +6238,9 @@ bge_stop_locked(struct ifnet *ifp, bool=20
=20
ifp->if_flags &=3D ~IFF_RUNNING;
=20
+ mutex_enter(sc->sc_mcast_lock);
sc->bge_if_flags =3D ifp->if_flags;
+ mutex_exit(sc->sc_mcast_lock);
}
=20
static void
diff -r 3b02c14478dd -r acdf8ea65621 sys/dev/pci/if_bgevar.h
--- a/sys/dev/pci/if_bgevar.h Mon Aug 12 17:29:25 2024 +0000
+++ b/sys/dev/pci/if_bgevar.h Mon Aug 12 17:51:59 2024 +0000
@@ -261,6 +261,21 @@ struct txdmamap_pool_entry {
#define ASF_NEW_HANDSHAKE 2
#define ASF_STACKUP 4
=20
+/*
+ * Locking notes:
+ *
+ * n IFNET_LOCK
+ * m sc_mcast_lock
+ * i sc_intr_lock
+ * i/n while down, IFNET_LOCK; while up, sc_intr_lock
+ *
+ * Otherwise, stable from attach to detach.
+ *
+ * Lock order:
+ *
+ * IFNET_LOCK -> sc_intr_lock
+ * IFNET_LOCK -> sc_mcast_lock
+ */
struct bge_softc {
device_t bge_dev;
struct ethercom ethercom; /* interface info */
@@ -276,10 +291,10 @@ struct bge_softc {
pcitag_t sc_pcitag;
=20
struct pci_attach_args bge_pa;
- struct mii_data bge_mii;
- struct ifmedia bge_ifmedia; /* media info */
+ struct mii_data bge_mii; /* i: mii data */
+ struct ifmedia bge_ifmedia; /* i: media info */
uint32_t bge_return_ring_cnt;
- uint32_t bge_tx_prodidx;
+ uint32_t bge_tx_prodidx; /* i: tx producer idx */
bus_dma_tag_t bge_dmatag;
bus_dma_tag_t bge_dmatag32;
bool bge_dma64;
@@ -287,7 +302,7 @@ struct bge_softc {
uint32_t bge_pciecap;
uint16_t bge_mps;
int bge_expmrq;
- uint32_t bge_lasttag;
+ uint32_t bge_lasttag; /* i: last status tag */
uint32_t bge_mfw_flags; /* Management F/W flags */
#define BGE_MFW_ON_RXCPU __BIT(0)
#define BGE_MFW_ON_APE __BIT(1)
@@ -297,74 +312,82 @@ struct bge_softc {
int bge_phy_addr;
uint32_t bge_chipid;
uint8_t bge_asf_mode;
- uint8_t bge_asf_count;
+ uint8_t bge_asf_count; /* i: XXX ??? */
struct bge_ring_data *bge_rdata; /* rings */
struct bge_chain_data bge_cdata; /* mbufs */
bus_dmamap_t bge_ring_map;
bus_dma_segment_t bge_ring_seg;
int bge_ring_rseg;
- uint16_t bge_tx_saved_considx;
- uint16_t bge_rx_saved_considx;
- uint16_t bge_std; /* current std ring head */
- uint16_t bge_std_cnt;
- uint16_t bge_jumbo; /* current jumbo ring head */
+ uint16_t bge_tx_saved_considx; /* i: tx consumer idx */
+ uint16_t bge_rx_saved_considx; /* i: rx consumer idx */
+ uint16_t bge_std; /* i: current std ring head */
+ uint16_t bge_std_cnt; /* i: number of std mbufs */
+ uint16_t bge_jumbo;
+ /* i: current jumbo ring head */
SLIST_HEAD(__bge_jfreehead, bge_jpool_entry) bge_jfree_listhead;
+ /* i: list of free jumbo mbufs */
SLIST_HEAD(__bge_jinusehead, bge_jpool_entry) bge_jinuse_listhead;
+ /* i: list of jumbo mbufs in use */
uint32_t bge_stat_ticks;
- uint32_t bge_rx_coal_ticks;
- uint32_t bge_tx_coal_ticks;
- uint32_t bge_rx_max_coal_bds;
- uint32_t bge_tx_max_coal_bds;
- uint32_t bge_sts;
+ uint32_t bge_rx_coal_ticks; /* i */
+ uint32_t bge_tx_coal_ticks; /* i */
+ uint32_t bge_rx_max_coal_bds; /* i */
+ uint32_t bge_tx_max_coal_bds; /* i */
+ uint32_t bge_sts; /* i/n: link status */
#define BGE_STS_LINK __BIT(0) /* MAC link status */
#define BGE_STS_LINK_EVT __BIT(1) /* pending link event */
#define BGE_STS_AUTOPOLL __BIT(2) /* PHY auto-polling */
#define BGE_STS_BIT(sc, x) ((sc)->bge_sts & (x))
#define BGE_STS_SETBIT(sc, x) ((sc)->bge_sts |=3D (x))
#define BGE_STS_CLRBIT(sc, x) ((sc)->bge_sts &=3D ~(x))
- u_short bge_if_flags;
- uint32_t bge_flags;
+ u_short bge_if_flags; /* m: if_flags cache */
+ uint32_t bge_flags; /* i/n */
uint32_t bge_phy_flags;
- int bge_flowflags;
+ int bge_flowflags; /* i */
time_t bge_tx_lastsent;
- bool bge_stopping;
+ /* i: time of last tx */
bool bge_txrx_stopping;
- bool bge_tx_sending;
+ /* i: true when going down */
+ bool bge_tx_sending; /* i: XXX ??? */
=20
#ifdef BGE_EVENT_COUNTERS
/*
* Event counters.
*/
- struct evcnt bge_ev_intr; /* interrupts */
- struct evcnt bge_ev_intr_spurious; /* spurious intr. (tagged status)*/
- struct evcnt bge_ev_intr_spurious2; /* spurious interrupts */
- struct evcnt bge_ev_tx_xoff; /* send PAUSE(len>0) packets */
- struct evcnt bge_ev_tx_xon; /* send PAUSE(len=3D0) packets */
- struct evcnt bge_ev_rx_xoff; /* receive PAUSE(len>0) packets */
- struct evcnt bge_ev_rx_xon; /* receive PAUSE(len=3D0) packets */
- struct evcnt bge_ev_rx_macctl; /* receive MAC control packets */
- struct evcnt bge_ev_xoffentered;/* XOFF state entered */
+ struct evcnt bge_ev_intr; /* i: interrupts */
+ struct evcnt bge_ev_intr_spurious;
+ /* i: spurious intr. (tagged status) */
+ struct evcnt bge_ev_intr_spurious2; /* i: spurious interrupts */
+ struct evcnt bge_ev_tx_xoff; /* i: send PAUSE(len>0) packets */
+ struct evcnt bge_ev_tx_xon; /* i: send PAUSE(len=3D0) packets */
+ struct evcnt bge_ev_rx_xoff; /* i: receive PAUSE(len>0) packets */
+ struct evcnt bge_ev_rx_xon; /* i: receive PAUSE(len=3D0) packets */
+ struct evcnt bge_ev_rx_macctl; /* i: receive MAC control packets */
+ struct evcnt bge_ev_xoffentered;/* i: XOFF state entered */
#endif /* BGE_EVENT_COUNTERS */
- uint64_t bge_if_collisions;
- int bge_txcnt;
- struct callout bge_timeout;
+ uint64_t bge_if_collisions; /* i */
+ int bge_txcnt; /* i: # tx descs in use */
+ struct callout bge_timeout; /* i: tx timeout */
bool bge_pending_rxintr_change;
+ /* i: change pending to
+ * rx_coal_ticks and
+ * rx_max_coal_bds */
bool bge_attached;
- bool bge_detaching;
- SLIST_HEAD(, txdmamap_pool_entry) txdma_list;
- struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT];
+ bool bge_detaching; /* n */
+ SLIST_HEAD(, txdmamap_pool_entry) txdma_list; /* i */
+ struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT]; /* i */
=20
struct sysctllog *bge_log;
=20
krndsource_t rnd_source; /* random source */
=20
- kmutex_t *sc_core_lock; /* lock for softc operations */
- kmutex_t *sc_intr_lock; /* lock for interrupt operations */
+ kmutex_t *sc_mcast_lock; /* m: lock for SIOCADD/DELMULTI */
+ kmutex_t *sc_intr_lock; /* i: lock for interrupt operations */
struct workqueue *sc_reset_wq;
- struct work sc_reset_work;
+ struct work sc_reset_work; /* i */
volatile unsigned sc_reset_pending;
=20
- bool sc_trigger_reset;
+ bool sc_trigger_reset; /* i */
};
=20
#endif /* _DEV_PCI_IF_BGEVAR_H_ */
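To illustrate the "i/n" convention in the locking notes above, here is
a sketch with placeholder names (xc_softc, sc_link_flags are
illustrative): while the interface is down only IFNET_LOCK covers such
a field; once the interface can be up, the interrupt side also touches
it, so writers take sc_intr_lock as well.

static int
xc_init(struct ifnet *ifp)
{
	struct xc_softc *sc = ifp->if_softc;

	KASSERT(IFNET_LOCKED(ifp));

	/* Bringing the interface up: interrupts may run from here on. */
	mutex_enter(sc->sc_intr_lock);
	sc->sc_link_flags = 0;		/* i/n field */
	mutex_exit(sc->sc_intr_lock);

	return 0;
}

static int
xc_intr(void *arg)
{
	struct xc_softc *sc = arg;

	mutex_enter(sc->sc_intr_lock);
	/* ... read and update sc->sc_link_flags under the intr lock ... */
	mutex_exit(sc->sc_intr_lock);

	return 1;
}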
Attachment: pr58584.diff (text/plain, quoted-printable)
diff -r 26b25ed27d2f sys/dev/pci/if_bge.c
--- a/sys/dev/pci/if_bge.c Sun Aug 11 18:33:13 2024 +0000
+++ b/sys/dev/pci/if_bge.c Mon Aug 12 18:32:16 2024 +0000
@@ -203,9 +203,7 @@ static void bge_start_locked(struct ifne
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
-static int bge_init_locked(struct ifnet *);
static void bge_stop(struct ifnet *, int);
-static void bge_stop_locked(struct ifnet *, bool);
static bool bge_watchdog_tick(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
@@ -1091,6 +1089,8 @@ bge_miibus_readreg(device_t dev, int phy
int rv =3D 0;
int i;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
if (bge_ape_lock(sc, sc->bge_phy_ape_lock) !=3D 0)
return -1;
=20
@@ -1144,6 +1144,8 @@ bge_miibus_writereg(device_t dev, int ph
int rv =3D 0;
int i;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
if (BGE_ASICREV(sc->bge_chipid) =3D=3D BGE_ASICREV_BCM5906 &&
(reg =3D=3D MII_GTCR || reg =3D=3D BRGPHY_MII_AUXCTL))
return 0;
@@ -1198,6 +1200,8 @@ bge_miibus_statchg(struct ifnet *ifp)
struct mii_data *mii =3D &sc->bge_mii;
uint32_t mac_mode, rx_mode, tx_mode;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
/*
* Get flow control negotiation result.
*/
@@ -1838,7 +1842,7 @@ bge_setmulti(struct bge_softc *sc)
uint32_t h;
int i;
=20
- KASSERT(mutex_owned(sc->sc_core_lock));
+ KASSERT(mutex_owned(sc->sc_mcast_lock));
if (sc->bge_if_flags & IFF_PROMISC)
goto allmulti;
=20
@@ -2766,10 +2770,16 @@ bge_blockinit(struct bge_softc *sc)
=20
/* 5718 step 35, 36, 37 */
/* Set up host coalescing defaults */
- CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
- CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
- CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
- CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
+ mutex_enter(sc->sc_intr_lock);
+ const uint32_t rx_coal_ticks =3D sc->bge_rx_coal_ticks;
+ const uint32_t tx_coal_ticks =3D sc->bge_tx_coal_ticks;
+ const uint32_t rx_max_coal_bds =3D sc->bge_rx_max_coal_bds;
+ const uint32_t tx_max_coal_bds =3D sc->bge_tx_max_coal_bds;
+ mutex_exit(sc->sc_intr_lock);
+ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_coal_ticks);
+ CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, tx_coal_ticks);
+ CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_max_coal_bds);
+ CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, tx_max_coal_bds);
if (!(BGE_IS_5705_PLUS(sc))) {
CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
@@ -3295,7 +3305,6 @@ bge_attach(device_t parent, device_t sel
return;
}
=20
- sc->bge_stopping =3D false;
sc->bge_txrx_stopping =3D false;
=20
/* Save various chip information. */
@@ -3869,7 +3878,7 @@ bge_attach(device_t parent, device_t sel
else
sc->bge_return_ring_cnt =3D BGE_RETURN_RING_CNT;
=20
- sc->sc_core_lock =3D mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+ sc->sc_mcast_lock =3D mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
sc->sc_intr_lock =3D mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
=20
/* Set up ifnet structure */
@@ -4078,6 +4087,8 @@ again:
#ifdef BGE_DEBUG
bge_debug_info(sc);
#endif
+
+ sc->bge_attached =3D true;
}
=20
/*
@@ -4090,6 +4101,9 @@ bge_detach(device_t self, int flags __un
struct bge_softc * const sc =3D device_private(self);
struct ifnet * const ifp =3D &sc->ethercom.ec_if;
=20
+ if (!sc->bge_attached)
+ return;
+
IFNET_LOCK(ifp);
=20
/* Stop the interface. Callouts are stopped in it. */
@@ -4119,6 +4133,8 @@ bge_release_resources(struct bge_softc *
if (sc->bge_log !=3D NULL)
sysctl_teardown(&sc->bge_log);
=20
+ callout_destroy(&sc->bge_timeout);
+
#ifdef BGE_EVENT_COUNTERS
/* Detach event counters. */
evcnt_detach(&sc->bge_ev_intr);
@@ -4132,6 +4148,9 @@ bge_release_resources(struct bge_softc *
evcnt_detach(&sc->bge_ev_xoffentered);
#endif /* BGE_EVENT_COUNTERS */
=20
+ mutex_obj_free(sc->sc_intr_lock);
+ mutex_obj_free(sc->sc_mcast_lock);
+
/* Disestablish the interrupt handler */
if (sc->bge_intrhand !=3D NULL) {
pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand);
@@ -4461,6 +4480,8 @@ bge_rxeof(struct bge_softc *sc)
bus_size_t tlen;
int tosync;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
offsetof(struct bge_ring_data, bge_status_block),
sizeof(struct bge_status_block),
@@ -4631,6 +4652,8 @@ bge_txeof(struct bge_softc *sc)
int tosync;
struct mbuf *m;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
offsetof(struct bge_ring_data, bge_status_block),
sizeof(struct bge_status_block),
@@ -4828,11 +4851,7 @@ bge_tick(void *xsc)
struct ifnet * const ifp =3D &sc->ethercom.ec_if;
struct mii_data * const mii =3D &sc->bge_mii;
=20
- mutex_enter(sc->sc_core_lock);
- if (sc->bge_stopping) {
- mutex_exit(sc->sc_core_lock);
- return;
- }
+ mutex_enter(sc->sc_intr_lock);
=20
if (BGE_IS_5705_PLUS(sc))
bge_stats_update_regs(sc);
@@ -4854,9 +4873,7 @@ bge_tick(void *xsc)
* (extra input errors was reported for bcm5701 & bcm5704).
*/
if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
- mutex_enter(sc->sc_intr_lock);
mii_tick(mii);
- mutex_exit(sc->sc_intr_lock);
}
}
=20
@@ -4865,8 +4882,7 @@ bge_tick(void *xsc)
const bool ok =3D bge_watchdog_tick(ifp);
if (ok)
callout_schedule(&sc->bge_timeout, hz);
-
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_intr_lock);
}
=20
static void
@@ -5150,6 +5166,8 @@ bge_encap(struct bge_softc *sc, struct m
uint16_t vtag;
bool remap;
=20
+ KASSERT(mutex_owned(sc->sc_intr_lock));
+
if (m_head->m_pkthdr.csum_flags) {
if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
csum_flags |=3D BGE_TXBDFLAG_IP_CSUM;
@@ -5563,35 +5581,19 @@ static int
bge_init(struct ifnet *ifp)
{
struct bge_softc * const sc =3D ifp->if_softc;
-
- KASSERT(IFNET_LOCKED(ifp));
-
- if (sc->bge_detaching)
- return ENXIO;
-
- mutex_enter(sc->sc_core_lock);
- int ret =3D bge_init_locked(ifp);
- mutex_exit(sc->sc_core_lock);
-
- return ret;
-}
-
-
-static int
-bge_init_locked(struct ifnet *ifp)
-{
- struct bge_softc * const sc =3D ifp->if_softc;
const uint16_t *m;
uint32_t mode, reg;
int error =3D 0;
=20
ASSERT_SLEEPABLE();
KASSERT(IFNET_LOCKED(ifp));
- KASSERT(mutex_owned(sc->sc_core_lock));
KASSERT(ifp =3D=3D &sc->ethercom.ec_if);
=20
+ if (sc->bge_detaching)
+ return ENXIO;
+
/* Cancel pending I/O and flush buffers. */
- bge_stop_locked(ifp, false);
+ bge_stop(ifp, 0);
=20
bge_stop_fw(sc);
bge_sig_pre_reset(sc, BGE_RESET_START);
@@ -5763,7 +5765,6 @@ bge_init_locked(struct ifnet *ifp)
=20
mutex_enter(sc->sc_intr_lock);
if ((error =3D bge_ifmedia_upd(ifp)) =3D=3D 0) {
- sc->bge_stopping =3D false;
sc->bge_txrx_stopping =3D false;
=20
/* IFNET_LOCKED asserted above */
@@ -5773,7 +5774,9 @@ bge_init_locked(struct ifnet *ifp)
}
mutex_exit(sc->sc_intr_lock);
=20
+ mutex_enter(sc->sc_mcast_lock);
sc->bge_if_flags =3D ifp->if_flags;
+ mutex_exit(sc->sc_mcast_lock);
=20
return error;
}
@@ -5919,7 +5922,7 @@ bge_ifflags_cb(struct ethercom *ec)
int ret =3D 0;
=20
KASSERT(IFNET_LOCKED(ifp));
- mutex_enter(sc->sc_core_lock);
+ mutex_enter(sc->sc_mcast_lock);
=20
u_short change =3D ifp->if_flags ^ sc->bge_if_flags;
=20
@@ -5935,7 +5938,7 @@ bge_ifflags_cb(struct ethercom *ec)
}
=20
sc->bge_if_flags =3D ifp->if_flags;
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_mcast_lock);
=20
return ret;
}
@@ -5959,7 +5962,7 @@ bge_ioctl(struct ifnet *ifp, u_long comm
=20
switch (command) {
case SIOCSIFMEDIA:
- mutex_enter(sc->sc_core_lock);
+ mutex_enter(sc->sc_intr_lock);
/* XXX Flow control is not supported for 1000BASE-SX */
if (sc->bge_flags & BGEF_FIBER_TBI) {
ifr->ifr_media &=3D ~IFM_ETH_FMASK;
@@ -5979,7 +5982,7 @@ bge_ioctl(struct ifnet *ifp, u_long comm
}
sc->bge_flowflags =3D ifr->ifr_media & IFM_ETH_FMASK;
}
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_intr_lock);
=20
if (sc->bge_flags & BGEF_FIBER_TBI) {
error =3D ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
@@ -5997,11 +6000,11 @@ bge_ioctl(struct ifnet *ifp, u_long comm
error =3D 0;
=20
if (command =3D=3D SIOCADDMULTI || command =3D=3D SIOCDELMULTI) {
- mutex_enter(sc->sc_core_lock);
+ mutex_enter(sc->sc_mcast_lock);
if (sc->bge_if_flags & IFF_RUNNING) {
bge_setmulti(sc);
}
- mutex_exit(sc->sc_core_lock);
+ mutex_exit(sc->sc_mcast_lock);
}
break;
}
@@ -6015,7 +6018,7 @@ static bool
bge_watchdog_check(struct bge_softc * const sc)
{
=20
- KASSERT(mutex_owned(sc->sc_core_lock));
+ KASSERT(mutex_owned(sc->sc_intr_lock));
=20
if (!sc->bge_tx_sending)
return true;
@@ -6058,7 +6061,7 @@ bge_watchdog_tick(struct ifnet *ifp)
{
struct bge_softc * const sc =3D ifp->if_softc;
=20
- KASSERT(mutex_owned(sc->sc_core_lock));
+ KASSERT(mutex_owned(sc->sc_intr_lock));
=20
if (!sc->sc_trigger_reset && bge_watchdog_check(sc))
return true;
@@ -6121,7 +6124,10 @@ bge_stop_block(struct bge_softc *sc, bus
(u_long)reg, bit);
}
=20
-
+/*
+ * Stop the adapter and free any mbufs allocated to the
+ * RX and TX lists.
+ */
static void
bge_stop(struct ifnet *ifp, int disable)
{
@@ -6130,31 +6136,11 @@ bge_stop(struct ifnet *ifp, int disable)
ASSERT_SLEEPABLE();
KASSERT(IFNET_LOCKED(ifp));
=20
- mutex_enter(sc->sc_core_lock);
- bge_stop_locked(ifp, disable ? true : false);
- mutex_exit(sc->sc_core_lock);
-}
-
-/*
- * Stop the adapter and free any mbufs allocated to the
- * RX and TX lists.
- */
-static void
-bge_stop_locked(struct ifnet *ifp, bool disable)
-{
- struct bge_softc * const sc =3D ifp->if_softc;
-
- ASSERT_SLEEPABLE();
- KASSERT(IFNET_LOCKED(ifp));
- KASSERT(mutex_owned(sc->sc_core_lock));
-
- sc->bge_stopping =3D true;
-
mutex_enter(sc->sc_intr_lock);
sc->bge_txrx_stopping =3D true;
mutex_exit(sc->sc_intr_lock);
=20
- callout_halt(&sc->bge_timeout, sc->sc_core_lock);
+ callout_halt(&sc->bge_timeout, NULL);
=20
/* Disable host interrupts. */
BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
@@ -6252,7 +6238,9 @@ bge_stop_locked(struct ifnet *ifp, bool=20
=20
ifp->if_flags &=3D ~IFF_RUNNING;
=20
+ mutex_enter(sc->sc_mcast_lock);
sc->bge_if_flags =3D ifp->if_flags;
+ mutex_exit(sc->sc_mcast_lock);
}
=20
static void
diff -r 26b25ed27d2f sys/dev/pci/if_bgevar.h
--- a/sys/dev/pci/if_bgevar.h Sun Aug 11 18:33:13 2024 +0000
+++ b/sys/dev/pci/if_bgevar.h Mon Aug 12 18:32:16 2024 +0000
@@ -261,6 +261,21 @@ struct txdmamap_pool_entry {
#define ASF_NEW_HANDSHAKE 2
#define ASF_STACKUP 4
=20
+/*
+ * Locking notes:
+ *
+ * n IFNET_LOCK
+ * m sc_mcast_lock
+ * i sc_intr_lock
+ * i/n while down, IFNET_LOCK; while up, sc_intr_lock
+ *
+ * Otherwise, stable from attach to detach.
+ *
+ * Lock order:
+ *
+ * IFNET_LOCK -> sc_intr_lock
+ * IFNET_LOCK -> sc_mcast_lock
+ */
struct bge_softc {
device_t bge_dev;
struct ethercom ethercom; /* interface info */
@@ -276,10 +291,10 @@ struct bge_softc {
pcitag_t sc_pcitag;
=20
struct pci_attach_args bge_pa;
- struct mii_data bge_mii;
- struct ifmedia bge_ifmedia; /* media info */
+ struct mii_data bge_mii; /* i: mii data */
+ struct ifmedia bge_ifmedia; /* i: media info */
uint32_t bge_return_ring_cnt;
- uint32_t bge_tx_prodidx;
+ uint32_t bge_tx_prodidx; /* i: tx producer idx */
bus_dma_tag_t bge_dmatag;
bus_dma_tag_t bge_dmatag32;
bool bge_dma64;
@@ -287,7 +302,7 @@ struct bge_softc {
uint32_t bge_pciecap;
uint16_t bge_mps;
int bge_expmrq;
- uint32_t bge_lasttag;
+ uint32_t bge_lasttag; /* i: last status tag */
uint32_t bge_mfw_flags; /* Management F/W flags */
#define BGE_MFW_ON_RXCPU __BIT(0)
#define BGE_MFW_ON_APE __BIT(1)
@@ -297,73 +312,82 @@ struct bge_softc {
int bge_phy_addr;
uint32_t bge_chipid;
uint8_t bge_asf_mode;
- uint8_t bge_asf_count;
+ uint8_t bge_asf_count; /* i: XXX ??? */
struct bge_ring_data *bge_rdata; /* rings */
struct bge_chain_data bge_cdata; /* mbufs */
bus_dmamap_t bge_ring_map;
bus_dma_segment_t bge_ring_seg;
int bge_ring_rseg;
- uint16_t bge_tx_saved_considx;
- uint16_t bge_rx_saved_considx;
- uint16_t bge_std; /* current std ring head */
- uint16_t bge_std_cnt;
- uint16_t bge_jumbo; /* current jumbo ring head */
+ uint16_t bge_tx_saved_considx; /* i: tx consumer idx */
+ uint16_t bge_rx_saved_considx; /* i: rx consumer idx */
+ uint16_t bge_std; /* i: current std ring head */
+ uint16_t bge_std_cnt; /* i: number of std mbufs */
+ uint16_t bge_jumbo;
+ /* i: current jumbo ring head */
SLIST_HEAD(__bge_jfreehead, bge_jpool_entry) bge_jfree_listhead;
+ /* i: list of free jumbo mbufs */
SLIST_HEAD(__bge_jinusehead, bge_jpool_entry) bge_jinuse_listhead;
+ /* i: list of jumbo mbufs in use */
uint32_t bge_stat_ticks;
- uint32_t bge_rx_coal_ticks;
- uint32_t bge_tx_coal_ticks;
- uint32_t bge_rx_max_coal_bds;
- uint32_t bge_tx_max_coal_bds;
- uint32_t bge_sts;
+ uint32_t bge_rx_coal_ticks; /* i */
+ uint32_t bge_tx_coal_ticks; /* i */
+ uint32_t bge_rx_max_coal_bds; /* i */
+ uint32_t bge_tx_max_coal_bds; /* i */
+ uint32_t bge_sts; /* i/n: link status */
#define BGE_STS_LINK __BIT(0) /* MAC link status */
#define BGE_STS_LINK_EVT __BIT(1) /* pending link event */
#define BGE_STS_AUTOPOLL __BIT(2) /* PHY auto-polling */
#define BGE_STS_BIT(sc, x) ((sc)->bge_sts & (x))
#define BGE_STS_SETBIT(sc, x) ((sc)->bge_sts |=3D (x))
#define BGE_STS_CLRBIT(sc, x) ((sc)->bge_sts &=3D ~(x))
- u_short bge_if_flags;
- uint32_t bge_flags;
+ u_short bge_if_flags; /* m: if_flags cache */
+ uint32_t bge_flags; /* i/n */
uint32_t bge_phy_flags;
- int bge_flowflags;
+ int bge_flowflags; /* i */
time_t bge_tx_lastsent;
- bool bge_stopping;
+ /* i: time of last tx */
bool bge_txrx_stopping;
- bool bge_tx_sending;
+ /* i: true when going down */
+ bool bge_tx_sending; /* i: XXX ??? */
=20
#ifdef BGE_EVENT_COUNTERS
/*
* Event counters.
*/
- struct evcnt bge_ev_intr; /* interrupts */
- struct evcnt bge_ev_intr_spurious; /* spurious intr. (tagged status)*/
- struct evcnt bge_ev_intr_spurious2; /* spurious interrupts */
- struct evcnt bge_ev_tx_xoff; /* send PAUSE(len>0) packets */
- struct evcnt bge_ev_tx_xon; /* send PAUSE(len=3D0) packets */
- struct evcnt bge_ev_rx_xoff; /* receive PAUSE(len>0) packets */
- struct evcnt bge_ev_rx_xon; /* receive PAUSE(len=3D0) packets */
- struct evcnt bge_ev_rx_macctl; /* receive MAC control packets */
- struct evcnt bge_ev_xoffentered;/* XOFF state entered */
+ struct evcnt bge_ev_intr; /* i: interrupts */
+ struct evcnt bge_ev_intr_spurious;
+ /* i: spurious intr. (tagged status) */
+ struct evcnt bge_ev_intr_spurious2; /* i: spurious interrupts */
+ struct evcnt bge_ev_tx_xoff; /* i: send PAUSE(len>0) packets */
+ struct evcnt bge_ev_tx_xon; /* i: send PAUSE(len=3D0) packets */
+ struct evcnt bge_ev_rx_xoff; /* i: receive PAUSE(len>0) packets */
+ struct evcnt bge_ev_rx_xon; /* i: receive PAUSE(len=3D0) packets */
+ struct evcnt bge_ev_rx_macctl; /* i: receive MAC control packets */
+ struct evcnt bge_ev_xoffentered;/* i: XOFF state entered */
#endif /* BGE_EVENT_COUNTERS */
- uint64_t bge_if_collisions;
- int bge_txcnt;
- struct callout bge_timeout;
+ uint64_t bge_if_collisions; /* i */
+ int bge_txcnt; /* i: # tx descs in use */
+ struct callout bge_timeout; /* i: tx timeout */
bool bge_pending_rxintr_change;
- bool bge_detaching;
- SLIST_HEAD(, txdmamap_pool_entry) txdma_list;
- struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT];
+ /* i: change pending to
+ * rx_coal_ticks and
+ * rx_max_coal_bds */
+ bool bge_attached;
+ bool bge_detaching; /* n */
+ SLIST_HEAD(, txdmamap_pool_entry) txdma_list; /* i */
+ struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT]; /* i */
=20
struct sysctllog *bge_log;
=20
krndsource_t rnd_source; /* random source */
=20
- kmutex_t *sc_core_lock; /* lock for softc operations */
- kmutex_t *sc_intr_lock; /* lock for interrupt operations */
+ kmutex_t *sc_mcast_lock; /* m: lock for SIOCADD/DELMULTI */
+ kmutex_t *sc_intr_lock; /* i: lock for interrupt operations */
struct workqueue *sc_reset_wq;
- struct work sc_reset_work;
+ struct work sc_reset_work; /* i */
volatile unsigned sc_reset_pending;
=20
- bool sc_trigger_reset;
+ bool sc_trigger_reset; /* i */
};
=20
#endif /* _DEV_PCI_IF_BGEVAR_H_ */