Source-Changes-HG archive
[src/trunk]: src/sys/arch/xen/xen use standard deferred if_start framework in...
details: https://anonhg.NetBSD.org/src/rev/e19a0ac54b74
branches: trunk
changeset: 1008591:e19a0ac54b74
user: jdolecek <jdolecek%NetBSD.org@localhost>
date: Fri Mar 27 18:37:30 2020 +0000
description:
use standard deferred if_start framework instead of custom variant
diffstat:
sys/arch/xen/xen/if_xennet_xenbus.c | 57 ++++++++---------------------------
sys/arch/xen/xen/xennetback_xenbus.c | 51 ++++++++++---------------------
2 files changed, 30 insertions(+), 78 deletions(-)
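In outline, the deferred start framework replaces each driver-private softint with hooks provided by net/if: the driver registers its if_start routine once at attach time, and later asks the stack to run it from the framework's softint instead of scheduling its own. The sketch below is illustrative only, not part of the patch; the attach and interrupt routines are simplified placeholders, and only if_deferred_start_init() and if_schedule_deferred_start() are taken from the change itself.

/* Illustrative sketch of the deferred if_start pattern (not from the patch). */
#include <sys/param.h>
#include <net/if.h>

static void
xennet_start_sketch(struct ifnet *ifp)
{
	/* Invoked from the framework's softint at splsoftnet: dequeue
	   mbufs from ifp->if_snd and post the TX requests in one batch. */
}

static void
xennet_attach_sketch(struct ifnet *ifp)
{
	ifp->if_start = xennet_start_sketch;
	if_attach(ifp);
	/* NULL: have the framework call ifp->if_start from its own
	   softint, so the driver no longer needs softint_establish(). */
	if_deferred_start_init(ifp, NULL);
}

static int
xennet_intr_sketch(void *arg)
{
	struct ifnet *ifp = arg;

	/* After processing TX completions, poke the send queue again;
	   this replaces the driver's own softint_schedule() call. */
	if_schedule_deferred_start(ifp);
	return 1;
}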
diffs (278 lines):
diff -r 7b291f99dd6a -r e19a0ac54b74 sys/arch/xen/xen/if_xennet_xenbus.c
--- a/sys/arch/xen/xen/if_xennet_xenbus.c Fri Mar 27 18:04:45 2020 +0000
+++ b/sys/arch/xen/xen/if_xennet_xenbus.c Fri Mar 27 18:37:30 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: if_xennet_xenbus.c,v 1.96 2020/03/26 18:50:16 jdolecek Exp $ */
+/* $NetBSD: if_xennet_xenbus.c,v 1.97 2020/03/27 18:37:30 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -61,11 +61,11 @@
*
* For TX:
* Purpose is to transmit packets to the outside. The start of day is in
- * xennet_start() (default output routine of xennet) that schedules a softint,
- * xennet_softstart(). xennet_softstart() generates the requests associated
+ * xennet_start() (output routine of xennet) scheduled via a softint.
+ * xennet_start() generates the requests associated
* to the TX mbufs queued (see altq(9)).
* The backend's responses are processed by xennet_tx_complete(), called
- * from xennet_softstart()
+ * from xennet_start()
*
* for RX:
* Purpose is to process the packets received from the outside. RX buffers
@@ -81,7 +81,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.96 2020/03/26 18:50:16 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.97 2020/03/27 18:37:30 jdolecek Exp $");
#include "opt_xen.h"
#include "opt_nfs_boot.h"
@@ -190,7 +190,6 @@
netif_rx_front_ring_t sc_rx_ring;
unsigned int sc_evtchn;
- void *sc_softintr;
struct intrhand *sc_ih;
grant_ref_t sc_tx_ring_gntref;
@@ -243,7 +242,6 @@
static int xennet_init(struct ifnet *);
static void xennet_stop(struct ifnet *, int);
static void xennet_reset(struct xennet_xenbus_softc *);
-static void xennet_softstart(void *);
static void xennet_start(struct ifnet *);
static int xennet_ioctl(struct ifnet *, u_long, void *);
static void xennet_watchdog(struct ifnet *);
@@ -396,11 +394,8 @@
IFQ_SET_READY(&ifp->if_snd);
if_attach(ifp);
+ if_deferred_start_init(ifp, NULL);
ether_ifattach(ifp, sc->sc_enaddr);
- sc->sc_softintr = softint_establish(SOFTINT_NET, xennet_softstart, sc);
- if (sc->sc_softintr == NULL)
- panic("%s: can't establish soft interrupt",
- device_xname(self));
/* alloc shared rings */
tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
@@ -481,7 +476,6 @@
xengnt_revoke_access(sc->sc_rx_ring_gntref);
uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
UVM_KMF_WIRED);
- softint_disestablish(sc->sc_softintr);
splx(s0);
pmf_device_deregister(self);
@@ -930,7 +924,7 @@
/*
* Process responses associated to the TX mbufs sent previously through
- * xennet_softstart()
+ * xennet_start()
* Called at splsoftnet.
*/
static void
@@ -999,10 +993,7 @@
return 1;
/* Poke Tx queue if we run out of Tx buffers earlier */
- mutex_enter(&sc->sc_tx_lock);
- if (SLIST_EMPTY(&sc->sc_txreq_head))
- softint_schedule(sc->sc_softintr);
- mutex_exit(&sc->sc_tx_lock);
+ if_schedule_deferred_start(ifp);
rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
@@ -1141,38 +1132,14 @@
}
/*
- * The output routine of a xennet interface
- * Called at splnet.
+ * The output routine of a xennet interface. Prepares mbufs for TX,
+ * and notify backend when finished.
+ * Called at splsoftnet.
*/
void
xennet_start(struct ifnet *ifp)
{
struct xennet_xenbus_softc *sc = ifp->if_softc;
-
- DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", device_xname(sc->sc_dev)));
-
- rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
-
- /*
- * The Xen communication channel is much more efficient if we can
- * schedule batch of packets for domain0. To achieve this, we
- * schedule a soft interrupt, and just return. This way, the network
- * stack will enqueue all pending mbufs in the interface's send queue
- * before it is processed by xennet_softstart().
- */
- softint_schedule(sc->sc_softintr);
- return;
-}
-
-/*
- * Prepares mbufs for TX, and notify backend when finished
- * Called at splsoftnet
- */
-void
-xennet_softstart(void *arg)
-{
- struct xennet_xenbus_softc *sc = arg;
- struct ifnet *ifp = &sc->sc_ethercom.ec_if;
struct mbuf *m, *new_m;
netif_tx_request_t *txreq;
RING_IDX req_prod;
@@ -1184,6 +1151,8 @@
if ((ifp->if_flags & IFF_RUNNING) == 0)
return;
+ rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
+
xennet_tx_complete(sc);
mutex_enter(&sc->sc_tx_lock);
diff -r 7b291f99dd6a -r e19a0ac54b74 sys/arch/xen/xen/xennetback_xenbus.c
--- a/sys/arch/xen/xen/xennetback_xenbus.c Fri Mar 27 18:04:45 2020 +0000
+++ b/sys/arch/xen/xen/xennetback_xenbus.c Fri Mar 27 18:37:30 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xennetback_xenbus.c,v 1.85 2020/03/22 11:20:59 jdolecek Exp $ */
+/* $NetBSD: xennetback_xenbus.c,v 1.86 2020/03/27 18:37:30 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.85 2020/03/22 11:20:59 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.86 2020/03/27 18:37:30 jdolecek Exp $");
#include "opt_xen.h"
@@ -106,7 +106,7 @@
domid_t xni_domid; /* attached to this domain */
uint32_t xni_handle; /* domain-specific handle */
xnetback_state_t xni_status;
- void *xni_softintr;
+ bool xni_rx_copy;
/* network interface stuff */
struct ethercom xni_ec;
@@ -129,8 +129,8 @@
void xvifattach(int);
static int xennetback_ifioctl(struct ifnet *, u_long, void *);
static void xennetback_ifstart(struct ifnet *);
-static void xennetback_ifsoftstart_transfer(void *);
-static void xennetback_ifsoftstart_copy(void *);
+static void xennetback_ifsoftstart_transfer(struct xnetback_instance *);
+static void xennetback_ifsoftstart_copy(struct xnetback_instance *);
static void xennetback_ifwatchdog(struct ifnet *);
static int xennetback_ifinit(struct ifnet *);
static void xennetback_ifstop(struct ifnet *, int);
@@ -321,6 +321,7 @@
ifp->if_timer = 0;
IFQ_SET_READY(&ifp->if_snd);
if_attach(ifp);
+ if_deferred_start_init(ifp, NULL);
ether_ifattach(&xneti->xni_if, xneti->xni_enaddr);
mutex_enter(&xnetback_lock);
@@ -397,11 +398,6 @@
hypervisor_mask_event(xneti->xni_evtchn);
xen_intr_disestablish(xneti->xni_ih);
xneti->xni_ih = NULL;
-
- if (xneti->xni_softintr) {
- softint_disestablish(xneti->xni_softintr);
- xneti->xni_softintr = NULL;
- }
}
mutex_enter(&xnetback_lock);
@@ -490,20 +486,7 @@
xbusd->xbusd_otherend);
return -1;
}
-
- if (rx_copy)
- xneti->xni_softintr = softint_establish(SOFTINT_NET,
- xennetback_ifsoftstart_copy, xneti);
- else
- xneti->xni_softintr = softint_establish(SOFTINT_NET,
- xennetback_ifsoftstart_transfer, xneti);
-
- if (xneti->xni_softintr == NULL) {
- err = ENOMEM;
- xenbus_dev_fatal(xbusd, ENOMEM,
- "can't allocate softint", xbusd->xbusd_otherend);
- return -1;
- }
+ xneti->xni_rx_copy = (rx_copy != 0);
/* allocate VA space and map rings */
xneti->xni_tx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
@@ -609,7 +592,6 @@
uvm_km_free(kernel_map, xneti->xni_tx_ring_va,
PAGE_SIZE, UVM_KMF_VAONLY);
- softint_disestablish(xneti->xni_softintr);
return -1;
}
@@ -899,8 +881,9 @@
xen_rmb(); /* be sure to read the request before updating pointer */
xneti->xni_txring.req_cons = req_cons;
xen_wmb();
+
/* check to see if we can transmit more packets */
- softint_schedule(xneti->xni_softintr);
+ if_schedule_deferred_start(ifp);
return 1;
}
@@ -947,18 +930,19 @@
/*
* The Xen communication channel is much more efficient if we can
- * schedule batch of packets for the domain. To achieve this, we
- * schedule a soft interrupt, and just return. This way, the network
+ * schedule batch of packets for the domain. Deferred start by network
* stack will enqueue all pending mbufs in the interface's send queue
- * before it is processed by the soft interrupt handler().
+ * before it is processed by the soft interrupt handler.
*/
- softint_schedule(xneti->xni_softintr);
+ if (__predict_true(xneti->xni_rx_copy))
+ xennetback_ifsoftstart_copy(xneti);
+ else
+ xennetback_ifsoftstart_transfer(xneti);
}
static void
-xennetback_ifsoftstart_transfer(void *arg)
+xennetback_ifsoftstart_transfer(struct xnetback_instance *xneti)
{
- struct xnetback_instance *xneti = arg;
struct ifnet *ifp = &xneti->xni_if;
struct mbuf *m;
vaddr_t xmit_va;
@@ -1270,9 +1254,8 @@
}
static void
-xennetback_ifsoftstart_copy(void *arg)
+xennetback_ifsoftstart_copy(struct xnetback_instance *xneti)
{
- struct xnetback_instance *xneti = arg;
struct ifnet *ifp = &xneti->xni_if;
struct mbuf *m, *new_m;
paddr_t xmit_pa;