Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/xen/xen convert to bus_dma(9), no explicit xpmap_*(...
details: https://anonhg.NetBSD.org/src/rev/a8c4cb9d3a61
branches: trunk
changeset: 971024:a8c4cb9d3a61
user: jdolecek <jdolecek%NetBSD.org@localhost>
date: Sat Apr 11 11:48:20 2020 +0000
description:
convert to bus_dma(9), no explicit xpmap_*() calls any more
as part of this move some global arrays into struct xnetback_instance,
and fix race for xnetif_lookup()
diffstat:
sys/arch/xen/xen/xennetback_xenbus.c | 262 +++++++++++++++++-----------------
1 files changed, 132 insertions(+), 130 deletions(-)
diffs (truncated from 507 to 300 lines):
diff -r 71b5f47a3ddc -r a8c4cb9d3a61 sys/arch/xen/xen/xennetback_xenbus.c
--- a/sys/arch/xen/xen/xennetback_xenbus.c Sat Apr 11 11:01:12 2020 +0000
+++ b/sys/arch/xen/xen/xennetback_xenbus.c Sat Apr 11 11:48:20 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xennetback_xenbus.c,v 1.95 2020/04/09 10:57:02 jdolecek Exp $ */
+/* $NetBSD: xennetback_xenbus.c,v 1.96 2020/04/11 11:48:20 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.95 2020/04/09 10:57:02 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.96 2020/04/11 11:48:20 jdolecek Exp $");
#include "opt_xen.h"
@@ -70,13 +70,14 @@
#define XENPRINTF(x)
#endif
-extern pt_entry_t xpmap_pg_nx;
-
#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-/* linux wants at least 16 bytes free in front of the packet */
-#define LINUX_REQUESTED_OFFSET 16
+/*
+ * Number of packets to transmit in one hypercall (= number of pages to
+ * transmit at once).
+ */
+#define NB_XMIT_PAGES_BATCH 64
/* ratecheck(9) for pool allocation failures */
static const struct timeval xni_pool_errintvl = { 30, 0 }; /* 30s, each */
@@ -88,6 +89,13 @@
DISCONNECTED
} xnetback_state_t;
+struct xnetback_xstate {
+ bus_dmamap_t xs_dmamap;
+ struct mbuf *xs_m;
+ int xs_id;
+ int xs_flags;
+};
+
/* we keep the xnetback instances in a linked list */
struct xnetback_instance {
SLIST_ENTRY(xnetback_instance) next;
@@ -110,6 +118,10 @@
grant_handle_t xni_rx_ring_handle;
vaddr_t xni_tx_ring_va; /* to unmap the ring */
vaddr_t xni_rx_ring_va;
+
+ /* arrays used in xennetback_ifstart(), used for both Rx and Tx */
+ gnttab_copy_t xni_gop_copy[NB_XMIT_PAGES_BATCH];
+ struct xnetback_xstate xni_xstate[NB_XMIT_PAGES_BATCH];
};
#define xni_if xni_ec.ec_if
#define xni_bpf xni_if.if_bpf
@@ -128,7 +140,6 @@
static inline void xennetback_tx_response(struct xnetback_instance *,
int, int);
-static void xennetback_mbuf_addr(struct mbuf *, paddr_t *, int *);
static SLIST_HEAD(, xnetback_instance) xnetback_instances;
static kmutex_t xnetback_lock;
@@ -141,22 +152,6 @@
.xbakd_type = "vif"
};
-/*
- * Number of packets to transmit in one hypercall (= number of pages to
- * transmit at once).
- */
-#define NB_XMIT_PAGES_BATCH 64
-
-/* arrays used in xennetback_ifstart(), too large to allocate on stack */
-/* XXXSMP */
-static gnttab_copy_t xstart_gop_copy[NB_XMIT_PAGES_BATCH];
-static struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH];
-static struct _req_info {
- int id;
- int flags;
-} xstart_req[NB_XMIT_PAGES_BATCH];
-
-
void
xvifattach(int n)
{
@@ -193,14 +188,21 @@
return err;
}
- if (xnetif_lookup(domid, handle)) {
- return EEXIST;
- }
xneti = kmem_zalloc(sizeof(*xneti), KM_SLEEP);
xneti->xni_domid = domid;
xneti->xni_handle = handle;
xneti->xni_status = DISCONNECTED;
+ /* Need to keep the lock for lookup and the list update */
+ mutex_enter(&xnetback_lock);
+ if (xnetif_lookup(domid, handle)) {
+ mutex_exit(&xnetback_lock);
+ kmem_free(xneti, sizeof(*xneti));
+ return EEXIST;
+ }
+ SLIST_INSERT_HEAD(&xnetback_instances, xneti, next);
+ mutex_exit(&xnetback_lock);
+
xbusd->xbusd_u.b.b_cookie = xneti;
xbusd->xbusd_u.b.b_detach = xennetback_xenbus_destroy;
xneti->xni_xbusd = xbusd;
@@ -230,6 +232,20 @@
/* we can't use the same MAC addr as our guest */
xneti->xni_enaddr[3]++;
+
+ /* Initialize DMA map, used only for loading PA */
+ for (i = 0; i < __arraycount(xneti->xni_xstate); i++) {
+ if (bus_dmamap_create(xneti->xni_xbusd->xbusd_dmat, PAGE_SIZE,
+ 1, PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
+ &xneti->xni_xstate[i].xs_dmamap)
+ != 0) {
+ aprint_error_ifnet(ifp,
+ "failed to allocate dma map\n");
+ err = ENOMEM;
+ goto fail;
+ }
+ }
+
/* create pseudo-interface */
aprint_verbose_ifnet(ifp, "Ethernet address %s\n",
ether_sprintf(xneti->xni_enaddr));
@@ -258,10 +274,6 @@
if_deferred_start_init(ifp, NULL);
ether_ifattach(&xneti->xni_if, xneti->xni_enaddr);
- mutex_enter(&xnetback_lock);
- SLIST_INSERT_HEAD(&xnetback_instances, xneti, next);
- mutex_exit(&xnetback_lock);
-
xbusd->xbusd_otherend_changed = xennetback_frontend_changed;
do {
@@ -579,14 +591,14 @@
struct xnetback_instance *xneti;
bool found = false;
- mutex_enter(&xnetback_lock);
+ KASSERT(mutex_owned(&xnetback_lock));
+
SLIST_FOREACH(xneti, &xnetback_instances, next) {
if (xneti->xni_domid == dom && xneti->xni_handle == handle) {
found = true;
break;
}
}
- mutex_exit(&xnetback_lock);
return found;
}
@@ -639,50 +651,50 @@
{
int i = 0;
gnttab_copy_t *gop;
- struct mbuf *m;
- struct _req_info *req;
+ struct xnetback_xstate *xst;
/*
* Copy the data and ack it. Delaying it until the mbuf is
* freed will stall transmit.
*/
- if (HYPERVISOR_grant_table_op(GNTTABOP_copy, xstart_gop_copy, queued)
- != 0) {
- printf("%s: GNTTABOP_copy failed", ifp->if_xname);
+ if (HYPERVISOR_grant_table_op(GNTTABOP_copy, xneti->xni_gop_copy,
+ queued) != 0) {
+ printf("%s: GNTTABOP_copy Tx failed", ifp->if_xname);
goto abort;
}
for (; i < queued; i++) {
- gop = &xstart_gop_copy[i];
- m = mbufs_sent[i];
- req = &xstart_req[i];
+ gop = &xneti->xni_gop_copy[i];
+ xst = &xneti->xni_xstate[i];
if (gop->status != GNTST_okay) {
- printf("%s GNTTABOP_copy[%d] %d\n",
+ printf("%s GNTTABOP_copy[%d] Tx %d\n",
ifp->if_xname, i, gop->status);
goto abort;
}
- xennetback_tx_response(xneti, req->id, NETIF_RSP_OKAY);
+ xennetback_tx_response(xneti, xst->xs_id, NETIF_RSP_OKAY);
- if (req->flags & NETTXF_csum_blank)
- xennet_checksum_fill(ifp, m);
- else if (req->flags & NETTXF_data_validated)
- m->m_pkthdr.csum_flags = XN_M_CSUM_SUPPORTED;
- m_set_rcvif(m, ifp);
+ if (xst->xs_flags & NETTXF_csum_blank)
+ xennet_checksum_fill(ifp, xst->xs_m);
+ else if (xst->xs_flags & NETTXF_data_validated)
+ xst->xs_m->m_pkthdr.csum_flags = XN_M_CSUM_SUPPORTED;
+ m_set_rcvif(xst->xs_m, ifp);
- if_percpuq_enqueue(ifp->if_percpuq, m);
+ if_percpuq_enqueue(ifp->if_percpuq, xst->xs_m);
+
+ bus_dmamap_unload(xneti->xni_xbusd->xbusd_dmat,
+ xst->xs_dmamap);
}
return;
abort:
for (; i < queued; i++) {
- m = mbufs_sent[i];
- req = &xstart_req[i];
+ xst = &xneti->xni_xstate[i];
- m_freem(m);
- xennetback_tx_response(xneti, req->id, NETIF_RSP_ERROR);
+ m_freem(xst->xs_m);
+ xennetback_tx_response(xneti, xst->xs_id, NETIF_RSP_ERROR);
if_statinc(ifp, if_ierrors);
}
}
@@ -697,8 +709,9 @@
int receive_pending;
RING_IDX req_cons;
gnttab_copy_t *gop;
- paddr_t pa;
- int offset, queued = 0;
+ paddr_t ma;
+ int queued = 0;
+ struct xnetback_xstate *xst;
XENPRINTF(("xennetback_evthandler "));
req_cons = xneti->xni_txring.req_cons;
@@ -729,7 +742,7 @@
*/
const char *msg = xennetback_tx_check_packet(&txreq,
xneti->xni_ec.ec_capenable & ETHERCAP_VLAN_MTU);
- if (msg) {
+ if (__predict_false(msg != NULL)) {
printf("%s: packet with size %d is %s\n",
ifp->if_xname, txreq.size, msg);
xennetback_tx_response(xneti, txreq.id,
@@ -760,15 +773,30 @@
continue;
}
}
+ m->m_len = m->m_pkthdr.len = txreq.size;
XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n",
xneti->xni_if.if_xname, txreq.offset,
txreq.size, txreq.id, MASK_NETIF_TX_IDX(req_cons)));
- xennetback_mbuf_addr(m, &pa, &offset);
+ xst = &xneti->xni_xstate[queued];
+ xst->xs_m = m;
+ xst->xs_id = txreq.id;
+ xst->xs_flags = txreq.flags;
+
+ if (bus_dmamap_load_mbuf(xneti->xni_xbusd->xbusd_dmat,
+ xst->xs_dmamap, m, BUS_DMA_NOWAIT) != 0) {
+ printf("%s: mbuf load failed\n", ifp->if_xname);
+ m_freem(m);
+ xennetback_tx_response(xneti, txreq.id,
+ NETIF_RSP_DROPPED);
+ if_statinc(ifp, if_ierrors);
+ continue;
+ }
+ ma = xst->xs_dmamap->dm_segs[0].ds_addr;
/* Queue for the copy */
- gop = &xstart_gop_copy[queued];
+ gop = &xneti->xni_gop_copy[queued];
memset(gop, 0, sizeof(*gop));
gop->flags = GNTCOPY_source_gref;
gop->len = txreq.size;
@@ -777,15 +805,11 @@
gop->source.offset = txreq.offset;
gop->source.domid = xneti->xni_domid;
- gop->dest.offset = offset;
+ gop->dest.offset = ma & PAGE_MASK;
gop->dest.domid = DOMID_SELF;
- gop->dest.u.gmfn = xpmap_ptom(pa) >> PAGE_SHIFT;
Home |
Main Index |
Thread Index |
Old Index