Source-Changes-HG archive
[src/trunk]: src/sys/arch/xen/xen revert previous - physio arranges for transf...
details: https://anonhg.NetBSD.org/src/rev/a7b2d9d68ac9
branches: trunk
changeset: 971033:a7b2d9d68ac9
user: jdolecek <jdolecek%NetBSD.org@localhost>
date: Sat Apr 11 17:52:01 2020 +0000
description:
revert previous - physio arranges for transfer directly to user-provided
buffers, which are generally not DEV_BSIZE-aligned
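
For context, the following userland sketch (illustrative only, not part of
the commit) shows the bounce-buffer pattern this revert restores: the driver
checks the caller's buffer for XEN_BSIZE (512-byte) alignment and, if it is
misaligned, stages the I/O through an aligned kernel buffer instead of
asserting. Here posix_memalign()/free() stand in for the kernel's
uvm_km_kmem_alloc()/uvm_km_kmem_free(), and struct io is a hypothetical
stand-in for the struct buf / struct xbd_req fields used in the diff below:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define XEN_BSIZE 512    /* Xen block-layer sector size */

/* Hypothetical stand-in for the struct buf / struct xbd_req fields. */
struct io {
    void   *data;    /* caller's buffer, arbitrary alignment */
    void   *xfer;    /* buffer actually handed to the backend */
    size_t  count;   /* transfer length in bytes */
    int     is_read; /* nonzero for reads */
};

/* Before issuing: bounce the I/O if the caller's buffer is misaligned. */
static int
io_map_align(struct io *io)
{
    io->xfer = io->data;
    if (((uintptr_t)io->data & (XEN_BSIZE - 1)) == 0)
        return 0;    /* already aligned, transfer in place */
    /* posix_memalign() stands in for uvm_km_kmem_alloc(). */
    if (posix_memalign(&io->xfer, XEN_BSIZE, io->count) != 0) {
        io->xfer = io->data;
        return ENOMEM;
    }
    if (!io->is_read)    /* writes: stage the data before the I/O */
        memcpy(io->xfer, io->data, io->count);
    return 0;
}

/* After completion: copy reads back and release the bounce buffer. */
static void
io_unmap_align(struct io *io)
{
    if (io->xfer == io->data)
        return;      /* no bounce buffer was needed */
    if (io->is_read)
        memcpy(io->data, io->xfer, io->count);
    free(io->xfer);  /* stands in for uvm_km_kmem_free() */
    io->xfer = io->data;
}
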
diffstat:
sys/arch/xen/xen/xbd_xenbus.c | 69 +++++++++++++++++++++++++++++++++++-------
1 files changed, 57 insertions(+), 12 deletions(-)
diffs (158 lines):
diff -r 0c30deea591c -r a7b2d9d68ac9 sys/arch/xen/xen/xbd_xenbus.c
--- a/sys/arch/xen/xen/xbd_xenbus.c Sat Apr 11 17:43:54 2020 +0000
+++ b/sys/arch/xen/xen/xbd_xenbus.c Sat Apr 11 17:52:01 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xbd_xenbus.c,v 1.102 2020/04/11 16:15:34 jdolecek Exp $ */
+/* $NetBSD: xbd_xenbus.c,v 1.103 2020/04/11 17:52:01 jdolecek Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.102 2020/04/11 16:15:34 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.103 2020/04/11 17:52:01 jdolecek Exp $");
#include "opt_xen.h"
@@ -97,8 +97,6 @@
#define XEN_BSHIFT 9 /* log2(XEN_BSIZE) */
#define XEN_BSIZE (1 << XEN_BSHIFT)
-__CTASSERT((DEV_BSIZE == XEN_BSIZE));
-
struct xbd_req {
SLIST_ENTRY(xbd_req) req_next;
uint16_t req_id; /* ID passed to backend */
@@ -107,6 +105,7 @@
grant_ref_t req_gntref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int req_nr_segments; /* number of segments in this request */
struct buf *req_bp; /* buffer associated with this request */
+ void *req_data; /* pointer to the data buffer */
} req_rw;
struct {
int s_error;
@@ -117,6 +116,7 @@
#define req_gntref u.req_rw.req_gntref
#define req_nr_segments u.req_rw.req_nr_segments
#define req_bp u.req_rw.req_bp
+#define req_data u.req_rw.req_data
#define req_sync u.req_sync
struct xbd_xenbus_softc {
@@ -156,6 +156,7 @@
#define BLKIF_FEATURE_PERSISTENT 0x4
#define BLKIF_FEATURE_BITS \
"\20\1CACHE-FLUSH\2BARRIER\3PERSISTENT"
+ struct evcnt sc_cnt_map_unalign;
};
#if 0
@@ -176,6 +177,9 @@
static void xbd_backend_changed(void *, XenbusState);
static void xbd_connect(struct xbd_xenbus_softc *);
+static int xbd_map_align(struct xbd_req *);
+static void xbd_unmap_align(struct xbd_req *);
+
static void xbdminphys(struct buf *);
CFATTACH_DECL3_NEW(xbd, sizeof(struct xbd_xenbus_softc),
@@ -281,6 +285,9 @@
panic("%s: can't alloc ring", device_xname(self));
sc->sc_ring.sring = ring;
+ evcnt_attach_dynamic(&sc->sc_cnt_map_unalign, EVCNT_TYPE_MISC,
+ NULL, device_xname(self), "map unaligned");
+
/* resume shared structures and tell backend that we are ready */
if (xbd_xenbus_resume(self, PMF_Q_NONE) == false) {
uvm_km_free(kernel_map, (vaddr_t)ring, PAGE_SIZE,
@@ -364,6 +371,8 @@
uvm_km_free(kernel_map, (vaddr_t)sc->sc_ring.sring,
PAGE_SIZE, UVM_KMF_WIRED);
+ evcnt_detach(&sc->sc_cnt_map_unalign);
+
pmf_device_deregister(dev);
return 0;
@@ -702,6 +711,9 @@
goto next;
}
/* b_resid was set in dk_start */
+ if (__predict_false(
+ xbdreq->req_data != NULL && bp->b_data != xbdreq->req_data))
+ xbd_unmap_align(xbdreq);
next:
xbdreq->req_bp = NULL;
dk_done(&sc->sc_dksc, bp);
@@ -971,13 +983,16 @@
}
xbdreq->req_bp = bp;
+ xbdreq->req_data = bp->b_data;
+ if (__predict_false((vaddr_t)bp->b_data & (XEN_BSIZE - 1))) {
+ sc->sc_cnt_map_unalign.ev_count++;
- /*
- * All bufs passed by system are aligned to DEV_BSIZE.
- * xbd requires this to be the case, as transfer offsets
- * are expressed in multiplies of 512 (XEN_BSIZE).
- */
- KASSERT(((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) == 0);
+ if (__predict_false(xbd_map_align(xbdreq) != 0)) {
+ DPRINTF(("xbd_diskstart: no align\n"));
+ error = EAGAIN;
+ goto out;
+ }
+ }
SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
@@ -987,8 +1002,8 @@
req->sector_number = bp->b_rawblkno;
req->handle = sc->sc_handle;
- va = (vaddr_t)bp->b_data & ~PAGE_MASK;
- off = (vaddr_t)bp->b_data & PAGE_MASK;
+ va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
+ off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
bcount = bp->b_bcount;
bp->b_resid = 0;
for (seg = 0; bcount > 0;) {
@@ -1028,3 +1043,33 @@
err:
return error;
}
+
+static int
+xbd_map_align(struct xbd_req *req)
+{
+ int s = splvm(); /* XXXSMP - bogus? */
+ int rc;
+
+ rc = uvm_km_kmem_alloc(kmem_va_arena,
+ req->req_bp->b_bcount, (VM_NOSLEEP | VM_INSTANTFIT),
+ (vmem_addr_t *)&req->req_data);
+ splx(s);
+ if (__predict_false(rc != 0))
+ return ENOMEM;
+ if ((req->req_bp->b_flags & B_READ) == 0)
+ memcpy(req->req_data, req->req_bp->b_data,
+ req->req_bp->b_bcount);
+ return 0;
+}
+
+static void
+xbd_unmap_align(struct xbd_req *req)
+{
+ int s;
+ if (req->req_bp->b_flags & B_READ)
+ memcpy(req->req_bp->b_data, req->req_data,
+ req->req_bp->b_bcount);
+ s = splvm(); /* XXXSMP - bogus? */
+ uvm_km_kmem_free(kmem_va_arena, (vaddr_t)req->req_data, req->req_bp->b_bcount);
+ splx(s);
+}
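
Tying the new helpers together: in the diff above, xbd_diskstart() calls
xbd_map_align() before queueing a request and fails with EAGAIN when no
bounce memory is available, while the completion path calls
xbd_unmap_align() only when req_data differs from bp->b_data. A hypothetical
synchronous caller of the sketch shown earlier might look as follows;
backend_transfer() is an assumed placeholder for the ring submission and
completion, which the real driver handles asynchronously in xbd_handler():

/* Assumed placeholder for ring submission + completion. */
extern void backend_transfer(void *buf, size_t len);

static int
io_submit(struct io *io)
{
    int error;

    if ((error = io_map_align(io)) != 0)
        return error;    /* driver would retry later (EAGAIN path) */
    backend_transfer(io->xfer, io->count);
    io_unmap_align(io);  /* copies reads back, frees any bounce */
    return 0;
}
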