Source-Changes-HG archive
[src/netbsd-6]: src/sys Pull up the following revision(s) (requested by bouy...
details: https://anonhg.NetBSD.org/src/rev/317624157a77
branches: netbsd-6
changeset: 776638:317624157a77
user: sborrill <sborrill@NetBSD.org>
date: Tue Jun 03 09:17:52 2014 +0000
description:
Pull up the following revision(s) (requested by bouyer in ticket #1075):
sys/arch/xen/xen/xbd_xenbus.c: revision 1.63 via patch
sys/dev/cgd.c: revision 1.87 via patch
sys/dev/dksubr.c: revision 1.50 via patch
sys/dev/dkvar.h: revision 1.19 via patch
Avoid xbd(4) reordering requests, which, depending on the underlying
hardware, can badly affect write performance. This can give up to a 5x
performance gain in sequential writes.
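For context, the heart of the xbd_xenbus.c change is the new xbdstart() shown
in the diff below: instead of being handed one buf at a time through the
generic dk_start()/dk_iodone() machinery, the driver now walks its own bufq,
peeking at the head and dequeuing a buffer only once the request is certain to
be issued. A minimal sketch of that peek-then-commit pattern follows;
bufq_peek()/bufq_get() are the real bufq(9) calls used in the diff, while
ring_full() and submit_request() are hypothetical placeholders for the
driver-specific resource check and ring submission, not real kernel APIs:

/*
 * Simplified sketch of the queue-draining pattern adopted by xbdstart().
 * Only the bufq(9) calls are real; the rest is placeholder pseudocode.
 */
static void
drv_start(struct dk_softc *dksc)
{
	struct buf *bp;

	while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {
		if (ring_full(dksc))
			return;			/* out of resources: leave bp queued */
		(void)bufq_get(dksc->sc_bufq);	/* commit: same bp we just peeked */
		submit_request(dksc, bp);	/* issued strictly in queue order */
	}
}

The key property is that a buffer leaves the queue only at the point it is
guaranteed to be submitted, so queued writes reach the backend in the order
the bufq hands them out.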
diffstat:
sys/arch/xen/xen/xbd_xenbus.c | 230 ++++++++++++++++++++++-------------------
sys/dev/cgd.c | 113 +++++++++++---------
sys/dev/dksubr.c | 38 +-----
sys/dev/dkvar.h | 7 +-
4 files changed, 198 insertions(+), 190 deletions(-)
diffs (truncated from 564 to 300 lines):
diff -r 40edb8a73c1f -r 317624157a77 sys/arch/xen/xen/xbd_xenbus.c
--- a/sys/arch/xen/xen/xbd_xenbus.c Wed May 21 21:43:22 2014 +0000
+++ b/sys/arch/xen/xen/xbd_xenbus.c Tue Jun 03 09:17:52 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xbd_xenbus.c,v 1.53 2012/02/02 20:11:26 para Exp $ */
+/* $NetBSD: xbd_xenbus.c,v 1.53.2.1 2014/06/03 09:17:52 sborrill Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.53 2012/02/02 20:11:26 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.53.2.1 2014/06/03 09:17:52 sborrill Exp $");
#include "opt_xen.h"
@@ -121,9 +121,9 @@
#define req_sync u.req_sync
struct xbd_xenbus_softc {
- device_t sc_dev;
struct dk_softc sc_dksc;
struct dk_intf *sc_di;
+ device_t sc_dev;
struct xenbus_device *sc_xbusd;
blkif_front_ring_t sc_ring;
@@ -169,7 +169,7 @@
static bool xbd_xenbus_resume(device_t, const pmf_qual_t *);
static int xbd_handler(void *);
-static int xbdstart(struct dk_softc *, struct buf *);
+static void xbdstart(struct dk_softc *);
static void xbd_backend_changed(void *, XenbusState);
static void xbd_connect(struct xbd_xenbus_softc *);
@@ -725,9 +725,10 @@
if (more_to_do)
goto again;
- dk_iodone(sc->sc_di, &sc->sc_dksc);
if (sc->sc_xbdreq_wait)
wakeup(&sc->sc_xbdreq_wait);
+ else
+ xbdstart(&sc->sc_dksc);
return 1;
}
@@ -922,132 +923,151 @@
return dk_dump(sc->sc_di, &sc->sc_dksc, dev, blkno, va, size);
}
-static int
-xbdstart(struct dk_softc *dksc, struct buf *bp)
+static void
+xbdstart(struct dk_softc *dksc)
{
- struct xbd_xenbus_softc *sc;
+ struct xbd_xenbus_softc *sc = (struct xbd_xenbus_softc *)dksc;
+ struct buf *bp;
+#ifdef DIAGNOSTIC
+ struct buf *qbp;
+#endif
struct xbd_req *xbdreq;
blkif_request_t *req;
- int ret = 0, runqueue = 1;
size_t bcount, off;
paddr_t ma;
vaddr_t va;
int nsects, nbytes, seg;
int notify;
- DPRINTF(("xbdstart(%p): b_bcount = %ld\n", bp, (long)bp->b_bcount));
+ while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {
- sc = device_lookup_private(&xbd_cd, DISKUNIT(bp->b_dev));
- if (sc == NULL || sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
- bp->b_error = EIO;
- goto err;
- }
+ DPRINTF(("xbdstart(%p): b_bcount = %ld\n",
+ bp, (long)bp->b_bcount));
- if (bp->b_rawblkno < 0 || bp->b_rawblkno > sc->sc_xbdsize) {
- /* invalid block number */
- bp->b_error = EINVAL;
- goto err;
- }
+ if (sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
+ bp->b_error = EIO;
+ goto err;
+ }
- if (bp->b_rawblkno == sc->sc_xbdsize) {
- /* at end of disk; return short read */
- bp->b_resid = bp->b_bcount;
- biodone(bp);
- return 0;
- }
+ if (bp->b_rawblkno < 0 || bp->b_rawblkno > sc->sc_xbdsize) {
+ /* invalid block number */
+ bp->b_error = EINVAL;
+ goto err;
+ }
- if (__predict_false(sc->sc_backend_status == BLKIF_STATE_SUSPENDED)) {
- /* device is suspended, do not consume buffer */
- DPRINTF(("%s: (xbdstart) device suspended\n",
- device_xname(sc->sc_dev)));
- ret = -1;
- goto out;
- }
-
- if (RING_FULL(&sc->sc_ring) || sc->sc_xbdreq_wait) {
- DPRINTF(("xbdstart: ring_full\n"));
- ret = -1;
- goto out;
- }
+ if (bp->b_rawblkno == sc->sc_xbdsize) {
+ /* at end of disk; return short read */
+ bp->b_resid = bp->b_bcount;
+#ifdef DIAGNOSTIC
+ qbp = bufq_get(dksc->sc_bufq);
+ KASSERT(bp == qbp);
+#else
+ (void)bufq_get(dksc->sc_bufq);
+#endif
+ biodone(bp);
+ continue;
+ }
- dksc = &sc->sc_dksc;
+ if (__predict_false(
+ sc->sc_backend_status == BLKIF_STATE_SUSPENDED)) {
+ /* device is suspended, do not consume buffer */
+ DPRINTF(("%s: (xbdstart) device suspended\n",
+ device_xname(sc->sc_dev)));
+ goto out;
+ }
- xbdreq = SLIST_FIRST(&sc->sc_xbdreq_head);
- if (__predict_false(xbdreq == NULL)) {
- DPRINTF(("xbdstart: no req\n"));
- ret = -1; /* dk_start should not remove bp from queue */
- goto out;
- }
+ if (RING_FULL(&sc->sc_ring) || sc->sc_xbdreq_wait) {
+ DPRINTF(("xbdstart: ring_full\n"));
+ goto out;
+ }
- xbdreq->req_bp = bp;
- xbdreq->req_data = bp->b_data;
- if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
- if (__predict_false(xbd_map_align(xbdreq) != 0)) {
- ret = -1;
+ xbdreq = SLIST_FIRST(&sc->sc_xbdreq_head);
+ if (__predict_false(xbdreq == NULL)) {
+ DPRINTF(("xbdstart: no req\n"));
goto out;
}
- }
- /* now we're sure we'll send this buf */
- disk_busy(&dksc->sc_dkdev);
- SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
- req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
- req->id = xbdreq->req_id;
- req->operation = bp->b_flags & B_READ ? BLKIF_OP_READ : BLKIF_OP_WRITE;
- req->sector_number = bp->b_rawblkno;
- req->handle = sc->sc_handle;
+
+ xbdreq->req_bp = bp;
+ xbdreq->req_data = bp->b_data;
+ if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
+ if (__predict_false(xbd_map_align(xbdreq) != 0)) {
+ DPRINTF(("xbdstart: no align\n"));
+ goto out;
+ }
+ }
+ /* now we're sure we'll send this buf */
+#ifdef DIAGNOSTIC
+ qbp = bufq_get(dksc->sc_bufq);
+ KASSERT(bp == qbp);
+#else
+ (void)bufq_get(dksc->sc_bufq);
+#endif
+ disk_busy(&dksc->sc_dkdev);
+
+ SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
+ req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
+ req->id = xbdreq->req_id;
+ req->operation =
+ bp->b_flags & B_READ ? BLKIF_OP_READ : BLKIF_OP_WRITE;
+ req->sector_number = bp->b_rawblkno;
+ req->handle = sc->sc_handle;
- va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
- off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
- if (bp->b_rawblkno + bp->b_bcount / DEV_BSIZE >= sc->sc_xbdsize) {
- bcount = (sc->sc_xbdsize - bp->b_rawblkno) * DEV_BSIZE;
- bp->b_resid = bp->b_bcount - bcount;
- } else {
- bcount = bp->b_bcount;
- bp->b_resid = 0;
- }
- for (seg = 0, bcount = bp->b_bcount; bcount > 0;) {
- pmap_extract_ma(pmap_kernel(), va, &ma);
- KASSERT((ma & (XEN_BSIZE - 1)) == 0);
- if (bcount > PAGE_SIZE - off)
- nbytes = PAGE_SIZE - off;
- else
- nbytes = bcount;
- nsects = nbytes >> XEN_BSHIFT;
- req->seg[seg].first_sect = off >> XEN_BSHIFT;
- req->seg[seg].last_sect = (off >> XEN_BSHIFT) + nsects - 1;
- KASSERT(req->seg[seg].first_sect <= req->seg[seg].last_sect);
- KASSERT(req->seg[seg].last_sect < 8);
- if (__predict_false(xengnt_grant_access(
- sc->sc_xbusd->xbusd_otherend_id, ma,
- (bp->b_flags & B_READ) == 0, &xbdreq->req_gntref[seg])))
- panic("xbdstart: xengnt_grant_access"); /* XXX XXX !!! */
- req->seg[seg].gref = xbdreq->req_gntref[seg];
- seg++;
- KASSERT(seg <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
- va += PAGE_SIZE;
- off = 0;
- bcount -= nbytes;
- }
- xbdreq->req_nr_segments = req->nr_segments = seg;
- sc->sc_ring.req_prod_pvt++;
- if (bufq_peek(sc->sc_dksc.sc_bufq)) {
- /* we will be called again; don't notify guest yet */
- runqueue = 0;
+ va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
+ off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
+ if (bp->b_rawblkno + bp->b_bcount / DEV_BSIZE >=
+ sc->sc_xbdsize) {
+ bcount = (sc->sc_xbdsize - bp->b_rawblkno) * DEV_BSIZE;
+ bp->b_resid = bp->b_bcount - bcount;
+ } else {
+ bcount = bp->b_bcount;
+ bp->b_resid = 0;
+ }
+ for (seg = 0; bcount > 0;) {
+ pmap_extract_ma(pmap_kernel(), va, &ma);
+ KASSERT((ma & (XEN_BSIZE - 1)) == 0);
+ if (bcount > PAGE_SIZE - off)
+ nbytes = PAGE_SIZE - off;
+ else
+ nbytes = bcount;
+ nsects = nbytes >> XEN_BSHIFT;
+ req->seg[seg].first_sect = off >> XEN_BSHIFT;
+ req->seg[seg].last_sect =
+ (off >> XEN_BSHIFT) + nsects - 1;
+ KASSERT(req->seg[seg].first_sect <=
+ req->seg[seg].last_sect);
+ KASSERT(req->seg[seg].last_sect < 8);
+ if (__predict_false(xengnt_grant_access(
+ sc->sc_xbusd->xbusd_otherend_id, ma,
+ (bp->b_flags & B_READ) == 0,
+ &xbdreq->req_gntref[seg])))
+ panic("xbdstart: xengnt_grant_access"); /* XXX XXX !!! */
+ req->seg[seg].gref = xbdreq->req_gntref[seg];
+ seg++;
+ KASSERT(seg <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ va += PAGE_SIZE;
+ off = 0;
+ bcount -= nbytes;
+ }
+ xbdreq->req_nr_segments = req->nr_segments = seg;
+ sc->sc_ring.req_prod_pvt++;
}
out:
- if (runqueue) {
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_ring, notify);
- if (notify)
- hypervisor_notify_via_evtchn(sc->sc_evtchn);
- }
-
- return ret;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_ring, notify);
+ if (notify)
+ hypervisor_notify_via_evtchn(sc->sc_evtchn);
+ return;
err:
+#ifdef DIAGNOSTIC
+ qbp = bufq_get(dksc->sc_bufq);
+ KASSERT(bp == qbp);
+#else
+ (void)bufq_get(dksc->sc_bufq);
+#endif
bp->b_resid = bp->b_bcount;
biodone(bp);
- return 0;
+ return;
}
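The matching change on the completion side (the first hunk above) drops the
dk_iodone() call from the interrupt handler: once responses have been consumed
from the ring, the handler either wakes a synchronous waiter or restarts the
submission loop itself. A simplified sketch, with the response-processing loop
elided:

/*
 * Sketch of the completion path after the change; the loop that consumes
 * responses from sc->sc_ring and biodone()s each finished buffer is omitted.
 */
static int
drv_handler(void *arg)
{
	struct xbd_xenbus_softc *sc = arg;

	/* ... consume completed responses, biodone() the finished buffers ... */

	if (sc->sc_xbdreq_wait)
		wakeup(&sc->sc_xbdreq_wait);	/* wake the waiting thread */
	else
		xbdstart(&sc->sc_dksc);		/* otherwise restart submission directly */
	return 1;
}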