Source-Changes-HG archive
[src/trunk]: src/sys As proposed in
details: https://anonhg.NetBSD.org/src/rev/c4719fa19966
branches: trunk
changeset: 796237:c4719fa19966
user: bouyer <bouyer%NetBSD.org@localhost>
date: Sun May 25 19:23:49 2014 +0000
description:
As proposed in
https://mail-index.netbsd.org/tech-kern/2014/05/21/msg017098.html
remove dk_start() and dk_iodone() from dksubr.c and move the related code
to the underlying driver.
This increases complexity only marginally: the underlying drivers have
to do the while() loop themselves, but this can now be done properly with
bufq_peek()/bufq_get(), removing the buffer from the queue at the right time.
This handles both the recursion and reordering issues (the reordering
issue is described in
https://mail-index.netbsd.org/tech-kern/2014/05/19/msg017089.html
and the recursion issue is PR #25240).
Difference with the patch posted to tech-kern@: KASSERT() that the
buffer we remove with bufq_get() is the same as the one we bufq_peek()'d
just before.
Hopefully this will allow more disk drivers to use dksubr.c
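For drivers converted to this scheme, the start routine itself loops over the
queue: it peeks at the head, backs off without consuming the buffer when the
controller has no room, and removes the buffer only once the request is
committed, checking that it got back the buffer it peeked at. Below is a
minimal userspace sketch of that pattern; the buf/bufq types and the
ring_full()/submit_request() helpers are simplified stand-ins for
illustration, not the kernel's actual bufq(9) API.

/*
 * Minimal userspace sketch of the driver-side start loop.  The struct
 * buf/bufq types and the ring_full()/submit_request() helpers below are
 * simplified stand-ins for illustration, not the kernel's bufq(9) API.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct buf  { int b_id; struct buf *b_next; };
struct bufq { struct buf *head, *tail; };

/* Look at the head of the queue without removing it. */
static struct buf *
bufq_peek(struct bufq *q)
{
	return q->head;
}

/* Remove and return the buffer at the head of the queue. */
static struct buf *
bufq_get(struct bufq *q)
{
	struct buf *bp = q->head;

	if (bp != NULL) {
		q->head = bp->b_next;
		if (q->head == NULL)
			q->tail = NULL;
	}
	return bp;
}

/* Append a buffer to the tail of the queue. */
static void
bufq_put(struct bufq *q, struct buf *bp)
{
	bp->b_next = NULL;
	if (q->tail != NULL)
		q->tail->b_next = bp;
	else
		q->head = bp;
	q->tail = bp;
}

/* Pretend the controller ring only has room for two requests at a time. */
static int free_slots = 2;

static bool
ring_full(void)
{
	return free_slots == 0;
}

static void
submit_request(struct buf *bp)
{
	free_slots--;
	printf("submitted buffer %d\n", bp->b_id);
}

/*
 * Peek first, back off without consuming the buffer when resources are
 * short, and only bufq_get() once the request is committed; the removed
 * buffer must be the one that was peeked (a KASSERT() in the kernel code).
 */
static void
drv_start(struct bufq *q)
{
	struct buf *bp, *qbp;

	while ((bp = bufq_peek(q)) != NULL) {
		if (ring_full())
			break;		/* leave bp queued; retry later */
		qbp = bufq_get(q);
		assert(bp == qbp);
		submit_request(bp);
	}
}

int
main(void)
{
	struct bufq q = { NULL, NULL };
	struct buf bufs[4];

	for (int i = 0; i < 4; i++) {
		bufs[i].b_id = i;
		bufq_put(&q, &bufs[i]);
	}
	drv_start(&q);	/* submits buffers 0 and 1, leaves 2 and 3 queued */
	free_slots = 2;	/* pretend the first two requests completed */
	drv_start(&q);	/* submits the remaining two buffers */
	return 0;
}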
diffstat:
sys/arch/xen/xen/xbd_xenbus.c | 231 +++++++++++++++++++++++------------------
sys/dev/cgd.c | 113 +++++++++++---------
sys/dev/dksubr.c | 32 +-----
sys/dev/dkvar.h | 6 +-
4 files changed, 193 insertions(+), 189 deletions(-)
diffs (truncated from 542 to 300 lines):
diff -r 626d0f71f84e -r c4719fa19966 sys/arch/xen/xen/xbd_xenbus.c
--- a/sys/arch/xen/xen/xbd_xenbus.c Sun May 25 19:15:50 2014 +0000
+++ b/sys/arch/xen/xen/xbd_xenbus.c Sun May 25 19:23:49 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xbd_xenbus.c,v 1.62 2014/03/16 05:20:26 dholland Exp $ */
+/* $NetBSD: xbd_xenbus.c,v 1.63 2014/05/25 19:23:49 bouyer Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.62 2014/03/16 05:20:26 dholland Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.63 2014/05/25 19:23:49 bouyer Exp $");
#include "opt_xen.h"
@@ -168,7 +168,7 @@
static bool xbd_xenbus_resume(device_t, const pmf_qual_t *);
static int xbd_handler(void *);
-static int xbdstart(struct dk_softc *, struct buf *);
+static void xbdstart(struct dk_softc *);
static void xbd_backend_changed(void *, XenbusState);
static void xbd_connect(struct xbd_xenbus_softc *);
@@ -722,9 +722,10 @@
if (more_to_do)
goto again;
- dk_iodone(di, &sc->sc_dksc);
if (sc->sc_xbdreq_wait)
wakeup(&sc->sc_xbdreq_wait);
+ else
+ xbdstart(&sc->sc_dksc);
return 1;
}
@@ -926,133 +927,155 @@
return dk_dump(di, &sc->sc_dksc, dev, blkno, va, size);
}
-static int
-xbdstart(struct dk_softc *dksc, struct buf *bp)
+static void
+xbdstart(struct dk_softc *dksc)
{
struct xbd_xenbus_softc *sc = (struct xbd_xenbus_softc *)dksc;
+ struct buf *bp;
+#ifdef DIAGNOSTIC
+ struct buf *qbp;
+#endif
struct xbd_req *xbdreq;
blkif_request_t *req;
- int ret = 0, runqueue = 1;
size_t bcount, off;
paddr_t ma;
vaddr_t va;
int nsects, nbytes, seg;
int notify;
- DPRINTF(("xbdstart(%p): b_bcount = %ld\n", bp, (long)bp->b_bcount));
+ while ((bp = bufq_peek(dksc->sc_bufq)) != NULL) {
- if (sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
- bp->b_error = EIO;
- goto err;
- }
+ DPRINTF(("xbdstart(%p): b_bcount = %ld\n",
+ bp, (long)bp->b_bcount));
- if (bp->b_rawblkno < 0 || bp->b_rawblkno > sc->sc_xbdsize) {
- /* invalid block number */
- bp->b_error = EINVAL;
- goto err;
- }
+ if (sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
+ bp->b_error = EIO;
+ goto err;
+ }
- if (bp->b_rawblkno == sc->sc_xbdsize) {
- /* at end of disk; return short read */
- bp->b_resid = bp->b_bcount;
- biodone(bp);
- return 0;
- }
+ if (bp->b_rawblkno < 0 || bp->b_rawblkno > sc->sc_xbdsize) {
+ /* invalid block number */
+ bp->b_error = EINVAL;
+ goto err;
+ }
- if (__predict_false(sc->sc_backend_status == BLKIF_STATE_SUSPENDED)) {
- /* device is suspended, do not consume buffer */
- DPRINTF(("%s: (xbdstart) device suspended\n",
- device_xname(sc->sc_dksc.sc_dev)));
- ret = -1;
- goto out;
- }
+ if (bp->b_rawblkno == sc->sc_xbdsize) {
+ /* at end of disk; return short read */
+ bp->b_resid = bp->b_bcount;
+#ifdef DIAGNOSTIC
+ qbp = bufq_get(dksc->sc_bufq);
+ KASSERT(bp == qbp);
+#else
+ (void)bufq_get(dksc->sc_bufq);
+#endif
+ biodone(bp);
+ continue;
+ }
- if (RING_FULL(&sc->sc_ring) || sc->sc_xbdreq_wait) {
- DPRINTF(("xbdstart: ring_full\n"));
- ret = -1;
- goto out;
- }
+ if (__predict_false(
+ sc->sc_backend_status == BLKIF_STATE_SUSPENDED)) {
+ /* device is suspended, do not consume buffer */
+ DPRINTF(("%s: (xbdstart) device suspended\n",
+ device_xname(sc->sc_dksc.sc_dev)));
+ goto out;
+ }
- xbdreq = SLIST_FIRST(&sc->sc_xbdreq_head);
- if (__predict_false(xbdreq == NULL)) {
- DPRINTF(("xbdstart: no req\n"));
- ret = -1; /* dk_start should not remove bp from queue */
- goto out;
- }
+ if (RING_FULL(&sc->sc_ring) || sc->sc_xbdreq_wait) {
+ DPRINTF(("xbdstart: ring_full\n"));
+ goto out;
+ }
- xbdreq->req_bp = bp;
- xbdreq->req_data = bp->b_data;
- if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
- if (__predict_false(xbd_map_align(xbdreq) != 0)) {
- ret = -1;
+ xbdreq = SLIST_FIRST(&sc->sc_xbdreq_head);
+ if (__predict_false(xbdreq == NULL)) {
+ DPRINTF(("xbdstart: no req\n"));
goto out;
}
- }
- /* now we're sure we'll send this buf */
- disk_busy(&dksc->sc_dkdev);
- SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
- req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
- req->id = xbdreq->req_id;
- req->operation = bp->b_flags & B_READ ? BLKIF_OP_READ : BLKIF_OP_WRITE;
- req->sector_number = bp->b_rawblkno;
- req->handle = sc->sc_handle;
+
+ xbdreq->req_bp = bp;
+ xbdreq->req_data = bp->b_data;
+ if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
+ if (__predict_false(xbd_map_align(xbdreq) != 0)) {
+ DPRINTF(("xbdstart: no align\n"));
+ goto out;
+ }
+ }
+ /* now we're sure we'll send this buf */
+#ifdef DIAGNOSTIC
+ qbp = bufq_get(dksc->sc_bufq);
+ KASSERT(bp == qbp);
+#else
+ (void)bufq_get(dksc->sc_bufq);
+#endif
+ disk_busy(&dksc->sc_dkdev);
+
+ SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
+ req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
+ req->id = xbdreq->req_id;
+ req->operation =
+ bp->b_flags & B_READ ? BLKIF_OP_READ : BLKIF_OP_WRITE;
+ req->sector_number = bp->b_rawblkno;
+ req->handle = sc->sc_handle;
- va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
- off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
- if (bp->b_rawblkno + bp->b_bcount / DEV_BSIZE >= sc->sc_xbdsize) {
- bcount = (sc->sc_xbdsize - bp->b_rawblkno) * DEV_BSIZE;
- bp->b_resid = bp->b_bcount - bcount;
- } else {
- bcount = bp->b_bcount;
- bp->b_resid = 0;
- }
- if (bcount > XBD_MAX_XFER) {
- bp->b_resid += bcount - XBD_MAX_XFER;
- bcount = XBD_MAX_XFER;
- }
- for (seg = 0; bcount > 0;) {
- pmap_extract_ma(pmap_kernel(), va, &ma);
- KASSERT((ma & (XEN_BSIZE - 1)) == 0);
- if (bcount > PAGE_SIZE - off)
- nbytes = PAGE_SIZE - off;
- else
- nbytes = bcount;
- nsects = nbytes >> XEN_BSHIFT;
- req->seg[seg].first_sect = off >> XEN_BSHIFT;
- req->seg[seg].last_sect = (off >> XEN_BSHIFT) + nsects - 1;
- KASSERT(req->seg[seg].first_sect <= req->seg[seg].last_sect);
- KASSERT(req->seg[seg].last_sect < 8);
- if (__predict_false(xengnt_grant_access(
- sc->sc_xbusd->xbusd_otherend_id, ma,
- (bp->b_flags & B_READ) == 0, &xbdreq->req_gntref[seg])))
- panic("xbdstart: xengnt_grant_access"); /* XXX XXX !!! */
- req->seg[seg].gref = xbdreq->req_gntref[seg];
- seg++;
- KASSERT(seg <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
- va += PAGE_SIZE;
- off = 0;
- bcount -= nbytes;
- }
- xbdreq->req_nr_segments = req->nr_segments = seg;
- sc->sc_ring.req_prod_pvt++;
- if (bufq_peek(sc->sc_dksc.sc_bufq)) {
- /* we will be called again; don't notify guest yet */
- runqueue = 0;
+ va = (vaddr_t)xbdreq->req_data & ~PAGE_MASK;
+ off = (vaddr_t)xbdreq->req_data & PAGE_MASK;
+ if (bp->b_rawblkno + bp->b_bcount / DEV_BSIZE >=
+ sc->sc_xbdsize) {
+ bcount = (sc->sc_xbdsize - bp->b_rawblkno) * DEV_BSIZE;
+ bp->b_resid = bp->b_bcount - bcount;
+ } else {
+ bcount = bp->b_bcount;
+ bp->b_resid = 0;
+ }
+ if (bcount > XBD_MAX_XFER) {
+ bp->b_resid += bcount - XBD_MAX_XFER;
+ bcount = XBD_MAX_XFER;
+ }
+ for (seg = 0; bcount > 0;) {
+ pmap_extract_ma(pmap_kernel(), va, &ma);
+ KASSERT((ma & (XEN_BSIZE - 1)) == 0);
+ if (bcount > PAGE_SIZE - off)
+ nbytes = PAGE_SIZE - off;
+ else
+ nbytes = bcount;
+ nsects = nbytes >> XEN_BSHIFT;
+ req->seg[seg].first_sect = off >> XEN_BSHIFT;
+ req->seg[seg].last_sect =
+ (off >> XEN_BSHIFT) + nsects - 1;
+ KASSERT(req->seg[seg].first_sect <=
+ req->seg[seg].last_sect);
+ KASSERT(req->seg[seg].last_sect < 8);
+ if (__predict_false(xengnt_grant_access(
+ sc->sc_xbusd->xbusd_otherend_id, ma,
+ (bp->b_flags & B_READ) == 0,
+ &xbdreq->req_gntref[seg])))
+ panic("xbdstart: xengnt_grant_access"); /* XXX XXX !!! */
+ req->seg[seg].gref = xbdreq->req_gntref[seg];
+ seg++;
+ KASSERT(seg <= BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ va += PAGE_SIZE;
+ off = 0;
+ bcount -= nbytes;
+ }
+ xbdreq->req_nr_segments = req->nr_segments = seg;
+ sc->sc_ring.req_prod_pvt++;
}
out:
- if (runqueue) {
- RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_ring, notify);
- if (notify)
- hypervisor_notify_via_evtchn(sc->sc_evtchn);
- }
-
- return ret;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_ring, notify);
+ if (notify)
+ hypervisor_notify_via_evtchn(sc->sc_evtchn);
+ return;
err:
+#ifdef DIAGNOSTIC
+ qbp = bufq_get(dksc->sc_bufq);
+ KASSERT(bp == qbp);
+#else
+ (void)bufq_get(dksc->sc_bufq);
+#endif
bp->b_resid = bp->b_bcount;
biodone(bp);
- return 0;
+ return;
}
static int
diff -r 626d0f71f84e -r c4719fa19966 sys/dev/cgd.c
--- a/sys/dev/cgd.c Sun May 25 19:15:50 2014 +0000
+++ b/sys/dev/cgd.c Sun May 25 19:23:49 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cgd.c,v 1.86 2014/05/25 19:15:50 christos Exp $ */