Source-Changes-HG archive
[src/trunk]: src/sys add a new bufq strategy, BUFQ_PRIOCSCAN (per-priority CSCAN)
details: https://anonhg.NetBSD.org/src/rev/cad6c3304a7d
branches: trunk
changeset: 557496:cad6c3304a7d
user: yamt <yamt@NetBSD.org>
date: Sat Jan 10 14:49:44 2004 +0000
description:
add a new bufq strategy, BUFQ_PRIOCSCAN (per-priority CSCAN).
discussed on tech-kern@
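BUFQ_PRIOCSCAN maintains three CSCAN-ordered queues, one per buffer priority (BPRIO_TIMECRITICAL, BPRIO_TIMELIMITED, BPRIO_TIMENONCRITICAL), selected by BIO_GETPRIO(bp). Within a queue, requests are served in cyclical-scan order; across queues, each priority class has a burst budget (64, 16 and 4 requests respectively), so with all three classes backlogged a full cycle serves roughly 64 time-critical, 16 time-limited and 4 non-critical requests before the budgets are reset. The fragment below is a hypothetical driver sketch, not part of this commit: the softc layout and the xx_* names are invented, and the bufq_alloc()/BUFQ_PUT()/BIO_SETPRIO() calling conventions are assumed to match the sys/buf.h of this vintage.

#include <sys/param.h>
#include <sys/buf.h>

struct xx_softc {
	struct bufq_state sc_bufq;	/* per-device request queue */
};

/* attach time: choose the queueing strategy for this device */
void
xx_attach(struct xx_softc *sc)
{

	/* per-priority CSCAN, sorted by b_rawblkno within each queue */
	bufq_alloc(&sc->sc_bufq, BUFQ_PRIOCSCAN | BUFQ_SORT_RAWBLOCK);
}

/* strategy path: tag the request with a priority and queue it */
void
xx_strategy(struct xx_softc *sc, struct buf *bp)
{

	/* e.g. bulk transfers that can tolerate some delay */
	BIO_SETPRIO(bp, BPRIO_TIMENONCRITICAL);
	BUFQ_PUT(&sc->sc_bufq, bp);
	/* the start routine later dequeues with BUFQ_GET(&sc->sc_bufq) */
}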
diffstat:
sys/kern/subr_disk.c | 265 ++++++++++++++++++++++++++++++++++++++++++++++++++-
sys/sys/buf.h | 3 +-
2 files changed, 265 insertions(+), 3 deletions(-)
diffs (truncated from 310 to 300 lines):
diff -r 4d98161e9483 -r cad6c3304a7d sys/kern/subr_disk.c
--- a/sys/kern/subr_disk.c Sat Jan 10 14:46:24 2004 +0000
+++ b/sys/kern/subr_disk.c Sat Jan 10 14:49:44 2004 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_disk.c,v 1.57 2003/12/06 17:23:22 yamt Exp $ */
+/* $NetBSD: subr_disk.c,v 1.58 2004/01/10 14:49:44 yamt Exp $ */
/*-
* Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -74,7 +74,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.57 2003/12/06 17:23:22 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.58 2004/01/10 14:49:44 yamt Exp $");
#include "opt_compat_netbsd.h"
@@ -756,6 +756,264 @@
return (bp);
}
+
+/*
+ * Cyclical scan (CSCAN)
+ */
+TAILQ_HEAD(bqhead, buf);
+struct cscan_queue {
+ struct bqhead cq_head[2]; /* actual lists of buffers */
+ int cq_idx; /* current list index */
+ int cq_lastcylinder; /* b_cylinder of the last request */
+ daddr_t cq_lastrawblkno; /* b_rawblkno of the last request */
+};
+
+static int __inline cscan_empty(const struct cscan_queue *);
+static void cscan_put(struct cscan_queue *, struct buf *, int);
+static struct buf *cscan_get(struct cscan_queue *, int);
+static void cscan_init(struct cscan_queue *);
+
+static __inline int
+cscan_empty(const struct cscan_queue *q)
+{
+
+ return TAILQ_EMPTY(&q->cq_head[0]) && TAILQ_EMPTY(&q->cq_head[1]);
+}
+
+static void
+cscan_put(struct cscan_queue *q, struct buf *bp, int sortby)
+{
+ struct buf tmp;
+ struct buf *it;
+ struct bqhead *bqh;
+ int idx;
+
+ tmp.b_cylinder = q->cq_lastcylinder;
+ tmp.b_rawblkno = q->cq_lastrawblkno;
+
+ if (buf_inorder(bp, &tmp, sortby))
+ idx = 1 - q->cq_idx;
+ else
+ idx = q->cq_idx;
+
+ bqh = &q->cq_head[idx];
+
+ TAILQ_FOREACH(it, bqh, b_actq)
+ if (buf_inorder(bp, it, sortby))
+ break;
+
+ if (it != NULL)
+ TAILQ_INSERT_BEFORE(it, bp, b_actq);
+ else
+ TAILQ_INSERT_TAIL(bqh, bp, b_actq);
+}
+
+static struct buf *
+cscan_get(struct cscan_queue *q, int remove)
+{
+ int idx = q->cq_idx;
+ struct bqhead *bqh;
+ struct buf *bp;
+
+ bqh = &q->cq_head[idx];
+ bp = TAILQ_FIRST(bqh);
+
+ if (bp == NULL) {
+ /* switch queue */
+ idx = 1 - idx;
+ bqh = &q->cq_head[idx];
+ bp = TAILQ_FIRST(bqh);
+ }
+
+ KDASSERT((bp != NULL && !cscan_empty(q)) ||
+ (bp == NULL && cscan_empty(q)));
+
+ if (bp != NULL && remove) {
+ q->cq_idx = idx;
+ TAILQ_REMOVE(bqh, bp, b_actq);
+
+ q->cq_lastcylinder = bp->b_cylinder;
+ q->cq_lastrawblkno =
+ bp->b_rawblkno + (bp->b_bcount >> DEV_BSHIFT);
+ }
+
+ return (bp);
+}
+
+static void
+cscan_init(struct cscan_queue *q)
+{
+
+ TAILQ_INIT(&q->cq_head[0]);
+ TAILQ_INIT(&q->cq_head[1]);
+}
+
+
+/*
+ * Per-priority CSCAN.
+ *
+ * XXX probably we should have a way to raise
+ * priority of the on-queue requests.
+ */
+#define PRIOCSCAN_NQUEUE 3
+
+struct priocscan_queue {
+ struct cscan_queue q_queue;
+ int q_burst;
+};
+
+struct bufq_priocscan {
+ struct priocscan_queue bq_queue[PRIOCSCAN_NQUEUE];
+
+#if 0
+ /*
+ * XXX using "global" head position can reduce positioning time
+ * when switching between queues.
+ * although it might work against fairness.
+ */
+ daddr_t bq_lastrawblkno;
+ int bq_lastcylinder;
+#endif
+};
+
+/*
+ * how many requests to serve from a queue when other queues have pending requests.
+ *
+ * XXX tune
+ */
+const int priocscan_burst[] = {
+ 64, 16, 4
+};
+
+static void bufq_priocscan_put(struct bufq_state *, struct buf *);
+static struct buf *bufq_priocscan_get(struct bufq_state *, int);
+static void bufq_priocscan_init(struct bufq_state *);
+static __inline struct cscan_queue *bufq_priocscan_selectqueue(
+ struct bufq_priocscan *, const struct buf *);
+
+static __inline struct cscan_queue *
+bufq_priocscan_selectqueue(struct bufq_priocscan *q, const struct buf *bp)
+{
+ static const int priocscan_priomap[] = {
+ [BPRIO_TIMENONCRITICAL] = 2,
+ [BPRIO_TIMELIMITED] = 1,
+ [BPRIO_TIMECRITICAL] = 0
+ };
+
+ return &q->bq_queue[priocscan_priomap[BIO_GETPRIO(bp)]].q_queue;
+}
+
+static void
+bufq_priocscan_put(struct bufq_state *bufq, struct buf *bp)
+{
+ struct bufq_priocscan *q = bufq->bq_private;
+ struct cscan_queue *cq;
+ const int sortby = bufq->bq_flags & BUFQ_SORT_MASK;
+
+ cq = bufq_priocscan_selectqueue(q, bp);
+ cscan_put(cq, bp, sortby);
+}
+
+static struct buf *
+bufq_priocscan_get(struct bufq_state *bufq, int remove)
+{
+ struct bufq_priocscan *q = bufq->bq_private;
+ struct priocscan_queue *pq, *npq;
+ struct priocscan_queue *first; /* first non-empty queue */
+ const struct priocscan_queue *epq;
+ const struct cscan_queue *cq;
+ struct buf *bp;
+ boolean_t single; /* true if there's only one non-empty queue */
+
+ pq = &q->bq_queue[0];
+ epq = pq + PRIOCSCAN_NQUEUE;
+ for (; pq < epq; pq++) {
+ cq = &pq->q_queue;
+ if (!cscan_empty(cq))
+ break;
+ }
+ if (pq == epq) {
+ /* there are no requests */
+ return NULL;
+ }
+
+ first = pq;
+ single = TRUE;
+ for (npq = first + 1; npq < epq; npq++) {
+ cq = &npq->q_queue;
+ if (!cscan_empty(cq)) {
+ single = FALSE;
+ if (pq->q_burst > 0)
+ break;
+ pq = npq;
+ }
+ }
+ if (single) {
+ /*
+ * there's only one non-empty queue. just serve it.
+ */
+ pq = first;
+ } else if (pq->q_burst > 0) {
+ /*
+ * XXX we account only by the number of requests. is that good enough?
+ */
+ pq->q_burst--;
+ } else {
+ /*
+ * no queue was selected due to burst counts
+ */
+ int i;
+#ifdef DEBUG
+ for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
+ pq = &q->bq_queue[i];
+ cq = &pq->q_queue;
+ if (!cscan_empty(cq) && pq->q_burst)
+ panic("%s: inconsist", __func__);
+ }
+#endif /* DEBUG */
+
+ /*
+ * reset burst counts
+ */
+ for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
+ pq = &q->bq_queue[i];
+ pq->q_burst = priocscan_burst[i];
+ }
+
+ /*
+ * serve first non-empty queue.
+ */
+ pq = first;
+ }
+
+ KDASSERT(!cscan_empty(&pq->q_queue));
+ bp = cscan_get(&pq->q_queue, remove);
+ KDASSERT(bp != NULL);
+ KDASSERT(&pq->q_queue == bufq_priocscan_selectqueue(q, bp));
+
+ return bp;
+}
+
+static void
+bufq_priocscan_init(struct bufq_state *bufq)
+{
+ struct bufq_priocscan *q;
+ int i;
+
+ bufq->bq_get = bufq_priocscan_get;
+ bufq->bq_put = bufq_priocscan_put;
+ bufq->bq_private = malloc(sizeof(struct bufq_priocscan),
+ M_DEVBUF, M_ZERO);
+
+ q = bufq->bq_private;
+ for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
+ struct cscan_queue *cq = &q->bq_queue[i].q_queue;
+
+ cscan_init(cq);
+ }
+}
+
+
/*
* Create a device buffer queue.
*/
@@ -806,6 +1064,9 @@
TAILQ_INIT(&prio->bq_read);
TAILQ_INIT(&prio->bq_write);
break;
+ case BUFQ_PRIOCSCAN:
+ bufq_priocscan_init(bufq);
+ break;
default:
panic("bufq_alloc: method out of range");
}
diff -r 4d98161e9483 -r cad6c3304a7d sys/sys/buf.h
--- a/sys/sys/buf.h Sat Jan 10 14:46:24 2004 +0000
+++ b/sys/sys/buf.h Sat Jan 10 14:49:44 2004 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: buf.h,v 1.69 2004/01/10 14:39:50 yamt Exp $ */
+/* $NetBSD: buf.h,v 1.70 2004/01/10 14:49:44 yamt Exp $ */