Source-Changes-HG archive
[src/netbsd-6]: src/sys/dev/raidframe Pull up following revision(s) (requested by buhrow in ticket #488)
details: https://anonhg.NetBSD.org/src/rev/58497c581b5f
branches: netbsd-6
changeset: 774432:58497c581b5f
user: riz <riz@NetBSD.org>
date: Mon Aug 13 19:41:29 2012 +0000
description:
Pull up following revision(s) (requested by buhrow in ticket #488):
sys/dev/raidframe/rf_netbsdkintf.c: revision 1.298
Implement DIOCGSTRATEGY and DIOCSSTRATEGY to allow raidframe to use
different buffer queue strategies.
Initialize raid sets to use the default buffer queue strategy for the given
architecture, rather than forcing raidframe to use fcfs in all cases.
This should cause raidframe to use the same buffer queue strategy as the
underlying disks.
On i386, I see performance improvements of between 14% and 16% on raid5
sets, with no other change.
See http://mail-index.NetBSD.org/tech-kern/2012/08/08/msg013758.html
for a discussion of this issue.
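As a rough illustration (not part of this commit), a userland program could
drive the new ioctls directly. This is a minimal sketch: the device path
/dev/rraid0d and the strategy name "priocscan" are assumptions for the
example, and the chosen strategy must be compiled into the kernel or the
set call will fail.

	#include <sys/ioctl.h>
	#include <sys/dkio.h>

	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct disk_strategy dks;
		int fd;

		/* Example device; O_RDWR because DIOCSSTRATEGY requires FWRITE. */
		fd = open("/dev/rraid0d", O_RDWR);
		if (fd == -1)
			err(1, "open");

		/* Query the strategy currently in use on the set. */
		memset(&dks, 0, sizeof(dks));
		if (ioctl(fd, DIOCGSTRATEGY, &dks) == -1)
			err(1, "DIOCGSTRATEGY");
		printf("current strategy: %s\n", dks.dks_name);

		/*
		 * Switch strategies.  dks_param must stay NULL (the handler
		 * returns EINVAL otherwise), and the name must match a
		 * strategy compiled into the kernel, since the queue is
		 * allocated with BUFQ_EXACT.
		 */
		memset(&dks, 0, sizeof(dks));
		strlcpy(dks.dks_name, "priocscan", sizeof(dks.dks_name));
		if (ioctl(fd, DIOCSSTRATEGY, &dks) == -1)
			err(1, "DIOCSSTRATEGY");

		close(fd);
		return 0;
	}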
diffstat:
sys/dev/raidframe/rf_netbsdkintf.c | 50 ++++++++++++++++++++++++++++++++++---
1 files changed, 46 insertions(+), 4 deletions(-)
diffs (99 lines):
diff -r 7200eda101d1 -r 58497c581b5f sys/dev/raidframe/rf_netbsdkintf.c
--- a/sys/dev/raidframe/rf_netbsdkintf.c Mon Aug 13 19:38:10 2012 +0000
+++ b/sys/dev/raidframe/rf_netbsdkintf.c Mon Aug 13 19:41:29 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: rf_netbsdkintf.c,v 1.295.6.1 2012/03/21 16:14:57 riz Exp $ */
+/* $NetBSD: rf_netbsdkintf.c,v 1.295.6.2 2012/08/13 19:41:29 riz Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2008-2011 The NetBSD Foundation, Inc.
@@ -101,7 +101,7 @@
***********************************************************/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.295.6.1 2012/03/21 16:14:57 riz Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.295.6.2 2012/08/13 19:41:29 riz Exp $");
#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
@@ -376,7 +376,7 @@
memset(raid_softc, 0, num * sizeof(struct raid_softc));
for (raidID = 0; raidID < num; raidID++) {
- bufq_alloc(&raid_softc[raidID].buf_queue, "fcfs", 0);
+ bufq_alloc(&raid_softc[raidID].buf_queue, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);
RF_Malloc(raidPtrs[raidID], sizeof(RF_Raid_t),
(RF_Raid_t *));
@@ -976,7 +976,7 @@
{
int unit = raidunit(dev);
int error = 0;
- int part, pmask;
+ int part, pmask, s;
cfdata_t cf;
struct raid_softc *rs;
RF_Config_t *k_cfg, *u_cfg;
@@ -1029,6 +1029,7 @@
case DIOCWLABEL:
case DIOCAWEDGE:
case DIOCDWEDGE:
+ case DIOCSSTRATEGY:
if ((flag & FWRITE) == 0)
return (EBADF);
}
@@ -1081,6 +1082,8 @@
case RAIDFRAME_PARITYMAP_GET_DISABLE:
case RAIDFRAME_PARITYMAP_SET_DISABLE:
case RAIDFRAME_PARITYMAP_SET_PARAMS:
+ case DIOCGSTRATEGY:
+ case DIOCSSTRATEGY:
if ((rs->sc_flags & RAIDF_INITED) == 0)
return (ENXIO);
}
@@ -1854,6 +1857,45 @@
(struct dkwedge_list *)data, l);
case DIOCCACHESYNC:
return rf_sync_component_caches(raidPtr);
+
+ case DIOCGSTRATEGY:
+ {
+ struct disk_strategy *dks = (void *)data;
+
+ s = splbio();
+ strlcpy(dks->dks_name, bufq_getstrategyname(rs->buf_queue),
+ sizeof(dks->dks_name));
+ splx(s);
+ dks->dks_paramlen = 0;
+
+ return 0;
+ }
+
+ case DIOCSSTRATEGY:
+ {
+ struct disk_strategy *dks = (void *)data;
+ struct bufq_state *new;
+ struct bufq_state *old;
+
+ if (dks->dks_param != NULL) {
+ return EINVAL;
+ }
+ dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
+ error = bufq_alloc(&new, dks->dks_name,
+ BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
+ if (error) {
+ return error;
+ }
+ s = splbio();
+ old = rs->buf_queue;
+ bufq_move(new, old);
+ rs->buf_queue = new;
+ splx(s);
+ bufq_free(old);
+
+ return 0;
+ }
+
default:
retcode = ENOTTY;
}
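Note the pattern in the DIOCSSTRATEGY handler above: the replacement queue
is allocated with BUFQ_EXACT, so an unknown strategy name fails before the
running set is touched; pending buffers are then migrated with bufq_move()
under splbio() before the old queue is freed. From userland, dkctl(8)
issues these same ioctls, e.g. "dkctl raid0 strategy priocscan" to change
the strategy of a configured set.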