Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/yamt-pagecache]: src/sys - uvm_page_array_fill: add some more parameters
details: https://anonhg.NetBSD.org/src/rev/af673fe2d414
branches: yamt-pagecache
changeset: 770839:af673fe2d414
user: yamt <yamt%NetBSD.org@localhost>
date: Sat Nov 26 15:19:06 2011 +0000
description:
- uvm_page_array_fill: add some more parameters
- uvn_findpages: use gang-lookup
- genfs_putpages: re-enable backward clustering
- mechanical changes after the recent radixtree.h api changes
diffstat:
sys/miscfs/genfs/genfs_io.c | 14 +++++++----
sys/uvm/uvm_aobj.c | 8 +++---
sys/uvm/uvm_object.c | 6 ++--
sys/uvm/uvm_page_array.c | 39 ++++++++++++++++++++-----------
sys/uvm/uvm_page_array.h | 13 ++++++++--
sys/uvm/uvm_vnode.c | 54 +++++++++++++++++++++++++++++++++++---------
6 files changed, 94 insertions(+), 40 deletions(-)
diffs (truncated from 400 to 300 lines):
diff -r cb2cd234c3b5 -r af673fe2d414 sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c Fri Nov 25 13:58:11 2011 +0000
+++ b/sys/miscfs/genfs/genfs_io.c Sat Nov 26 15:19:06 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_io.c,v 1.53.2.3 2011/11/20 10:49:20 yamt Exp $ */
+/* $NetBSD: genfs_io.c,v 1.53.2.4 2011/11/26 15:19:06 yamt Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.3 2011/11/20 10:49:20 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.4 2011/11/26 15:19:06 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -967,7 +967,8 @@
for (;;) {
bool protected;
- pg = uvm_page_array_fill_and_peek(&a, uobj, off, dirtyonly);
+ pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0,
+ dirtyonly ? UVM_PAGE_ARRAY_FILL_DIRTYONLY : 0);
if (pg == NULL) {
break;
}
@@ -1093,7 +1094,7 @@
pg->flags |= PG_BUSY;
UVM_PAGE_OWN(pg, "genfs_putpages");
-#if 0 /* XXX notyet */
+#if 1 /* XXX was "notyet"; backward clustering re-enabled by this change */
/*
* first look backward.
*/
@@ -1139,7 +1140,10 @@
*/
nextpg = uvm_page_array_fill_and_peek(&a, uobj,
- pgs[npages - 1]->offset + PAGE_SIZE, true);
+ pgs[npages - 1]->offset + PAGE_SIZE,
+ maxpages - npages,
+ UVM_PAGE_ARRAY_FILL_DIRTYONLY |
+ UVM_PAGE_ARRAY_FILL_DENSE);
if (nextpg == NULL) {
break;
}
diff -r cb2cd234c3b5 -r af673fe2d414 sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c Fri Nov 25 13:58:11 2011 +0000
+++ b/sys/uvm/uvm_aobj.c Sat Nov 26 15:19:06 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.116.2.5 2011/11/18 00:57:33 yamt Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.116.2.6 2011/11/26 15:19:06 yamt Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.5 2011/11/18 00:57:33 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.116.2.6 2011/11/26 15:19:06 yamt Exp $");
#include "opt_uvmhist.h"
@@ -663,7 +663,7 @@
uvm_page_array_init(&a);
mutex_enter(&uvm_pageqlock);
- while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, false))
+ while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, 0, 0))
!= NULL) {
uvm_page_array_advance(&a);
pmap_page_protect(pg, VM_PROT_NONE);
@@ -756,7 +756,7 @@
/* locked: uobj */
uvm_page_array_init(&a);
curoff = start;
- while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, false)) !=
+ while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, 0, 0)) !=
NULL) {
if (pg->offset >= stop) {
break;
diff -r cb2cd234c3b5 -r af673fe2d414 sys/uvm/uvm_object.c
--- a/sys/uvm/uvm_object.c Fri Nov 25 13:58:11 2011 +0000
+++ b/sys/uvm/uvm_object.c Sat Nov 26 15:19:06 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_object.c,v 1.11.2.2 2011/11/06 22:05:00 yamt Exp $ */
+/* $NetBSD: uvm_object.c,v 1.11.2.3 2011/11/26 15:19:06 yamt Exp $ */
/*
* Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11.2.2 2011/11/06 22:05:00 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.11.2.3 2011/11/26 15:19:06 yamt Exp $");
#include "opt_ddb.h"
@@ -256,7 +256,7 @@
(*pr)(" PAGES <pg,offset>:\n ");
uvm_page_array_init(&a);
off = 0;
- while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, false))
+ while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0))
!= NULL) {
cnt++;
(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
diff -r cb2cd234c3b5 -r af673fe2d414 sys/uvm/uvm_page_array.c
--- a/sys/uvm/uvm_page_array.c Fri Nov 25 13:58:11 2011 +0000
+++ b/sys/uvm/uvm_page_array.c Sat Nov 26 15:19:06 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page_array.c,v 1.1.2.2 2011/11/06 22:04:07 yamt Exp $ */
+/* $NetBSD: uvm_page_array.c,v 1.1.2.3 2011/11/26 15:19:06 yamt Exp $ */
/*-
* Copyright (c)2011 YAMAMOTO Takashi,
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.1.2.2 2011/11/06 22:04:07 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page_array.c,v 1.1.2.3 2011/11/26 15:19:06 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -115,6 +115,9 @@
* return 0 on success. in that case, cache the result in the array
* so that they will be picked by later uvm_page_array_peek.
*
+ * nwant is the number of pages to fetch; the caller should treat it as a hint.
+ * nwant == 0 means the caller has no specific preference.
+ *
* return ENOENT if no pages are found.
*
* called with object lock held.
@@ -122,25 +125,33 @@
int
uvm_page_array_fill(struct uvm_page_array *ar, struct uvm_object *uobj,
- voff_t off, bool dirtyonly)
+ voff_t off, unsigned int nwant, unsigned int flags)
{
unsigned int npages;
#if defined(DEBUG)
unsigned int i;
#endif /* defined(DEBUG) */
- const unsigned int maxpages = __arraycount(ar->ar_pages);
+ unsigned int maxpages = __arraycount(ar->ar_pages);
+ const bool dense = (flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0;
+ const bool backward = (flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0;
+ if (nwant != 0 && nwant < maxpages) {
+ maxpages = nwant;
+ }
KASSERT(mutex_owned(uobj->vmobjlock));
KASSERT(uvm_page_array_peek(ar) == NULL);
- if (dirtyonly) {
- npages = radix_tree_gang_lookup_tagged_node(
- &uobj->uo_pages, off >> PAGE_SHIFT,
- (void **)ar->ar_pages, maxpages,
- UVM_PAGE_DIRTY_TAG);
+ if ((flags & UVM_PAGE_ARRAY_FILL_DIRTYONLY) != 0) {
+ npages =
+ (backward ? radix_tree_gang_lookup_tagged_node_reverse :
+ radix_tree_gang_lookup_tagged_node)(
+ &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
+ maxpages, dense, UVM_PAGE_DIRTY_TAG);
} else {
- npages = radix_tree_gang_lookup_node(
- &uobj->uo_pages, off >> PAGE_SHIFT,
- (void **)ar->ar_pages, maxpages);
+ npages =
+ (backward ? radix_tree_gang_lookup_node_reverse :
+ radix_tree_gang_lookup_node)(
+ &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
+ maxpages, dense);
}
if (npages == 0) {
uvm_page_array_clear(ar);
@@ -170,7 +181,7 @@
struct vm_page *
uvm_page_array_fill_and_peek(struct uvm_page_array *a, struct uvm_object *uobj,
- voff_t off, bool dirtyonly)
+ voff_t off, unsigned int nwant, unsigned int flags)
{
struct vm_page *pg;
int error;
@@ -179,7 +190,7 @@
if (pg != NULL) {
return pg;
}
- error = uvm_page_array_fill(a, uobj, off, dirtyonly);
+ error = uvm_page_array_fill(a, uobj, off, nwant, flags);
if (error != 0) {
return NULL;
}
diff -r cb2cd234c3b5 -r af673fe2d414 sys/uvm/uvm_page_array.h
--- a/sys/uvm/uvm_page_array.h Fri Nov 25 13:58:11 2011 +0000
+++ b/sys/uvm/uvm_page_array.h Sat Nov 26 15:19:06 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page_array.h,v 1.1.2.3 2011/11/14 14:22:28 yamt Exp $ */
+/* $NetBSD: uvm_page_array.h,v 1.1.2.4 2011/11/26 15:19:06 yamt Exp $ */
/*-
* Copyright (c)2011 YAMAMOTO Takashi,
@@ -64,8 +64,15 @@
struct vm_page *uvm_page_array_peek(struct uvm_page_array *);
void uvm_page_array_advance(struct uvm_page_array *);
int uvm_page_array_fill(struct uvm_page_array *, struct uvm_object *,
- voff_t, bool);
+ voff_t, unsigned int, unsigned int);
struct vm_page *uvm_page_array_fill_and_peek(struct uvm_page_array *,
- struct uvm_object *, voff_t, bool);
+ struct uvm_object *, voff_t, unsigned int, unsigned int);
+
+/*
+ * flags for uvm_page_array_fill and uvm_page_array_fill_and_peek
+ */
+#define UVM_PAGE_ARRAY_FILL_DIRTYONLY 1 /* skip known-clean pages */
+#define UVM_PAGE_ARRAY_FILL_DENSE 2 /* stop on a hole */
+#define UVM_PAGE_ARRAY_FILL_BACKWARD 4 /* descend order */
#endif /* defined(_UVM_UVM_ARRAY_H_) */
diff -r cb2cd234c3b5 -r af673fe2d414 sys/uvm/uvm_vnode.c
--- a/sys/uvm/uvm_vnode.c Fri Nov 25 13:58:11 2011 +0000
+++ b/sys/uvm/uvm_vnode.c Sat Nov 26 15:19:06 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_vnode.c,v 1.97.2.1 2011/11/02 21:54:01 yamt Exp $ */
+/* $NetBSD: uvm_vnode.c,v 1.97.2.2 2011/11/26 15:19:06 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.1 2011/11/02 21:54:01 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.2 2011/11/26 15:19:06 yamt Exp $");
#include "opt_uvmhist.h"
@@ -64,6 +64,7 @@
#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
+#include <uvm/uvm_page_array.h>
/*
* functions
@@ -76,7 +77,7 @@
static void uvn_reference(struct uvm_object *);
static int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
- int);
+ int, struct uvm_page_array *a, unsigned int);
/*
* master pager structure
@@ -196,12 +197,15 @@
struct vm_page **pgs, int flags)
{
int i, count, found, npages, rv;
+ struct uvm_page_array a;
+ uvm_page_array_init(&a);
count = found = 0;
npages = *npagesp;
if (flags & UFP_BACKWARD) {
for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
- rv = uvn_findpage(uobj, offset, &pgs[i], flags);
+ rv = uvn_findpage(uobj, offset, &pgs[i], flags, &a,
+ npages - i);
if (rv == 0) {
if (flags & UFP_DIRTYONLY)
break;
@@ -211,7 +215,8 @@
}
} else {
for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
- rv = uvn_findpage(uobj, offset, &pgs[i], flags);
+ rv = uvn_findpage(uobj, offset, &pgs[i], flags, &a,
+ npages - i);
if (rv == 0) {
if (flags & UFP_DIRTYONLY)
break;
@@ -220,16 +225,21 @@
count++;
}
}
+ uvm_page_array_fini(&a);
*npagesp = count;
return (found);
}
Home |
Main Index |
Thread Index |
Old Index