Source-Changes-HG archive
[src/trunk]: src/sys/uvm Improve the efficiency of searching for a contiguous...
details: https://anonhg.NetBSD.org/src/rev/01c63e5d2587
branches: trunk
changeset: 761018:01c63e5d2587
user: matt <matt%NetBSD.org@localhost>
date: Tue Jan 18 21:43:29 2011 +0000
description:
Improve the efficiency of searching for a contiguous set of free pages.
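In outline, the change gives each physical segment a start_hint recording where the last successful allocation ended; the next search resumes there and, if it runs off the end of the segment, wraps around once and gives up where the first pass began. Below is a minimal sketch of that idea only, with hypothetical names (struct seg, find_free_run(), is_free[]) standing in for the UVM structures shown in the diff that follows; the real code additionally works in page-frame numbers and honours alignment and boundary constraints.

#include <stdbool.h>

/* Hypothetical stand-ins for the UVM segment and the free-page test. */
struct seg {
	unsigned int npages;		/* pages in this segment */
	unsigned int start_hint;	/* resume the next search here */
	bool *is_free;			/* is_free[i] is true if page i is free */
};

/*
 * Find `num' consecutive free pages.  The search starts at the hint
 * left by the previous successful allocation; if it reaches the end
 * of the segment it wraps around once and gives up where the first
 * pass began.  Returns the starting index, or -1 if nothing fits.
 */
static int
find_free_run(struct seg *s, unsigned int num)
{
	unsigned int try = s->start_hint;
	unsigned int limit = s->npages;
	bool second_pass = false;

	for (;;) {
		if (try + num > limit) {
			if (s->start_hint == 0 || second_pass)
				return -1;	/* whole segment searched */
			/* Wrap: restart at 0, stop where the first pass began. */
			second_pass = true;
			try = 0;
			limit = s->start_hint;
			continue;
		}
		unsigned int idx;
		for (idx = 0; idx < num; idx++) {
			if (!s->is_free[try + idx])
				break;
		}
		if (idx == num) {
			/* Remember where to resume the next search. */
			s->start_hint = try + num;
			return (int)try;
		}
		try += idx + 1;		/* step past the busy page */
	}
}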
diffstat:
sys/uvm/uvm_page.h | 6 +-
sys/uvm/uvm_pglist.c | 150 ++++++++++++++++++++++++++++++++++++++++----------
2 files changed, 123 insertions(+), 33 deletions(-)
diffs (298 lines):
diff -r ec751c46f209 -r 01c63e5d2587 sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h Tue Jan 18 21:34:31 2011 +0000
+++ b/sys/uvm/uvm_page.h Tue Jan 18 21:43:29 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.h,v 1.69 2010/11/26 00:45:27 uebayasi Exp $ */
+/* $NetBSD: uvm_page.h,v 1.70 2011/01/18 21:43:29 matt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -235,9 +235,11 @@
paddr_t end; /* (PF# of last page in segment) + 1 */
paddr_t avail_start; /* PF# of first free page in segment */
paddr_t avail_end; /* (PF# of last free page in segment) +1 */
- int free_list; /* which free list they belong on */
struct vm_page *pgs; /* vm_page structures (from start) */
struct vm_page *lastpg; /* vm_page structure for end */
+ int free_list; /* which free list they belong on */
+ u_int start_hint; /* start looking for free pages here */
+ /* protected by uvm_fpageqlock */
#ifdef __HAVE_PMAP_PHYSSEG
struct pmap_physseg pmseg; /* pmap specific (MD) data */
#endif
diff -r ec751c46f209 -r 01c63e5d2587 sys/uvm/uvm_pglist.c
--- a/sys/uvm/uvm_pglist.c Tue Jan 18 21:34:31 2011 +0000
+++ b/sys/uvm/uvm_pglist.c Tue Jan 18 21:43:29 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pglist.c,v 1.51 2010/11/25 04:45:30 uebayasi Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.52 2011/01/18 21:43:29 matt Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.51 2010/11/25 04:45:30 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.52 2011/01/18 21:43:29 matt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -82,9 +82,6 @@
uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
{
int free_list, color, pgflidx;
-#ifdef NOT_DEBUG
- struct vm_page *tp;
-#endif
KASSERT(mutex_owned(&uvm_fpageqlock));
@@ -96,10 +93,10 @@
color = VM_PGCOLOR_BUCKET(pg);
pgflidx = (pg->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef NOT_DEBUG
- for (tp = LIST_FIRST(&uvm.page_free[
- free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
- tp != NULL;
- tp = LIST_NEXT(tp, pageq.list)) {
+ struct vm_page *tp;
+ LIST_FOREACH(tp,
+ &uvm.page_free[free_list].pgfl_buckets[color].pgfl_queues[pgflidx],
+ pageq.list) {
if (tp == pg)
break;
}
@@ -124,9 +121,10 @@
uvm_pglistalloc_c_ps(struct vm_physseg *ps, int num, paddr_t low, paddr_t high,
paddr_t alignment, paddr_t boundary, struct pglist *rlist)
{
- int try, limit, tryidx, end, idx;
+ signed int try, limit, tryidx, end, idx, skip;
struct vm_page *pgs;
int pagemask;
+ bool second_pass;
#ifdef DEBUG
paddr_t idxpa, lastidxpa;
int cidx = 0; /* XXX: GCC */
@@ -138,16 +136,42 @@
KASSERT(mutex_owned(&uvm_fpageqlock));
- try = roundup(max(atop(low), ps->avail_start), atop(alignment));
- limit = min(atop(high), ps->avail_end);
+ low = atop(low);
+ high = atop(high);
+ alignment = atop(alignment);
+
+ /*
+ * We start our search just after where the last allocation
+ * succeeded.
+ */
+ try = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
+ limit = min(high, ps->avail_end);
pagemask = ~((boundary >> PAGE_SHIFT) - 1);
+ skip = 0;
+ second_pass = false;
+ pgs = ps->pgs;
for (;;) {
+ bool ok = true;
+ signed int cnt;
+
if (try + num > limit) {
+ if (ps->start_hint == 0 || second_pass) {
+ /*
+ * We've run past the allowable range.
+ */
+ return 0; /* FAIL = 0 pages */
+ }
/*
- * We've run past the allowable range.
+ * We've wrapped around the end of this segment
+ * so restart at the beginning, but now our limit
+ * is where we started.
*/
- return (0); /* FAIL */
+ second_pass = true;
+ try = roundup2(max(low, ps->avail_start), alignment);
+ limit = min(high, ps->avail_start + ps->start_hint);
+ skip = 0;
+ continue;
}
if (boundary != 0 &&
((try ^ (try + num - 1)) & pagemask) != 0) {
@@ -156,7 +180,8 @@
* just crossed and ensure alignment.
*/
try = (try + num - 1) & pagemask;
- try = roundup(try, atop(alignment));
+ try = roundup2(try, alignment);
+ skip = 0;
continue;
}
#ifdef DEBUG
@@ -175,18 +200,31 @@
#endif
tryidx = try - ps->start;
end = tryidx + num;
- pgs = ps->pgs;
/*
* Found a suitable starting page. See if the range is free.
*/
- for (idx = tryidx; idx < end; idx++) {
- if (VM_PAGE_IS_FREE(&pgs[idx]) == 0)
+#ifdef PGALLOC_VERBOSE
+ printf("%s: ps=%p try=%#x end=%#x skip=%#x, align=%#x",
+ __func__, ps, tryidx, end, skip, alignment);
+#endif
+ /*
+ * We start at the end and work backwards since if we find a
+ * non-free page, it makes no sense to continue.
+ *
+ * But on the plus side we have "vetted" some number of free
+ * pages. If this iteration fails, we may be able to skip
+ * testing most of those pages again in the next pass.
+ */
+ for (idx = end - 1; idx >= tryidx + skip; idx--) {
+ if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
+ ok = false;
break;
+ }
#ifdef DEBUG
- idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
if (idx > tryidx) {
+ idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
if ((lastidxpa + PAGE_SIZE) != idxpa) {
/*
@@ -205,23 +243,53 @@
}
#endif
}
- if (idx == end)
+
+ if (ok) {
+ while (skip-- > 0) {
+ KDASSERT(VM_PAGE_IS_FREE(&pgs[tryidx + skip]));
+ }
+#ifdef PGALLOC_VERBOSE
+ printf(": ok\n");
+#endif
break;
+ }
- try += atop(alignment);
+#ifdef PGALLOC_VERBOSE
+ printf(": non-free at %#x\n", idx - tryidx);
+#endif
+ /*
+ * count the number of pages we can advance
+ * since we know they aren't all free.
+ */
+ cnt = idx + 1 - tryidx;
+ /*
+ * now round up that to the needed alignment.
+ */
+ cnt = roundup2(cnt, alignment);
+ /*
+ * The number of pages we can skip checking
+ * (might be 0 if cnt > num).
+ */
+ skip = max(num - cnt, 0);
+ try += cnt;
}
/*
* we have a chunk of memory that conforms to the requested constraints.
*/
- idx = tryidx;
- while (idx < end)
- uvm_pglist_add(&pgs[idx++], rlist);
+ for (idx = tryidx, pgs += idx; idx < end; idx++, pgs++)
+ uvm_pglist_add(pgs, rlist);
+
+ /*
+ * the next time we need to search this segment, start after this
+ * chunk of pages we just allocated.
+ */
+ ps->start_hint = tryidx + num;
#ifdef PGALLOC_VERBOSE
printf("got %d pgs\n", num);
#endif
- return (num); /* number of pages allocated */
+ return num; /* number of pages allocated */
}
static int
@@ -287,6 +355,7 @@
{
int todo, limit, try;
struct vm_page *pg;
+ bool second_pass;
#ifdef DEBUG
int cidx = 0; /* XXX: GCC */
#endif
@@ -297,26 +366,45 @@
KASSERT(mutex_owned(&uvm_fpageqlock));
+ low = atop(low);
+ high = atop(high);
todo = num;
- limit = min(atop(high), ps->avail_end);
+ try = max(low, ps->avail_start + ps->start_hint);
+ limit = min(high, ps->avail_end);
+ pg = &ps->pgs[try - ps->start];
+ second_pass = false;
- for (try = max(atop(low), ps->avail_start);
- try < limit; try ++) {
+ for (;; try++, pg++) {
+ if (try >= limit) {
+ if (ps->start_hint == 0 || second_pass)
+ break;
+ second_pass = true;
+ try = max(low, ps->avail_start);
+ limit = min(high, ps->avail_start + ps->start_hint);
+ pg = &ps->pgs[try - ps->start];
+ continue;
+ }
#ifdef DEBUG
if (vm_physseg_find(try, &cidx) != ps - vm_physmem)
panic("pgalloc simple: botch1");
if (cidx != (try - ps->start))
panic("pgalloc simple: botch2");
#endif
- pg = &ps->pgs[try - ps->start];
if (VM_PAGE_IS_FREE(pg) == 0)
continue;
uvm_pglist_add(pg, rlist);
- if (--todo == 0)
+ if (--todo == 0) {
break;
+ }
}
+ /*
+ * The next time we need to search this segment,
+ * start just after the pages we just allocated.
+ */
+ ps->start_hint = try + 1 - ps->start;
+
#ifdef PGALLOC_VERBOSE
printf("got %d pgs\n", num - todo);
#endif
@@ -411,7 +499,7 @@
if (boundary != 0 && boundary < size)
return (EINVAL);
num = atop(round_page(size));
- low = roundup(low, alignment);
+ low = roundup2(low, alignment);
TAILQ_INIT(rlist);
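
A note on the contiguous case above: uvm_pglistalloc_c_ps() now tests each candidate window from its top end downward, so when a busy page is found at index idx every page above it is already known to be free; the next candidate start jumps past the busy page, rounded up to the alignment with roundup2(), and `skip' records how many already-vetted pages at the front of the next window need not be re-tested. A hedged sketch of just that bookkeeping, using hypothetical names (check_window(), a page_free() callback) and a <sys/param.h>-style roundup2():

#include <stdbool.h>

/* Power-of-two round-up, in the style of <sys/param.h>. */
#define roundup2(x, m)	(((x) + ((m) - 1)) & ~((m) - 1))

/*
 * Test the window [tryidx, tryidx + num) from the top down, skipping
 * the first `skip' pages that a previous pass already proved free.
 * `align' is in pages and assumed to be a power of two (>= 1).  On
 * failure, *advance is how far the window start may jump and *newskip
 * is how many pages of the next window are already known to be free.
 */
static bool
check_window(unsigned int tryidx, unsigned int num, unsigned int align,
    unsigned int skip, unsigned int *advance, unsigned int *newskip,
    bool (*page_free)(unsigned int))
{
	int idx;

	for (idx = (int)(tryidx + num) - 1; idx >= (int)(tryidx + skip); idx--) {
		if (!page_free((unsigned int)idx)) {
			/* Pages idx+1 .. tryidx+num-1 are known to be free. */
			unsigned int cnt = (unsigned int)idx + 1 - tryidx;
			cnt = roundup2(cnt, align);	/* keep the alignment */
			*advance = cnt;			/* next try = tryidx + cnt */
			*newskip = (cnt < num) ? num - cnt : 0;
			return false;
		}
	}
	return true;	/* whole window is free; the caller can allocate it */
}

On success the caller allocates the window and, as in the diff, stores the index just past it in start_hint for the next search.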
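The debug-only verification loop in uvm_pglist_add() is also rewritten with LIST_FOREACH() from <sys/queue.h> in place of the open-coded LIST_FIRST()/LIST_NEXT() walk. A small, self-contained usage example of that macro family (struct node and the list here are purely illustrative):

#include <stdio.h>
#include <sys/queue.h>

struct node {
	int value;
	LIST_ENTRY(node) link;		/* embedded linkage, like pageq.list */
};

LIST_HEAD(nodelist, node);

int
main(void)
{
	struct nodelist head = LIST_HEAD_INITIALIZER(head);
	struct node a = { .value = 1 }, b = { .value = 2 }, *n;

	LIST_INSERT_HEAD(&head, &a, link);
	LIST_INSERT_HEAD(&head, &b, link);

	/* Visits b then a; equivalent to the open-coded loop the diff replaces. */
	LIST_FOREACH(n, &head, link)
		printf("%d\n", n->value);
	return 0;
}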