Source-Changes-HG archive
[src/trunk]: src/sys/uvm factor out swap clustering code.
details: https://anonhg.NetBSD.org/src/rev/2d95b20b6403
branches: trunk
changeset: 588225:2d95b20b6403
user: yamt <yamt%NetBSD.org@localhost>
date: Sun Feb 12 09:19:59 2006 +0000
description:
factor out swap clustering code.
diffstat:
sys/uvm/uvm_pdaemon.c | 662 +++++++++++++++++++++++++++----------------------
1 files changed, 358 insertions(+), 304 deletions(-)
diffs (truncated from 756 to 300 lines):
diff -r 84c77ab13145 -r 2d95b20b6403 sys/uvm/uvm_pdaemon.c
--- a/sys/uvm/uvm_pdaemon.c Sun Feb 12 09:19:27 2006 +0000
+++ b/sys/uvm/uvm_pdaemon.c Sun Feb 12 09:19:59 2006 +0000
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/
+/*	$NetBSD: uvm_pdaemon.c,v 1.73 2006/02/12 09:19:59 yamt Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.73 2006/02/12 09:19:59 yamt Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -371,6 +371,121 @@
 	}
 }
 
+#if defined(VMSWAP)
+struct swapcluster {
+	int swc_slot;
+	int swc_nallocated;
+	int swc_nused;
+	struct vm_page *swc_pages[round_page(MAXPHYS) >> PAGE_SHIFT];
+};
+
+static void
+swapcluster_init(struct swapcluster *swc)
+{
+
+	swc->swc_slot = 0;
+}
+
+static int
+swapcluster_allocslots(struct swapcluster *swc)
+{
+	int slot;
+	int npages;
+
+	if (swc->swc_slot != 0) {
+		return 0;
+	}
+
+	/* Even with strange MAXPHYS, the shift
+	   implicitly rounds down to a page. */
+	npages = MAXPHYS >> PAGE_SHIFT;
+	slot = uvm_swap_alloc(&npages, TRUE);
+	if (slot == 0) {
+		return ENOMEM;
+	}
+	swc->swc_slot = slot;
+	swc->swc_nallocated = npages;
+	swc->swc_nused = 0;
+
+	return 0;
+}
+
+static int
+swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
+{
+	int slot;
+	struct uvm_object *uobj;
+
+	KASSERT(swc->swc_slot != 0);
+	KASSERT(swc->swc_nused < swc->swc_nallocated);
+	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);
+
+	slot = swc->swc_slot + swc->swc_nused;
+	uobj = pg->uobject;
+	if (uobj == NULL) {
+		LOCK_ASSERT(simple_lock_held(&pg->uanon->an_lock));
+		pg->uanon->an_swslot = slot;
+	} else {
+		int result;
+
+		LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));
+		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
+		if (result == -1) {
+			return ENOMEM;
+		}
+	}
+	swc->swc_pages[swc->swc_nused] = pg;
+	swc->swc_nused++;
+
+	return 0;
+}
+
+static void
+swapcluster_flush(struct swapcluster *swc, boolean_t now)
+{
+	int slot;
+	int nused;
+	int nallocated;
+	int error;
+
+	if (swc->swc_slot == 0) {
+		return;
+	}
+	KASSERT(swc->swc_nused <= swc->swc_nallocated);
+
+	slot = swc->swc_slot;
+	nused = swc->swc_nused;
+	nallocated = swc->swc_nallocated;
+
+	/*
+	 * if this is the final pageout we could have a few
+	 * unused swap blocks.  if so, free them now.
+	 */
+
+	if (nused < nallocated) {
+		if (!now) {
+			return;
+		}
+		uvm_swap_free(slot + nused, nallocated - nused);
+	}
+
+	/*
+	 * now start the pageout.
+	 */
+
+	uvmexp.pdpageouts++;
+	error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
+	KASSERT(error == 0);
+
+	/*
+	 * zero swslot to indicate that we are
+	 * no longer building a swap-backed cluster.
+	 */
+
+	swc->swc_slot = 0;
+}
+#endif /* defined(VMSWAP) */
+
 /*
  * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
  *
@@ -388,13 +503,9 @@
 	struct uvm_object *uobj;
 	struct vm_anon *anon;
 #if defined(VMSWAP)
-	struct vm_page *swpps[round_page(MAXPHYS) >> PAGE_SHIFT];
-	int error;
-	int result;
+	struct swapcluster swc;
 #endif /* defined(VMSWAP) */
 	struct simplelock *slock;
-	int swnpages, swcpages;
-	int swslot;
 	int dirtyreacts, t;
 	boolean_t anonunder, fileunder, execunder;
 	boolean_t anonover, fileover, execover;
@@ -407,8 +518,9 @@
 	 * a swap-cluster to build.
 	 */
 
-	swslot = 0;
-	swnpages = swcpages = 0;
+#if defined(VMSWAP)
+	swapcluster_init(&swc);
+#endif /* defined(VMSWAP) */
 	dirtyreacts = 0;
 
 	/*
@@ -436,342 +548,279 @@
 	anonreact = TRUE;
 #endif /* !defined(VMSWAP) */
 
-	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
+	for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) {
 		uobj = NULL;
 		anon = NULL;
-		if (p) {
+
+		/*
+		 * see if we've met the free target.
+		 */
 
-			/*
-			 * see if we've met the free target.
-			 */
+		if (uvmexp.free + uvmexp.paging >= uvmexp.freetarg << 2 ||
+		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
+			UVMHIST_LOG(pdhist,"  met free target: "
+			    "exit loop", 0, 0, 0, 0);
+			break;
+		}
+
+		/*
+		 * we are below target and have a new page to consider.
+		 */
 
-			if (uvmexp.free + uvmexp.paging >=
-			    uvmexp.freetarg << 2 ||
-			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
-				UVMHIST_LOG(pdhist,"  met free target: "
-				    "exit loop", 0, 0, 0, 0);
+		uvmexp.pdscans++;
+		nextpg = TAILQ_NEXT(p, pageq);
+
+		/*
+		 * move referenced pages back to active queue and
+		 * skip to next page.
+		 */
 
-				if (swslot == 0) {
-					/* exit now if no swap-i/o pending */
-					break;
-				}
+		if (pmap_is_referenced(p)) {
+			uvm_pageactivate(p);
+			uvmexp.pdreact++;
+			continue;
+		}
+		anon = p->uanon;
+		uobj = p->uobject;
 
-				/* set p to null to signal final swap i/o */
-				p = NULL;
-				nextpg = NULL;
-			}
-		}
-		if (p) {	/* if (we have a new page to consider) */
-
-			/*
-			 * we are below target and have a new page to consider.
-			 */
+		/*
+		 * enforce the minimum thresholds on different
+		 * types of memory usage.  if reusing the current
+		 * page would reduce that type of usage below its
+		 * minimum, reactivate the page instead and move
+		 * on to the next page.
+		 */
 
-			uvmexp.pdscans++;
-			nextpg = TAILQ_NEXT(p, pageq);
+		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && execreact) {
+			uvm_pageactivate(p);
+			uvmexp.pdreexec++;
+			continue;
+		}
+		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
+		    !UVM_OBJ_IS_VTEXT(uobj) && filereact) {
+			uvm_pageactivate(p);
+			uvmexp.pdrefile++;
+			continue;
+		}
+		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && anonreact) {
+			uvm_pageactivate(p);
+			uvmexp.pdreanon++;
+			continue;
+		}
 
-			/*
-			 * move referenced pages back to active queue and
-			 * skip to next page.
-			 */
+		/*
+		 * first we attempt to lock the object that this page
+		 * belongs to.  if our attempt fails we skip on to
+		 * the next page (no harm done).  it is important to
+		 * "try" locking the object as we are locking in the
+		 * wrong order (pageq -> object) and we don't want to
+		 * deadlock.
+		 *
+		 * the only time we expect to see an ownerless page
+		 * (i.e. a page with no uobject and !PQ_ANON) is if an
+		 * anon has loaned a page from a uvm_object and the
+		 * uvm_object has dropped the ownership.  in that
+		 * case, the anon can "take over" the loaned page
+		 * and make it its own.
+		 */
 
-			if (pmap_is_referenced(p)) {
-				uvm_pageactivate(p);
-				uvmexp.pdreact++;
+		/* does the page belong to an object? */
+		if (uobj != NULL) {
+			slock = &uobj->vmobjlock;
+			if (!simple_lock_try(slock)) {
 				continue;
 			}
-			anon = p->uanon;
-			uobj = p->uobject;
-
-			/*
-			 * enforce the minimum thresholds on different
-			 * types of memory usage.  if reusing the current
-			 * page would reduce that type of usage below its
-			 * minimum, reactivate the page instead and move
-			 * on to the next page.
-			 */
-
-			if (uobj && UVM_OBJ_IS_VTEXT(uobj) && execreact) {
-				uvm_pageactivate(p);
-				uvmexp.pdreexec++;
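
The truncated remainder of the diff converts uvmpd_scan_inactive() itself to the new interface. To make the life cycle of the factored-out helpers concrete, here is a minimal usage sketch in the same KNF style, assuming only the four functions and struct fields shown above. The function name and the simplified loop are hypothetical, and the real pagedaemon's object locking, page busying, and pmap bookkeeping are deliberately omitted.

/*
 * swapcluster_usage_sketch: hypothetical caller, for illustration only.
 * the real caller is uvmpd_scan_inactive(), which also handles locking,
 * page busying and iterating the queue safely via nextpg.
 */
static void
swapcluster_usage_sketch(struct pglist *pglst)
{
	struct swapcluster swc;
	struct vm_page *pg;

	/* no swap slots are reserved until the first page arrives. */
	swapcluster_init(&swc);

	TAILQ_FOREACH(pg, pglst, pageq) {

		/*
		 * lazily reserve a MAXPHYS-sized run of contiguous
		 * swap slots.  ENOMEM means swap is full, so stop.
		 */

		if (swapcluster_allocslots(&swc) != 0) {
			break;
		}

		/*
		 * record the slot in the page's anon or aobj and
		 * queue the page in the cluster.
		 */

		if (swapcluster_add(&swc, pg) != 0) {
			continue;
		}

		/*
		 * a full cluster is written out as a single
		 * uvm_swap_put() i/o; a partial cluster keeps
		 * accumulating because "now" is FALSE.
		 */

		if (swc.swc_nused == swc.swc_nallocated) {
			swapcluster_flush(&swc, FALSE);
		}
	}

	/*
	 * final flush: free the unused tail of the slot range and
	 * start i/o on whatever pages accumulated.
	 */

	swapcluster_flush(&swc, TRUE);
}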