Source-Changes-HG archive
[src/trunk]: src/sys/uvm Improve comments on uvm_anon.c, tidy up slightly.
details: https://anonhg.NetBSD.org/src/rev/7f1a3b8b3ae8
branches: trunk
changeset: 766146:7f1a3b8b3ae8
user: rmind <rmind%NetBSD.org@localhost>
date: Fri Jun 17 02:12:35 2011 +0000
description:
Improve comments on uvm_anon.c, tidy up slightly.
No functional changes.
diffstat:
sys/uvm/uvm_anon.c | 192 +++++++++++++++++++++-------------------------------
1 files changed, 78 insertions(+), 114 deletions(-)
diffs (truncated from 401 to 300 lines):
diff -r df7d5d00bc04 -r 7f1a3b8b3ae8 sys/uvm/uvm_anon.c
--- a/sys/uvm/uvm_anon.c Thu Jun 16 23:35:35 2011 +0000
+++ b/sys/uvm/uvm_anon.c Fri Jun 17 02:12:35 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_anon.c,v 1.54 2011/06/12 03:36:02 rmind Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.55 2011/06/17 02:12:35 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.54 2011/06/12 03:36:02 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.55 2011/06/17 02:12:35 rmind Exp $");
#include "opt_uvmhist.h"
@@ -43,21 +43,17 @@
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>
-static struct pool_cache uvm_anon_cache;
+static struct pool_cache uvm_anon_cache;
-static int uvm_anon_ctor(void *, void *, int);
-static void uvm_anon_dtor(void *, void *);
+static int uvm_anon_ctor(void *, void *, int);
-/*
- * allocate anons
- */
void
uvm_anon_init(void)
{
pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
- uvm_anon_dtor, NULL);
+ NULL, NULL);
}
static int
@@ -69,21 +65,14 @@
anon->an_page = NULL;
#if defined(VMSWAP)
anon->an_swslot = 0;
-#endif /* defined(VMSWAP) */
-
+#endif
return 0;
}
-static void
-uvm_anon_dtor(void *arg, void *object)
-{
-
-}
-
/*
- * allocate an anon
+ * uvm_analloc: allocate a new anon.
*
- * => new anon is returned locked!
+ * => anon will have no lock associated.
*/
struct vm_anon *
uvm_analloc(void)
@@ -96,7 +85,7 @@
KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
KASSERT(anon->an_swslot == 0);
-#endif /* defined(VMSWAP) */
+#endif
anon->an_ref = 1;
anon->an_lock = NULL;
}
@@ -104,35 +93,27 @@
}
/*
- * uvm_anfree: free a linked list of anon structures
+ * uvm_anfree1: free a single anon.
*
- * => caller must remove anon from its amap before calling (if it was in
- * an amap).
- * => amap must be locked, or anon must not be associated with a lock
- * or any other objects.
- * => we may lock the pageq's.
- * => we may drop and re-acquire amap lock
+ * => anon must be removed from the amap (if anon was in an amap).
+ * => amap must be locked or anon must not be associated.
+ * => amap lock may be dropped and re-acquired here.
*/
static void
uvm_anfree1(struct vm_anon *anon)
{
- struct vm_page *pg;
+ struct vm_page *pg = anon->an_page;
+
UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);
KASSERT(anon->an_lock == NULL || mutex_owned(anon->an_lock));
/*
- * get page
- */
-
- pg = anon->an_page;
-
- /*
- * if there is a resident page and it is loaned, then anon may not
- * own it. call out to uvm_anon_lockpage() to ensure the real owner
- * of the page has been identified and locked.
+ * If there is a resident page and it is loaned, then anon may not
+ * own it. Call out to uvm_anon_lockloanpg() to identify and lock
+ * the real owner of the page.
*/
if (pg && pg->loan_count) {
@@ -141,16 +122,16 @@
}
/*
- * if we have a resident page, we must dispose of it before freeing
- * the anon.
+ * Dispose the page, if it is resident.
*/
if (pg) {
KASSERT(anon->an_lock != NULL);
/*
- * if the page is owned by a uobject (now locked), then we must
- * kill the loan on the page rather than free it.
+ * If the page is owned by a UVM object (now locked),
+ * then kill the loan on the page rather than free it,
+ * and release the object lock.
*/
if (pg->uobject) {
@@ -163,15 +144,16 @@
} else {
/*
- * page has no uobject, so we must be the owner of it.
+ * If page has no UVM object, then anon is the owner,
+ * and it is already locked.
*/
KASSERT((pg->flags & PG_RELEASED) == 0);
pmap_page_protect(pg, VM_PROT_NONE);
/*
- * if the page is busy, mark it as PG_RELEASED
- * so that uvm_anon_release will release it later.
+ * If the page is busy, mark it as PG_RELEASED, so
+ * that uvm_anon_release(9) would release it later.
*/
if (pg->flags & PG_BUSY) {
@@ -185,37 +167,29 @@
"freed now!", anon, pg, 0, 0);
}
}
+
#if defined(VMSWAP)
if (pg == NULL && anon->an_swslot > 0) {
- /* this page is no longer only in swap. */
+ /* This page is no longer only in swap. */
mutex_enter(&uvm_swap_data_lock);
KASSERT(uvmexp.swpgonly > 0);
uvmexp.swpgonly--;
mutex_exit(&uvm_swap_data_lock);
}
-#endif /* defined(VMSWAP) */
+#endif
/*
- * free any swap resources.
+ * Free any swap resources, leave a page replacement hint, drop
+ * the reference on lock and finally destroy the anon itself.
*/
uvm_anon_dropswap(anon);
-
- /*
- * give a page replacement hint.
- */
-
uvmpdpol_anfree(anon);
- /*
- * now that we've stripped the data areas from the anon,
- * free the anon itself.
- */
-
KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
KASSERT(anon->an_swslot == 0);
-#endif /* defined(VMSWAP) */
+#endif
if (anon->an_lock != NULL) {
mutex_obj_free(anon->an_lock);
@@ -224,43 +198,24 @@
UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
+/*
+ * uvm_anfree: free a linked list of anon structures.
+ */
void
uvm_anfree(struct vm_anon *anon)
{
struct vm_anon *next;
for (; anon != NULL; anon = next) {
+ /* Note: clearing an_link also clears a reference count. */
next = anon->an_link;
- anon->an_link = NULL; /* also clears reference count */
+ anon->an_link = NULL;
uvm_anfree1(anon);
}
}
-#if defined(VMSWAP)
-
/*
- * uvm_anon_dropswap: release any swap resources from this anon.
- *
- * => anon must be locked or have a reference count of 0.
- */
-void
-uvm_anon_dropswap(struct vm_anon *anon)
-{
- UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
-
- if (anon->an_swslot == 0)
- return;
-
- UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
- anon, anon->an_swslot, 0, 0);
- uvm_swap_free(anon->an_swslot, 1);
- anon->an_swslot = 0;
-}
-
-#endif /* defined(VMSWAP) */
-
-/*
- * uvm_anon_lockloanpg: given a locked anon, lock its resident page
+ * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
*
* => anon is locked by caller
* => on return: anon is locked
@@ -331,8 +286,8 @@
}
/*
- * if page is un-owned [i.e. the object dropped its ownership],
- * then we can take over as owner!
+ * If page is un-owned i.e. the object dropped its ownership,
+ * then we have to take the ownership.
*/
if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
@@ -343,13 +298,13 @@
}
break;
}
- return(pg);
+ return pg;
}
#if defined(VMSWAP)
/*
- * fetch an anon's page.
+ * uvm_anon_pagein: fetch an anon's page.
*
* => anon must be locked, and is unlocked upon return.
* => returns true if pagein was aborted due to lack of memory.
@@ -360,67 +315,56 @@
{
struct vm_page *pg;
struct uvm_object *uobj;
- int rv;
- /* locked: anon */
KASSERT(mutex_owned(anon->an_lock));
- rv = uvmfault_anonget(NULL, NULL, anon);
-
/*
- * if rv == 0, anon is still locked, else anon
- * is unlocked
+ * Get the page of the anon.
*/
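
Illustration only, not part of the commit: a minimal, hypothetical caller-side sketch of the
lifecycle the updated comments describe. The function name example_anon_cycle() and the bare
error path are invented for the example; real callers attach a page or swap slot and insert the
anon into an amap, whose lock then serves as anon->an_lock. The sketch relies only on behaviour
stated in the diff above: uvm_analloc() returns an anon with an_ref == 1 and no lock associated,
uvm_anfree() walks the an_link list, and clearing an_link also clears the reference count
(i.e. the two share storage).

	/*
	 * Hypothetical sketch only -- not from the commit.
	 */
	#include <sys/param.h>
	#include <sys/errno.h>
	#include <uvm/uvm.h>

	static int
	example_anon_cycle(void)
	{
		struct vm_anon *anon;

		anon = uvm_analloc();		/* an_ref == 1, an_lock == NULL */
		if (anon == NULL)
			return ENOMEM;		/* pool cache allocation failed */

		/*
		 * A real caller would attach a page or swap slot here and
		 * insert the anon into an amap; the amap's lock would then
		 * become anon->an_lock.  On an error path, drop the sole
		 * reference (an_ref shares storage with an_link, so this
		 * also terminates the free list) and free the anon.
		 */
		anon->an_ref--;
		uvm_anfree(anon);
		return 0;
	}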