Source-Changes-HG archive
[src/trunk]: src/sys/uvm Clean up, sprinkle asserts, consify, use unsigned, u...
details: https://anonhg.NetBSD.org/src/rev/51ba27021634
branches: trunk
changeset: 766208:51ba27021634
user: rmind <rmind%NetBSD.org@localhost>
date: Sat Jun 18 21:13:29 2011 +0000
description:
Clean up, sprinkle asserts, consify, use unsigned, use kmem_zalloc instead
of memset, reduce the scope of some variables, improve some comments.
No functional change intended.
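For reference, the "use kmem_zalloc instead of memset" item above means collapsing an allocate-then-clear pair into a single call that returns zeroed memory. A minimal sketch of the idiom, not part of the patch ("nslots" is a hypothetical count; in the kernel, memset() comes from libkern):

    #include <sys/kmem.h>

    /* Before: allocate with kmem_alloc(), then clear by hand. */
    int *
    alloc_cleared_old(size_t nslots)
    {
        int *p = kmem_alloc(nslots * sizeof(int), KM_SLEEP);

        memset(p, 0, nslots * sizeof(int));
        return p;
    }

    /* After: kmem_zalloc() returns already-zeroed memory. */
    int *
    alloc_cleared_new(size_t nslots)
    {
        return kmem_zalloc(nslots * sizeof(int), KM_SLEEP);
    }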
diffstat:
sys/uvm/uvm_amap.c | 225 +++++++++++++++++++++++++---------------------------
1 files changed, 108 insertions(+), 117 deletions(-)
diffs (truncated from 504 to 300 lines):
diff -r 39a73b5737e1 -r 51ba27021634 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Sat Jun 18 20:51:22 2011 +0000
+++ b/sys/uvm/uvm_amap.c Sat Jun 18 21:13:29 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.94 2011/06/18 20:51:22 rmind Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.95 2011/06/18 21:13:29 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.94 2011/06/18 20:51:22 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.95 2011/06/18 21:13:29 rmind Exp $");
#include "opt_uvmhist.h"
@@ -160,24 +160,22 @@
#endif /* UVM_AMAP_PPREF */
/*
- * amap_alloc1: internal function that allocates an amap, but does not
- * init the overlay.
+ * amap_alloc1: allocate an amap, but do not initialise the overlay.
*
- * => lock is not initialized
+ * => Note: lock is not set.
*/
static inline struct vm_amap *
-amap_alloc1(int slots, int padslots, int waitf)
+amap_alloc1(int slots, int padslots, int flags)
{
+ const bool nowait = (flags & UVM_FLAG_NOWAIT) != 0;
+ const km_flag_t kmflags = nowait ? KM_NOSLEEP : KM_SLEEP;
struct vm_amap *amap;
int totalslots;
- km_flag_t kmflags;
- amap = pool_cache_get(&uvm_amap_cache,
- ((waitf & UVM_FLAG_NOWAIT) != 0) ? PR_NOWAIT : PR_WAITOK);
- if (amap == NULL)
- return(NULL);
-
- kmflags = ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
+ amap = pool_cache_get(&uvm_amap_cache, nowait ? PR_NOWAIT : PR_WAITOK);
+ if (amap == NULL) {
+ return NULL;
+ }
totalslots = amap_roundup_slots(slots + padslots);
amap->am_lock = NULL;
amap->am_ref = 1;
@@ -206,7 +204,7 @@
if (amap->am_anon == NULL)
goto fail3;
- return(amap);
+ return amap;
fail3:
kmem_free(amap->am_bckptr, totalslots * sizeof(int));
@@ -219,13 +217,13 @@
* XXX hack to tell the pagedaemon how many pages we need,
* since we can need more than it would normally free.
*/
- if ((waitf & UVM_FLAG_NOWAIT) != 0) {
+ if (nowait) {
extern u_int uvm_extrapages;
atomic_add_int(&uvm_extrapages,
((sizeof(int) * 2 + sizeof(struct vm_anon *)) *
totalslots) >> PAGE_SHIFT);
}
- return (NULL);
+ return NULL;
}
/*
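A note on the pattern in amap_alloc1() above: a single test of UVM_FLAG_NOWAIT now drives both the pool_cache flag (PR_NOWAIT/PR_WAITOK) and the kmem flag (KM_NOSLEEP/KM_SLEEP). A reduced, self-contained sketch of that translation, with a hypothetical cache, struct and buffer size:

    #include <sys/types.h>
    #include <sys/kmem.h>
    #include <sys/pool.h>
    #include <uvm/uvm_extern.h>

    #define EXAMPLE_BUFSZ   128     /* hypothetical */

    struct example {
        void *e_buf;
    };

    extern pool_cache_t example_cache;  /* hypothetical cache */

    struct example *
    example_alloc(int uvmflags)
    {
        /* Decide once whether we may sleep; derive both flag sets. */
        const bool nowait = (uvmflags & UVM_FLAG_NOWAIT) != 0;
        const km_flag_t kmflags = nowait ? KM_NOSLEEP : KM_SLEEP;
        struct example *e;

        e = pool_cache_get(example_cache, nowait ? PR_NOWAIT : PR_WAITOK);
        if (e == NULL) {
            return NULL;
        }
        e->e_buf = kmem_zalloc(EXAMPLE_BUFSZ, kmflags);
        if (e->e_buf == NULL) {
            pool_cache_put(example_cache, e);
            return NULL;
        }
        return e;
    }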
@@ -647,7 +645,8 @@
amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
{
struct vm_amap *amap = entry->aref.ar_amap;
- int slots, lcv, slot, stop;
+ u_int slots, lcv, slot, stop;
+ struct vm_anon *anon;
KASSERT(mutex_owned(amap->am_lock));
@@ -655,48 +654,57 @@
stop = entry->aref.ar_pageoff + slots;
if (slots < amap->am_nused) {
- /* cheaper to traverse am_anon */
+ /*
+ * Cheaper to traverse am_anon.
+ */
for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
- if (amap->am_anon[lcv] == NULL)
+ anon = amap->am_anon[lcv];
+ if (anon == NULL) {
continue;
- if (amap->am_anon[lcv]->an_page != NULL)
- pmap_page_protect(amap->am_anon[lcv]->an_page,
- prot);
+ }
+ if (anon->an_page) {
+ pmap_page_protect(anon->an_page, prot);
+ }
}
return;
}
- /* cheaper to traverse am_slots */
+ /*
+ * Cheaper to traverse am_slots.
+ */
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
slot = amap->am_slots[lcv];
- if (slot < entry->aref.ar_pageoff || slot >= stop)
+ if (slot < entry->aref.ar_pageoff || slot >= stop) {
continue;
- if (amap->am_anon[slot]->an_page != NULL)
- pmap_page_protect(amap->am_anon[slot]->an_page, prot);
+ }
+ anon = amap->am_anon[slot];
+ if (anon->an_page) {
+ pmap_page_protect(anon->an_page, prot);
+ }
}
}
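The two branches above preserve the existing cost heuristic: when the entry covers fewer slots than the amap has in use, scanning the entry's own range of am_anon[] touches fewer entries; otherwise walking the am_slots[] list of used slots is cheaper. The same heuristic in isolation, over hypothetical simplified arrays (apply() stands in for pmap_page_protect()):

    #include <sys/types.h>

    /*
     * Sketch of the traversal choice in amap_share_protect(); the
     * arrays mirror am_anon[]/am_slots[] in shape only.
     */
    static void
    apply_in_range(void **anon, const u_int *used, u_int nused,
        u_int start, u_int nslots, void (*apply)(void *))
    {
        const u_int stop = start + nslots;
        u_int lcv, slot;

        if (nslots < nused) {
            /* Dense: the entry's range is the shorter walk. */
            for (lcv = start; lcv < stop; lcv++) {
                if (anon[lcv] != NULL) {
                    apply(anon[lcv]);
                }
            }
            return;
        }
        /* Sparse: the used-slot list is the shorter walk. */
        for (lcv = 0; lcv < nused; lcv++) {
            slot = used[lcv];
            if (slot < start || slot >= stop) {
                continue;
            }
            apply(anon[slot]);
        }
    }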
/*
* amap_wipeout: wipe out all anons in an amap; then free the amap!
*
- * => called from amap_unref when the final reference to an amap is
- * discarded (i.e. when reference count drops to 0)
- * => the amap should be locked (by the caller)
+ * => Called from amap_unref(), when reference count drops to zero.
+ * => amap must be locked.
*/
void
amap_wipeout(struct vm_amap *amap)
{
- int lcv, slot;
- struct vm_anon *anon;
+ u_int lcv;
+
UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);
+ KASSERT(mutex_owned(amap->am_lock));
KASSERT(amap->am_ref == 0);
- if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
+ if (__predict_false(amap->am_flags & AMAP_SWAPOFF)) {
/*
- * amap_swap_off will call us again.
+ * Note: amap_swap_off() will call us again.
*/
amap_unlock(amap);
return;
@@ -704,7 +712,8 @@
amap_list_remove(amap);
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
- int refs;
+ struct vm_anon *anon;
+ u_int slot;
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
@@ -713,27 +722,26 @@
KASSERT(anon->an_lock == amap->am_lock);
UVMHIST_LOG(maphist," processing anon 0x%x, ref=%d", anon,
anon->an_ref, 0, 0);
- refs = --anon->an_ref;
- if (refs == 0) {
- /*
- * we had the last reference to a vm_anon. free it.
- */
+ /*
+ * Drop the reference, and free the anon, if it is last.
+ */
+ if (--anon->an_ref == 0) {
uvm_anfree(anon);
}
-
- if (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
+ if (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
preempt();
+ }
}
/*
- * now we free the map
+ * Finally, destroy the amap.
*/
amap->am_nused = 0;
amap_unlock(amap);
- amap_free(amap); /* will unlock and free amap */
+ amap_free(amap);
UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
}
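Two idioms in the loop above are worth noting: the reference drop frees the anon only when the count reaches zero, and a potentially long loop checks SPCF_SHOULDYIELD so the LWP yields when the scheduler asks. A standalone sketch of the same loop shape ("struct obj" and obj_free() are hypothetical stand-ins for vm_anon and uvm_anfree()):

    #include <sys/types.h>
    #include <sys/proc.h>
    #include <sys/sched.h>

    struct obj {
        u_int o_ref;    /* reference count, protected by a lock */
    };

    void obj_free(struct obj *);    /* hypothetical */

    static void
    release_all(struct obj **list, u_int n)
    {
        u_int i;

        for (i = 0; i < n; i++) {
            struct obj *o = list[i];

            /* Drop the reference; free the object if it was last. */
            if (--o->o_ref == 0) {
                obj_free(o);
            }
            /* Long loop: yield if the scheduler flagged us. */
            if (curlwp->l_cpu->ci_schedstate.spc_flags &
                SPCF_SHOULDYIELD) {
                preempt();
            }
        }
    }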
@@ -947,9 +955,9 @@
amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
{
struct vm_amap *amap = entry->aref.ar_amap;
- int lcv, slot;
struct vm_anon *anon, *nanon;
struct vm_page *pg, *npg;
+ u_int lcv, slot;
/*
* note that if we unlock the amap then we must ReStart the "lcv" for
@@ -960,20 +968,15 @@
ReStart:
amap_lock(amap);
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
-
- /*
- * get the page
- */
-
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
KASSERT(anon->an_lock == amap->am_lock);
/*
- * If the anon has only one ref, we must have already copied it.
- * This can happen if we needed to sleep waiting for memory
- * in a previous run through this loop. The new page might
- * even have been paged out, since the new page is not wired.
+ * If anon has only one reference - we must have already
+ * copied it. This can happen if we needed to sleep waiting
+ * for memory in a previous run through this loop. The new
+ * page might even have been paged out, since it is not wired.
*/
if (anon->an_ref == 1) {
@@ -1000,7 +1003,7 @@
KASSERT(pg->uanon == anon && pg->uobject == NULL);
/*
- * if the page is busy then we have to unlock, wait for
+ * If the page is busy, then we have to unlock, wait for
* it and then restart.
*/
@@ -1012,14 +1015,16 @@
}
/*
- * ok, time to do a copy-on-write to a new anon
+ * Perform a copy-on-write.
+ * First - get a new anon and a page.
*/
nanon = uvm_analloc();
if (nanon) {
npg = uvm_pagealloc(NULL, 0, nanon, 0);
- } else
- npg = NULL; /* XXX: quiet gcc warning */
+ } else {
+ npg = NULL;
+ }
if (nanon == NULL || npg == NULL) {
/*
@@ -1037,19 +1042,20 @@
}
/*
- * got it... now we can copy the data and replace anon
- * with our new one...
+ * Copy the data and replace anon with the new one.
+ * Also, set up its lock (share the amap's lock).
*/
nanon->an_lock = amap->am_lock;
mutex_obj_hold(nanon->an_lock);
- uvm_pagecopy(pg, npg); /* old -> new */
- anon->an_ref--; /* can't drop to zero */
- amap->am_anon[slot] = nanon; /* replace */
+ uvm_pagecopy(pg, npg);
+ anon->an_ref--;
+ KASSERT(anon->an_ref > 0);
+ amap->am_anon[slot] = nanon;
/*
- * drop PG_BUSY on new page ... since we have had its owner
- * locked the whole time it can't be PG_RELEASED or PG_WANTED.
+ * Drop PG_BUSY on new page. Since its owner was locked all
+ * this time - it cannot be PG_RELEASED or PG_WANTED.
*/
mutex_enter(&uvm_pageqlock);
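To summarise the copy-on-write step that this last hunk cleans up: allocate a new anon plus a fresh page, share the amap's lock with the new anon, copy the old page, install the replacement in the slot, and drop (but never release) the old reference. A condensed sketch using the same UVM calls as the diff; the busy-page wait, allocation-failure recovery and restart paths of the real loop are omitted:

    #include <uvm/uvm.h>

    /*
     * Condensed sketch of one COW replacement as in amap_cow_now().
     * Assumes the amap is locked and the old page is not busy; the
     * real code also recovers from allocation failure and restarts.
     */
    static void
    cow_one_slot(struct vm_amap *amap, u_int slot)
    {
        struct vm_anon *anon = amap->am_anon[slot];
        struct vm_page *pg = anon->an_page;
        struct vm_anon *nanon;
        struct vm_page *npg;

        nanon = uvm_analloc();
        npg = (nanon != NULL) ? uvm_pagealloc(NULL, 0, nanon, 0) : NULL;
        if (nanon == NULL || npg == NULL) {
            /* Real code unlocks, releases nanon, waits, restarts. */
            return;
        }

        /* Share the amap's lock with the new anon. */
        nanon->an_lock = amap->am_lock;
        mutex_obj_hold(nanon->an_lock);

        uvm_pagecopy(pg, npg);          /* old -> new */
        anon->an_ref--;                 /* cannot drop to zero here */
        KASSERT(anon->an_ref > 0);
        amap->am_anon[slot] = nanon;    /* install the private copy */
    }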