Source-Changes-HG archive
[src/trunk]: src/sys/uvm - Change uvm_{lock, unlock}_fpageq() to return/take t...
details: https://anonhg.NetBSD.org/src/rev/a7d16a83a071
branches: trunk
changeset: 473196:a7d16a83a071
user: thorpej <thorpej@NetBSD.org>
date: Mon May 24 19:10:57 1999 +0000
description:
- Change uvm_{lock,unlock}_fpageq() to return/take the previous interrupt
  level directly, instead of making the caller wrap the calls in
  splimp()/splx().
- Add a comment documenting that interrupts that cause memory allocation
  must be blocked while the free page queue is locked.

Since interrupts must be blocked while this lock is asserted, tying them
together like this helps to prevent mistakes.
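To make the interface change concrete, here is a minimal before/after
sketch of the calling convention, distilled from the hunks below (the
surrounding free-page-queue manipulation is elided and illustrative only):

	/* before: every caller open-coded the spl dance around the lock */
	int s;

	s = splimp();			/* block interrupts that can allocate memory */
	uvm_lock_fpageq();		/* lock free page queue */
	/* ... manipulate uvm.page_free ... */
	uvm_unlock_fpageq();		/* unlock free page queue */
	splx(s);			/* restore previous interrupt level */

	/* after: the lock functions carry the interrupt level themselves */
	int s;

	s = uvm_lock_fpageq();		/* raises spl and takes the lock in one step */
	/* ... manipulate uvm.page_free ... */
	uvm_unlock_fpageq(s);		/* drops the lock and restores the spl */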
diffstat:
sys/uvm/uvm_page.c | 18 ++++++------------
sys/uvm/uvm_page.h | 7 ++++---
sys/uvm/uvm_page_i.h | 36 +++++++++++++++++++++++++++++++++++-
sys/uvm/uvm_pdaemon.c | 14 +++++---------
sys/uvm/uvm_pglist.c | 14 +++++---------
5 files changed, 55 insertions(+), 34 deletions(-)
diffs (222 lines):
diff -r f3ed854b19f4 -r a7d16a83a071 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Mon May 24 18:40:48 1999 +0000
+++ b/sys/uvm/uvm_page.c Mon May 24 19:10:57 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.20 1999/05/20 23:03:23 thorpej Exp $ */
+/* $NetBSD: uvm_page.c,v 1.21 1999/05/24 19:10:57 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -845,9 +845,7 @@
panic("uvm_pagealloc: obj and anon != NULL");
#endif
- s = splimp();
-
- uvm_lock_fpageq(); /* lock free page queue */
+ s = uvm_lock_fpageq(); /* lock free page queue */
/*
* check to see if we need to generate some free pages waking
@@ -917,8 +915,7 @@
TAILQ_REMOVE(freeq, pg, pageq);
uvmexp.free--;
- uvm_unlock_fpageq(); /* unlock free page queue */
- splx(s);
+ uvm_unlock_fpageq(s); /* unlock free page queue */
pg->offset = off;
pg->uobject = obj;
@@ -943,8 +940,7 @@
return(pg);
fail:
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
return (NULL);
}
@@ -1086,8 +1082,7 @@
* and put on free queue
*/
- s = splimp();
- uvm_lock_fpageq();
+ s = uvm_lock_fpageq();
TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
pg, pageq);
pg->pqflags = PQ_FREE;
@@ -1097,8 +1092,7 @@
pg->uanon = (void *)0xdeadbeef;
#endif
uvmexp.free++;
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
}
#if defined(UVM_PAGE_TRKOWN)
diff -r f3ed854b19f4 -r a7d16a83a071 sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h Mon May 24 18:40:48 1999 +0000
+++ b/sys/uvm/uvm_page.h Mon May 24 19:10:57 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.h,v 1.11 1999/03/25 18:48:53 mrg Exp $ */
+/* $NetBSD: uvm_page.h,v 1.12 1999/05/24 19:10:57 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -79,8 +79,6 @@
#define uvm_lock_pageq() simple_lock(&uvm.pageqlock)
#define uvm_unlock_pageq() simple_unlock(&uvm.pageqlock)
-#define uvm_lock_fpageq() simple_lock(&uvm.fpageqlock)
-#define uvm_unlock_fpageq() simple_unlock(&uvm.fpageqlock)
#define uvm_pagehash(obj,off) \
(((unsigned long)obj+(unsigned long)atop(off)) & uvm.page_hashmask)
@@ -108,6 +106,9 @@
#endif
void uvm_page_rehash __P((void));
+PAGE_INLINE int uvm_lock_fpageq __P((void));
+PAGE_INLINE void uvm_unlock_fpageq __P((int));
+
PAGE_INLINE void uvm_pageactivate __P((struct vm_page *));
vaddr_t uvm_pageboot_alloc __P((vsize_t));
PAGE_INLINE void uvm_pagecopy __P((struct vm_page *, struct vm_page *));
diff -r f3ed854b19f4 -r a7d16a83a071 sys/uvm/uvm_page_i.h
--- a/sys/uvm/uvm_page_i.h Mon May 24 18:40:48 1999 +0000
+++ b/sys/uvm/uvm_page_i.h Mon May 24 19:10:57 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page_i.h,v 1.9 1999/03/25 18:48:55 mrg Exp $ */
+/* $NetBSD: uvm_page_i.h,v 1.10 1999/05/24 19:10:57 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -80,6 +80,40 @@
#if defined(UVM_PAGE_INLINE) || defined(UVM_PAGE)
/*
+ * uvm_lock_fpageq: lock the free page queue
+ *
+ * => free page queue can be accessed in interrupt context, so this
+ * blocks all interrupts that can cause memory allocation, and
+ * returns the previous interrupt level.
+ */
+
+PAGE_INLINE int
+uvm_lock_fpageq()
+{
+ int s;
+
+ s = splimp();
+ simple_lock(&uvm.fpageqlock);
+ return (s);
+}
+
+/*
+ * uvm_unlock_fpageq: unlock the free page queue
+ *
+ * => caller must supply interrupt level returned by uvm_lock_fpageq()
+ * so that it may be restored.
+ */
+
+PAGE_INLINE void
+uvm_unlock_fpageq(s)
+ int s;
+{
+
+ simple_unlock(&uvm.fpageqlock);
+ splx(s);
+}
+
+/*
* uvm_pagelookup: look up a page
*
* => caller should lock object to keep someone from pulling the page
diff -r f3ed854b19f4 -r a7d16a83a071 sys/uvm/uvm_pdaemon.c
--- a/sys/uvm/uvm_pdaemon.c Mon May 24 18:40:48 1999 +0000
+++ b/sys/uvm/uvm_pdaemon.c Mon May 24 19:10:57 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.15 1999/03/30 10:12:01 mycroft Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.16 1999/05/24 19:10:57 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -367,11 +367,9 @@
* update our copy of "free" and see if we've met
* our target
*/
- s = splimp();
- uvm_lock_fpageq();
+ s = uvm_lock_fpageq();
free = uvmexp.free;
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
dirtyreacts == UVMPD_NUMDIRTYREACTS) {
@@ -954,11 +952,9 @@
/*
* get current "free" page count
*/
- s = splimp();
- uvm_lock_fpageq();
+ s = uvm_lock_fpageq();
free = uvmexp.free;
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
#ifndef __SWAP_BROKEN
/*
diff -r f3ed854b19f4 -r a7d16a83a071 sys/uvm/uvm_pglist.c
--- a/sys/uvm/uvm_pglist.c Mon May 24 18:40:48 1999 +0000
+++ b/sys/uvm/uvm_pglist.c Mon May 24 19:10:57 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pglist.c,v 1.6 1998/08/13 02:11:03 eeh Exp $ */
+/* $NetBSD: uvm_pglist.c,v 1.7 1999/05/24 19:10:58 thorpej Exp $ */
#define VM_PAGE_ALLOC_MEMORY_STATS
@@ -136,8 +136,7 @@
/*
* Block all memory allocation and lock the free list.
*/
- s = splimp();
- uvm_lock_fpageq(); /* lock free page queue */
+ s = uvm_lock_fpageq(); /* lock free page queue */
/* Are there even any free pages? */
for (idx = 0; idx < VM_NFREELIST; idx++)
@@ -238,8 +237,7 @@
error = 0;
out:
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
/*
* check to see if we need to generate some free pages waking
@@ -271,8 +269,7 @@
/*
* Block all memory allocation and lock the free list.
*/
- s = splimp();
- uvm_lock_fpageq();
+ s = uvm_lock_fpageq();
while ((m = list->tqh_first) != NULL) {
#ifdef DIAGNOSTIC
@@ -287,6 +284,5 @@
STAT_DECR(uvm_pglistalloc_npages);
}
- uvm_unlock_fpageq();
- splx(s);
+ uvm_unlock_fpageq(s);
}