Source-Changes-HG archive
[src/trunk]: src/sys Several changes, developed and tested concurrently:
details: https://anonhg.NetBSD.org/src/rev/31cbe9d5a22c
branches: trunk
changeset: 473697:31cbe9d5a22c
user: thorpej <thorpej@NetBSD.org>
date: Tue Jun 15 23:27:47 1999 +0000
description:
Several changes, developed and tested concurrently:
* Provide POSIX 1003.1b mlockall(2) and munlockall(2) system calls.
MCL_CURRENT is presently implemented. MCL_FUTURE is not fully
implemented. Also, the same one-unlock-for-every-lock caveat
currently applies here as it does to mlock(2). This will be
addressed in a future commit.
* Provide the mincore(2) system call, with the same semantics as
Solaris.
* Clean up the error recovery in uvm_map_pageable().
* Fix a bug where a process would hang if attempting to mlock a
zero-fill region where none of the pages in that region are resident.
[ This fix has been submitted for inclusion in 1.4.1 ]
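
For context, a minimal userland sketch of the locking interface described
above, using only the prototypes and the MCL_CURRENT flag added in the
sys/mman.h hunk below.  The error handling is illustrative, and MCL_FUTURE
is omitted because the description notes it is not yet fully implemented.

/*
 * Minimal userland sketch of mlockall(2)/munlockall(2) as described above.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	/* Wire every page currently mapped into the process. */
	if (mlockall(MCL_CURRENT) == -1) {
		perror("mlockall");
		exit(1);
	}

	/* ... latency-sensitive work runs here without page faults ... */

	/* Drop the wiring on all mapped pages again. */
	if (munlockall() == -1) {
		perror("munlockall");
		exit(1);
	}
	return (0);
}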
diffstat:
sys/sys/mman.h | 11 +-
sys/uvm/uvm_extern.h | 3 +-
sys/uvm/uvm_map.c | 266 ++++++++++++++++++++++++++++++++++++++++++++++----
sys/uvm/uvm_mmap.c | 200 +++++++++++++++++++++++++++++++++++++-
sys/vm/vm_map.h | 16 ++-
5 files changed, 462 insertions(+), 34 deletions(-)
diffs (truncated from 664 to 300 lines):
diff -r f11943dd80f2 -r 31cbe9d5a22c sys/sys/mman.h
--- a/sys/sys/mman.h Tue Jun 15 22:34:45 1999 +0000
+++ b/sys/sys/mman.h Tue Jun 15 23:27:47 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: mman.h,v 1.21 1999/04/27 20:13:06 cgd Exp $ */
+/* $NetBSD: mman.h,v 1.22 1999/06/15 23:27:48 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986, 1993
@@ -91,6 +91,12 @@
#define MS_INVALIDATE 0x02 /* invalidate cached data */
#define MS_SYNC 0x04 /* perform synchronous writes */
+/*
+ * Flags to mlockall
+ */
+#define MCL_CURRENT 0x01 /* lock all pages currently mapped */
+#define MCL_FUTURE 0x02 /* lock all pages mapped in the future */
+
#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)
/*
* Advice to madvise
@@ -119,8 +125,11 @@
#endif
int mlock __P((const void *, size_t));
int munlock __P((const void *, size_t));
+int mlockall __P((int));
+int munlockall __P((void));
#if !defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)
int madvise __P((void *, size_t, int));
+int mincore __P((void *, size_t, char *));
int minherit __P((void *, size_t, int));
#endif
__END_DECLS
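
The mman.h hunk above also exports the new mincore(2) prototype.  Below is a
hedged userland sketch of how the Solaris-style residency vector mentioned in
the description might be consulted; treating the low-order bit of each status
byte as the residency indicator is an assumption carried over from Solaris,
not something shown in this diff.

#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 16 * pagesz;
	size_t npages = len / pagesz;
	char *vec;
	void *addr;
	size_t i;

	/* Map some anonymous, zero-fill memory to inspect. */
	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* One status byte per page, as in the Solaris interface. */
	vec = malloc(npages);
	if (vec == NULL) {
		perror("malloc");
		exit(1);
	}

	if (mincore(addr, len, vec) == -1) {
		perror("mincore");
		exit(1);
	}

	for (i = 0; i < npages; i++)
		printf("page %lu: %s\n", (unsigned long)i,
		    (vec[i] & 0x01) ? "resident" : "not resident");

	free(vec);
	return (0);
}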
diff -r f11943dd80f2 -r 31cbe9d5a22c sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Tue Jun 15 22:34:45 1999 +0000
+++ b/sys/uvm/uvm_extern.h Tue Jun 15 23:27:47 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.27 1999/05/26 19:16:36 thorpej Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.28 1999/06/15 23:27:47 thorpej Exp $ */
/*
*
@@ -319,6 +319,7 @@
struct uvm_object *, vaddr_t, uvm_flag_t));
int uvm_map_pageable __P((vm_map_t, vaddr_t,
vaddr_t, boolean_t));
+int uvm_map_pageable_all __P((vm_map_t, int, vsize_t));
boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t,
vaddr_t, vm_prot_t));
int uvm_map_protect __P((vm_map_t, vaddr_t,
diff -r f11943dd80f2 -r 31cbe9d5a22c sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Tue Jun 15 22:34:45 1999 +0000
+++ b/sys/uvm/uvm_map.c Tue Jun 15 23:27:47 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.53 1999/06/07 16:31:42 thorpej Exp $ */
+/* $NetBSD: uvm_map.c,v 1.54 1999/06/15 23:27:47 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -1990,8 +1990,7 @@
vaddr_t start, end;
boolean_t new_pageable;
{
- vm_map_entry_t entry, start_entry;
- vaddr_t failed = 0;
+ vm_map_entry_t entry, start_entry, failed_entry;
int rv;
UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
@@ -2025,7 +2024,7 @@
* handle wiring and unwiring seperately.
*/
- if (new_pageable) { /* unwire */
+ if (new_pageable) { /* unwire */
UVM_MAP_CLIP_START(map, entry, start);
@@ -2060,11 +2059,9 @@
entry = start_entry;
while ((entry != &map->header) && (entry->start < end)) {
UVM_MAP_CLIP_END(map, entry, end);
-
entry->wired_count--;
if (entry->wired_count == 0)
uvm_map_entry_unwire(map, entry);
-
entry = entry->next;
}
vm_map_unlock(map);
@@ -2100,7 +2097,7 @@
while ((entry != &map->header) && (entry->start < end)) {
if (entry->wired_count == 0) { /* not already wired? */
-
+
/*
* perform actions of vm_map_lookup that need the
* write lock on the map: create an anonymous map
@@ -2108,22 +2105,17 @@
* for a zero-fill region. (XXXCDC: submap case
* ok?)
*/
-
+
if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
- /*
- * XXXCDC: protection vs. max_protection??
- * (wirefault uses max?)
- * XXXCDC: used to do it always if
- * uvm_obj == NULL (wrong?)
- */
- if ( UVM_ET_ISNEEDSCOPY(entry) &&
- (entry->protection & VM_PROT_WRITE) != 0) {
+ if (UVM_ET_ISNEEDSCOPY(entry) &&
+ ((entry->protection & VM_PROT_WRITE) ||
+ (entry->object.uvm_obj == NULL))) {
amap_copy(map, entry, M_WAITOK, TRUE,
start, end);
/* XXXCDC: wait OK? */
}
}
- } /* wired_count == 0 */
+ } /* wired_count == 0 */
UVM_MAP_CLIP_START(map, entry, start);
UVM_MAP_CLIP_END(map, entry, end);
entry->wired_count++;
@@ -2131,8 +2123,10 @@
/*
* Check for holes
*/
- if (entry->end < end && (entry->next == &map->header ||
- entry->next->start > entry->end)) {
+ if (entry->protection == VM_PROT_NONE ||
+ (entry->end < end &&
+ (entry->next == &map->header ||
+ entry->next->start > entry->end))) {
/*
* found one. amap creation actions do not need to
* be undone, but the wired counts need to be restored.
@@ -2182,16 +2176,24 @@
* first drop the wiring count on all the entries
* which haven't actually been wired yet.
*/
- failed = entry->start;
- while (entry != &map->header && entry->start < end)
+ failed_entry = entry;
+ while (entry != &map->header && entry->start < end) {
entry->wired_count--;
+ entry = entry->next;
+ }
/*
- * now, unlock the map, and unwire all the pages that
- * were successfully wired above.
+ * now, unwire all the entries that were successfully
+ * wired above.
*/
+ entry = start_entry;
+ while (entry != failed_entry) {
+ entry->wired_count--;
+ if (entry->wired_count == 0)
+ uvm_map_entry_unwire(map, entry);
+ entry = entry->next;
+ }
vm_map_unlock(map);
- (void) uvm_map_pageable(map, start, failed, TRUE);
UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
return(rv);
}
@@ -2204,6 +2206,214 @@
}
/*
+ * uvm_map_pageable_all: special case of uvm_map_pageable - affects
+ * all mapped regions.
+ *
+ * => map must not be locked.
+ * => if no flags are specified, all regions are unwired.
+ * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
+ */
+
+int
+uvm_map_pageable_all(map, flags, limit)
+ vm_map_t map;
+ int flags;
+ vsize_t limit;
+{
+ vm_map_entry_t entry, failed_entry;
+ vsize_t size;
+ int rv;
+ UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
+ UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
+
+#ifdef DIAGNOSTIC
+ if ((map->flags & VM_MAP_PAGEABLE) == 0)
+ panic("uvm_map_pageable_all: map %p not pageable", map);
+#endif
+
+ vm_map_lock(map);
+
+ /*
+ * handle wiring and unwiring separately.
+ */
+
+ if (flags == 0) { /* unwire */
+ /*
+ * Decrement the wiring count on the entries. If they
+ * reach zero, unwire them.
+ *
+ * Note, uvm_fault_unwire() (called via uvm_map_entry_unwire())
+ * does not lock the map, so we don't have to do anything
+ * special regarding locking here.
+ */
+ for (entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (entry->wired_count) {
+ if (--entry->wired_count == 0)
+ uvm_map_entry_unwire(map, entry);
+ }
+ }
+ map->flags &= ~VM_MAP_WIREFUTURE;
+ vm_map_unlock(map);
+ UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
+ return (KERN_SUCCESS);
+
+ /*
+ * end of unwire case!
+ */
+ }
+
+ if (flags & MCL_FUTURE) {
+ /*
+ * must wire all future mappings; remember this.
+ */
+ map->flags |= VM_MAP_WIREFUTURE;
+ }
+
+ if ((flags & MCL_CURRENT) == 0) {
+ /*
+ * no more work to do!
+ */
+ UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
+ vm_map_unlock(map);
+ return (KERN_SUCCESS);
+ }
+
+ /*
+ * wire case: in three passes [XXXCDC: ugly block of code here]
+ *
+ * 1: holding the write lock, count all pages mapped by non-wired
+ * entries. if this would cause us to go over our limit, we fail.
+ *
+ * 2: still holding the write lock, we create any anonymous maps that
+ * need to be created. then we increment its wiring count.
+ *
+ * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
+ * in the pages for any newly wired area (wired count is 1).
+ *
+ * downgrading to a read lock for uvm_fault_wire avoids a possible
+ * deadlock with another thread that may have faulted on one of
+ * the pages to be wired (it would mark the page busy, blocking
+ * us, then in turn block on the map lock that we hold). because
+ * of problems in the recursive lock package, we cannot upgrade
+ * to a write lock in vm_map_lookup. thus, any actions that
+ * require the write lock must be done beforehand. because we
+ * keep the read lock on the map, the copy-on-write status of the
+ * entries we modify here cannot change.
+ */
+
+ for (size = 0, entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (entry->protection != VM_PROT_NONE &&
+ entry->wired_count == 0) { /* not already wired? */
+ size += entry->end - entry->start;
+ }
+ }
+
+ if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE); /* XXX overloaded */
+ }
+
+ /* XXX non-pmap_wired_count case must be handled by caller */
+#ifdef pmap_wired_count
+ if (limit != 0 &&
+ (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
+ vm_map_unlock(map);
+ return (KERN_NO_SPACE); /* XXX overloaded */
+ }
+#endif
+
+ /*
+ * Pass 2.
+ */
+
+ for (entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ if (entry->protection == VM_PROT_NONE)
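
The diff is truncated at this point (see the note above), so the remainder of
pass 2 and all of pass 3 are not shown.  As a rough illustration of the
three-pass structure described in the comment block added to
uvm_map_pageable_all(), here is a self-contained toy model; the struct and
helper names are invented for illustration and are not UVM interfaces, and
locking, clipping, and amap creation are reduced to comments.

#include <stdio.h>
#include <stddef.h>

#define TOY_PAGE_SIZE	4096
#define TOY_PROT_NONE	0

struct toy_entry {
	size_t start, end;	/* byte range covered by this entry */
	int protection;		/* TOY_PROT_NONE means "cannot be wired" */
	int wired_count;	/* 0 = not wired */
};

static int
toy_wire_all(struct toy_entry *e, size_t n, size_t limit_pages)
{
	size_t i, size = 0;

	/*
	 * Pass 1: (write lock held in the real code) total the pages that
	 * would be newly wired; fail if the limit would be exceeded.
	 */
	for (i = 0; i < n; i++)
		if (e[i].protection != TOY_PROT_NONE && e[i].wired_count == 0)
			size += e[i].end - e[i].start;
	if (size / TOY_PAGE_SIZE > limit_pages)
		return (-1);

	/*
	 * Pass 2: (still write-locked) create any needed anonymous maps,
	 * then bump the wiring count on each wireable entry.
	 */
	for (i = 0; i < n; i++)
		if (e[i].protection != TOY_PROT_NONE)
			e[i].wired_count++;

	/*
	 * Pass 3: (downgraded to a read lock) fault in the pages of every
	 * entry whose wiring count just went from 0 to 1.
	 */
	for (i = 0; i < n; i++)
		if (e[i].wired_count == 1)
			printf("fault in [%lu, %lu)\n",
			    (unsigned long)e[i].start,
			    (unsigned long)e[i].end);
	return (0);
}

int
main(void)
{
	struct toy_entry map[] = {
		{ 0,     8192,  1, 0 },			/* newly wired */
		{ 8192,  16384, 1, 2 },			/* already wired */
		{ 16384, 20480, TOY_PROT_NONE, 0 },	/* skipped */
	};

	return (toy_wire_all(map, 3, 128) == 0 ? 0 : 1);
}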