Source-Changes-HG archive
[src/trunk]: src/sys add a new km flag UVM_KMF_CANFAIL, which causes uvm_km_k...
details: https://anonhg.NetBSD.org/src/rev/2d030ff775c6
branches: trunk
changeset: 536418:2d030ff775c6
user: chs <chs%NetBSD.org@localhost>
date: Sun Sep 15 16:54:26 2002 +0000
description:
add a new km flag UVM_KMF_CANFAIL, which causes uvm_km_kmemalloc() to
return failure if swap is full and there are no free physical pages.
have malloc() use this flag if M_CANFAIL is passed to it.
use M_CANFAIL to allow amap_extend() to fail when memory is scarce.
this should prevent most of the remaining hangs in low-memory situations.
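For illustration only (not part of this changeset): under the semantics described above, a kernel caller that would rather see an allocation fail than sleep forever or panic passes M_CANFAIL together with M_WAITOK and checks for a NULL return. A minimal sketch, assuming a hypothetical helper example_alloc() and the stock M_TEMP malloc type:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>

int
example_alloc(unsigned long len, void **out)	/* hypothetical helper */
{
	void *p;

	/* block for memory if needed, but fail instead of hanging/panicking */
	p = malloc(len, M_TEMP, M_WAITOK | M_CANFAIL);
	if (p == NULL)
		return ENOMEM;	/* swap full, no free pages: caller backs out */
	*out = p;
	return 0;
}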
diffstat:
 sys/kern/kern_malloc.c |   9 ++++---
 sys/uvm/uvm_amap.c     |  53 ++++++++++++++++++++++++++++++++-----------------
 sys/uvm/uvm_amap.h     |   4 +-
 sys/uvm/uvm_extern.h   |   3 +-
 sys/uvm/uvm_km.c       |  13 ++++++++---
 sys/uvm/uvm_map.c      |  21 +++++++++++++------

 6 files changed, 66 insertions(+), 37 deletions(-)
diffs (300 lines):
diff -r 88b45b662bbf -r 2d030ff775c6 sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c Sun Sep 15 16:46:23 2002 +0000
+++ b/sys/kern/kern_malloc.c Sun Sep 15 16:54:26 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_malloc.c,v 1.72 2002/08/25 21:19:41 thorpej Exp $ */
+/* $NetBSD: kern_malloc.c,v 1.73 2002/09/15 16:54:26 chs Exp $ */
/*
* Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.72 2002/08/25 21:19:41 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.73 2002/09/15 16:54:26 chs Exp $");
#include "opt_lockdebug.h"
@@ -257,7 +257,8 @@
npg = btoc(allocsize);
va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
(vsize_t)ctob(npg),
- (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
+ ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
+ ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
if (__predict_false(va == NULL)) {
/*
* Kmem_malloc() can return NULL, even if it can
@@ -270,7 +271,7 @@
if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
panic("malloc: out of space in kmem_map");
splx(s);
- return ((void *) NULL);
+ return (NULL);
}
#ifdef KMEMSTATS
kbp->kb_total += kbp->kb_elmpercl;
diff -r 88b45b662bbf -r 2d030ff775c6 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Sun Sep 15 16:46:23 2002 +0000
+++ b/sys/uvm/uvm_amap.c Sun Sep 15 16:54:26 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.44 2002/06/29 18:27:30 chs Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.45 2002/09/15 16:54:27 chs Exp $ */
/*
*
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.44 2002/06/29 18:27:30 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.45 2002/09/15 16:54:27 chs Exp $");
#undef UVM_AMAP_INLINE /* enable/disable amap inlines */
@@ -282,10 +282,8 @@
* => amap should be unlocked (we will lock it)
* => to safely extend an amap it should have a reference count of
* one (thus it can't be shared)
- * => XXXCDC: needs a waitflag or failure return value?
- * => XXXCDC: support padding at this level?
*/
-void
+int
amap_extend(entry, addsize)
struct vm_map_entry *entry;
vsize_t addsize;
@@ -308,8 +306,7 @@
* there are some unused slots before us in the amap.
*/
- amap_lock(amap); /* lock! */
-
+ amap_lock(amap);
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
slotneed = slotoff + slotmapped + slotadd;
@@ -329,13 +326,14 @@
amap_unlock(amap);
UVMHIST_LOG(maphist,"<- done (case 1), amap = 0x%x, sltneed=%d",
amap, slotneed, 0, 0);
- return; /* done! */
+ return 0;
}
/*
* case 2: we pre-allocated slots for use and we just need to
* bump nslot up to take account for these slots.
*/
+
if (amap->am_maxslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -349,13 +347,15 @@
#endif
amap->am_nslot = slotneed;
amap_unlock(amap);
+
/*
* no need to zero am_anon since that was done at
* alloc time and we never shrink an allocation.
*/
- UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, slotneed=%d",
- amap, slotneed, 0, 0);
- return;
+
+ UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, "
+ "slotneed=%d", amap, slotneed, 0, 0);
+ return 0;
}
/*
@@ -371,19 +371,32 @@
newppref = NULL;
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_NOWAIT);
+ M_WAITOK | M_CANFAIL);
if (newppref == NULL) {
- /* give up if malloc fails */
free(amap->am_ppref, M_UVMAMAP);
amap->am_ppref = PPREF_NONE;
}
}
#endif
- newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP, M_WAITOK);
- newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP, M_WAITOK);
- newover = malloc(slotalloc * sizeof(struct vm_anon *),
- M_UVMAMAP, M_WAITOK);
- amap_lock(amap); /* re-lock! */
+ newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP,
+ M_WAITOK | M_CANFAIL);
+ newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP,
+ M_WAITOK | M_CANFAIL);
+ newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
+ M_WAITOK | M_CANFAIL);
+ if (newsl == NULL || newbck == NULL || newover == NULL) {
+ if (newsl != NULL) {
+ free(newsl, M_UVMAMAP);
+ }
+ if (newbck != NULL) {
+ free(newbck, M_UVMAMAP);
+ }
+ if (newover != NULL) {
+ free(newover, M_UVMAMAP);
+ }
+ return ENOMEM;
+ }
+ amap_lock(amap);
KASSERT(amap->am_maxslot < slotneed);
/*
@@ -400,7 +413,8 @@
/* do am_anon */
oldover = amap->am_anon;
memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
- memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) * slotadded);
+ memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
+ slotadded);
amap->am_anon = newover;
/* do am_bckptr */
@@ -437,6 +451,7 @@
#endif
UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
amap, slotneed, 0, 0);
+ return 0;
}
/*
diff -r 88b45b662bbf -r 2d030ff775c6 sys/uvm/uvm_amap.h
--- a/sys/uvm/uvm_amap.h Sun Sep 15 16:46:23 2002 +0000
+++ b/sys/uvm/uvm_amap.h Sun Sep 15 16:54:26 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.h,v 1.17 2001/06/02 18:09:25 chs Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.18 2002/09/15 16:54:29 chs Exp $ */
/*
*
@@ -91,7 +91,7 @@
boolean_t, vaddr_t, vaddr_t));
void amap_cow_now /* resolve all COW faults now */
__P((struct vm_map *, struct vm_map_entry *));
-void amap_extend /* make amap larger */
+int amap_extend /* make amap larger */
__P((struct vm_map_entry *, vsize_t));
int amap_flags /* get amap's flags */
__P((struct vm_amap *));
diff -r 88b45b662bbf -r 2d030ff775c6 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Sun Sep 15 16:46:23 2002 +0000
+++ b/sys/uvm/uvm_extern.h Sun Sep 15 16:54:26 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.71 2002/05/17 22:00:50 enami Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.72 2002/09/15 16:54:29 chs Exp $ */
/*
*
@@ -164,6 +164,7 @@
*/
#define UVM_KMF_NOWAIT 0x1 /* matches M_NOWAIT */
#define UVM_KMF_VALLOC 0x2 /* allocate VA only */
+#define UVM_KMF_CANFAIL 0x4 /* caller handles failure */
#define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */
/*
diff -r 88b45b662bbf -r 2d030ff775c6 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Sun Sep 15 16:46:23 2002 +0000
+++ b/sys/uvm/uvm_km.c Sun Sep 15 16:54:26 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.57 2002/08/14 15:23:18 thorpej Exp $ */
+/* $NetBSD: uvm_km.c,v 1.58 2002/09/15 16:54:30 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -134,7 +134,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.57 2002/08/14 15:23:18 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.58 2002/09/15 16:54:30 chs Exp $");
#include "opt_uvmhist.h"
@@ -441,10 +441,15 @@
*/
if (__predict_false(pg == NULL)) {
- if (flags & UVM_KMF_NOWAIT) {
+ int t;
+
+ t = uvmexp.active + uvmexp.inactive + uvmexp.free;
+ if ((flags & UVM_KMF_NOWAIT) ||
+ ((flags & UVM_KMF_CANFAIL) &&
+ uvmexp.swpgonly == uvmexp.swpages)) {
/* free everything! */
uvm_unmap(map, kva, kva + size);
- return(0);
+ return (0);
} else {
uvm_wait("km_getwait2"); /* sleep here */
continue;
diff -r 88b45b662bbf -r 2d030ff775c6 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Sun Sep 15 16:46:23 2002 +0000
+++ b/sys/uvm/uvm_map.c Sun Sep 15 16:54:26 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.118 2002/03/08 20:48:47 thorpej Exp $ */
+/* $NetBSD: uvm_map.c,v 1.119 2002/09/15 16:54:31 chs Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.118 2002/03/08 20:48:47 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.119 2002/09/15 16:54:31 chs Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -522,6 +522,7 @@
UVM_MAXPROTECTION(flags);
vm_inherit_t inherit = UVM_INHERIT(flags);
int advice = UVM_ADVICE(flags);
+ int error;
UVMHIST_FUNC("uvm_map");
UVMHIST_CALLED(maphist);
@@ -656,7 +657,16 @@
goto nomerge;
}
- /* got it! */
+ if (prev_entry->aref.ar_amap) {
+ error = amap_extend(prev_entry, size);
+ if (error) {
+ vm_map_unlock(map);
+ if (new_entry) {
+ uvm_mapent_free(new_entry);
+ }
+ return error;
+ }
+ }
UVMCNT_INCR(map_backmerge);
UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
@@ -665,13 +675,10 @@
* drop our reference to uobj since we are extending a reference
* that we already have (the ref count can not drop to zero).
*/
+
if (uobj && uobj->pgops->pgo_detach)
uobj->pgops->pgo_detach(uobj);
- if (prev_entry->aref.ar_amap) {
- amap_extend(prev_entry, size);
- }
-
prev_entry->end += size;
map->size += size;
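Also for illustration (not from this changeset): the kern_malloc.c hunk above maps M_CANFAIL onto UVM_KMF_CANFAIL; a direct uvm_km_kmemalloc() caller that wants the same behaviour would pass the flag itself and treat a zero return as ENOMEM. A rough sketch, assuming the 2002-era signature, kmem_map, and a caller-supplied size:

	vaddr_t va;

	va = uvm_km_kmemalloc(kmem_map, NULL, round_page(size),
	    UVM_KMF_CANFAIL);
	if (va == 0) {
		/* swap is full and no physical pages are free; back out */
		return ENOMEM;
	}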