Source-Changes-HG archive
[src/netbsd-1-6]: src/sys/uvm Pull up revision 1.45 (requested by skrll):
details: https://anonhg.NetBSD.org/src/rev/99e66eb523fb
branches: netbsd-1-6
changeset: 530232:99e66eb523fb
user: tron <tron%NetBSD.org@localhost>
date: Mon Jun 02 14:30:18 2003 +0000
description:
Pull up revision 1.45 (requested by skrll):
add a new km flag UVM_KMF_CANFAIL, which causes uvm_km_kmemalloc() to
return failure if swap is full and there are no free physical pages.
have malloc() use this flag if M_CANFAIL is passed to it.
use M_CANFAIL to allow amap_extend() to fail when memory is scarce.
this should prevent most of the remaining hangs in low-memory situations.
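For context, here is a minimal, purely illustrative sketch (not part of this pull-up) of the allocation pattern the new flag enables: with M_WAITOK | M_CANFAIL, kernel malloc(9) may still sleep waiting for memory, but returns NULL instead of hanging forever once swap is full and no physical pages can be freed. The helper name and the M_TEMP type are assumptions for the example, not code from the tree.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>

/*
 * Hypothetical helper: allocate a slot array, tolerating failure in a
 * low-memory situation instead of sleeping forever inside malloc().
 */
int
example_alloc_slots(size_t nslots, int **slotsp)
{
	int *slots;

	/* may sleep, but fails with NULL when RAM and swap are exhausted */
	slots = malloc(nslots * sizeof(int), M_TEMP, M_WAITOK | M_CANFAIL);
	if (slots == NULL)
		return ENOMEM;		/* let the caller back out */

	*slotsp = slots;
	return 0;
}

The same pattern appears in the hunks below: amap_extend() now allocates with M_WAITOK | M_CANFAIL instead of M_NOWAIT or plain M_WAITOK, frees any partial allocations on failure, and reports ENOMEM to its caller.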
diffstat:
sys/uvm/uvm_amap.c | 53 ++++++++++++++++++++++++++++++++++-------------------
1 files changed, 34 insertions(+), 19 deletions(-)
diffs (133 lines):
diff -r 5371118b510b -r 99e66eb523fb sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Mon Jun 02 14:30:10 2003 +0000
+++ b/sys/uvm/uvm_amap.c Mon Jun 02 14:30:18 2003 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.43 2002/03/28 06:06:29 nathanw Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.43.4.1 2003/06/02 14:30:18 tron Exp $ */
/*
*
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.43 2002/03/28 06:06:29 nathanw Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.43.4.1 2003/06/02 14:30:18 tron Exp $");
#undef UVM_AMAP_INLINE /* enable/disable amap inlines */
@@ -282,10 +282,8 @@
* => amap should be unlocked (we will lock it)
* => to safely extend an amap it should have a reference count of
* one (thus it can't be shared)
- * => XXXCDC: needs a waitflag or failure return value?
- * => XXXCDC: support padding at this level?
*/
-void
+int
amap_extend(entry, addsize)
struct vm_map_entry *entry;
vsize_t addsize;
@@ -308,8 +306,7 @@
* there are some unused slots before us in the amap.
*/
- amap_lock(amap); /* lock! */
-
+ amap_lock(amap);
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
slotneed = slotoff + slotmapped + slotadd;
@@ -329,13 +326,14 @@
amap_unlock(amap);
UVMHIST_LOG(maphist,"<- done (case 1), amap = 0x%x, sltneed=%d",
amap, slotneed, 0, 0);
- return; /* done! */
+ return 0;
}
/*
* case 2: we pre-allocated slots for use and we just need to
* bump nslot up to take account for these slots.
*/
+
if (amap->am_maxslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -349,13 +347,15 @@
#endif
amap->am_nslot = slotneed;
amap_unlock(amap);
+
/*
* no need to zero am_anon since that was done at
* alloc time and we never shrink an allocation.
*/
- UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, slotneed=%d",
- amap, slotneed, 0, 0);
- return;
+
+ UVMHIST_LOG(maphist,"<- done (case 2), amap = 0x%x, "
+ "slotneed=%d", amap, slotneed, 0, 0);
+ return 0;
}
/*
@@ -371,19 +371,32 @@
newppref = NULL;
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_NOWAIT);
+ M_WAITOK | M_CANFAIL);
if (newppref == NULL) {
- /* give up if malloc fails */
free(amap->am_ppref, M_UVMAMAP);
amap->am_ppref = PPREF_NONE;
}
}
#endif
- newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP, M_WAITOK);
- newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP, M_WAITOK);
- newover = malloc(slotalloc * sizeof(struct vm_anon *),
- M_UVMAMAP, M_WAITOK);
- amap_lock(amap); /* re-lock! */
+ newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP,
+ M_WAITOK | M_CANFAIL);
+ newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP,
+ M_WAITOK | M_CANFAIL);
+ newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
+ M_WAITOK | M_CANFAIL);
+ if (newsl == NULL || newbck == NULL || newover == NULL) {
+ if (newsl != NULL) {
+ free(newsl, M_UVMAMAP);
+ }
+ if (newbck != NULL) {
+ free(newbck, M_UVMAMAP);
+ }
+ if (newover != NULL) {
+ free(newover, M_UVMAMAP);
+ }
+ return ENOMEM;
+ }
+ amap_lock(amap);
KASSERT(amap->am_maxslot < slotneed);
/*
@@ -400,7 +413,8 @@
/* do am_anon */
oldover = amap->am_anon;
memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
- memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) * slotadded);
+ memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
+ slotadded);
amap->am_anon = newover;
/* do am_bckptr */
@@ -437,6 +451,7 @@
#endif
UVMHIST_LOG(maphist,"<- done (case 3), amap = 0x%x, slotneed=%d",
amap, slotneed, 0, 0);
+ return 0;
}
/*
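Because amap_extend() now returns an int (0 on success, ENOMEM on failure) instead of void, its callers elsewhere in UVM (not part of this diff) must check the result. A hedged caller-side sketch follows; grow_amap_for_entry() is a made-up wrapper used only to illustrate the pattern, not NetBSD code.

#include <sys/param.h>
#include <sys/errno.h>

#include <uvm/uvm.h>

/*
 * Hypothetical wrapper: try to grow the amap behind a map entry and
 * propagate ENOMEM rather than assuming the extension always succeeds.
 */
static int
grow_amap_for_entry(struct vm_map_entry *entry, vsize_t addsize)
{
	int error;

	error = amap_extend(entry, addsize);
	if (error != 0) {
		/* memory (including swap) is scarce; refuse the request
		 * instead of hanging the machine */
		return error;
	}

	/* ... proceed with whatever needed the larger amap ... */
	return 0;
}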