Source-Changes-HG archive
[src/trunk]: src/sys/uvm Change uvm_km_kmemalloc() to accept flag UVM_KMF_NOW...
details: https://anonhg.NetBSD.org/src/rev/28487a0e3182
branches: trunk
changeset: 539998:28487a0e3182
user: bouyer <bouyer%NetBSD.org@localhost>
date: Sat Nov 30 18:28:04 2002 +0000
description:
Change uvm_km_kmemalloc() to accept flag UVM_KMF_NOWAIT and pass it to
uvm_map(). Change uvm_map() to honor UVM_KMF_NOWAIT. For this, change
amap_extend() to take a flags parameter instead of just a boolean for
direction, and introduce AMAP_EXTEND_FORWARDS and AMAP_EXTEND_NOWAIT flags
(AMAP_EXTEND_BACKWARDS is still defined as 0x0, to keep the code easier to
read).
Add a flag parameter to uvm_mapent_alloc().
This solves a problem where a pool_get(PR_NOWAIT) could trigger a
pool_get(PR_WAITOK) in uvm_mapent_alloc().
Thanks to Chuck Silvers, enami tsugutomo, Andrew Brown and Jason R Thorpe
for feedback.
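
(Illustrative sketch, not part of the commit: a caller that must not sleep
can now pass UVM_KMF_NOWAIT down through uvm_km_kmemalloc() and uvm_map(),
and has to handle failure instead of blocking. The map and object below are
placeholders for whatever the caller normally allocates from; TRYLOCK is
added so the map lock is not slept on either.)

	vaddr_t
	alloc_kva_nowait(struct vm_map *map, struct uvm_object *obj)
	{
		vaddr_t va;

		/* may now fail instead of sleeping for map entries or
		 * amap memory */
		va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
		    UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
		if (va == 0)
			return (0);	/* caller must cope with failure */
		return (va);
	}
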
diffstat:
sys/uvm/uvm_amap.c | 49 ++++++++++++++++++++++---------------------
sys/uvm/uvm_amap.h | 9 ++++---
sys/uvm/uvm_km.c | 10 +++++---
sys/uvm/uvm_map.c | 61 ++++++++++++++++++++++++++++++++++++-----------------
4 files changed, 77 insertions(+), 52 deletions(-)
diffs (truncated from 420 to 300 lines):
diff -r 7cdb37b6b6ff -r 28487a0e3182 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Sat Nov 30 17:57:42 2002 +0000
+++ b/sys/uvm/uvm_amap.c Sat Nov 30 18:28:04 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.47 2002/11/15 17:30:35 atatat Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.48 2002/11/30 18:28:04 bouyer Exp $ */
/*
*
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.47 2002/11/15 17:30:35 atatat Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.48 2002/11/30 18:28:04 bouyer Exp $");
#undef UVM_AMAP_INLINE /* enable/disable amap inlines */
@@ -284,10 +284,10 @@
* one (thus it can't be shared)
*/
int
-amap_extend(entry, addsize, forwards)
+amap_extend(entry, addsize, flags)
struct vm_map_entry *entry;
vsize_t addsize;
- int forwards;
+ int flags;
{
struct vm_amap *amap = entry->aref.ar_amap;
int slotoff = entry->aref.ar_pageoff;
@@ -298,10 +298,13 @@
#endif
int i, *newsl, *newbck, *oldsl, *oldbck;
struct vm_anon **newover, **oldover;
+ int mflag = (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT :
+ (M_WAITOK | M_CANFAIL);
+
UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x, forwards=%d)",
- entry, addsize, forwards, 0);
+ UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x, flags=0x%x)",
+ entry, addsize, flags, 0);
/*
* first, determine how many slots we need in the amap. don't
@@ -312,7 +315,7 @@
amap_lock(amap);
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
slotneed = slotoff + slotmapped + slotadd;
slotadj = 0;
slotspace = 0;
@@ -329,7 +332,7 @@
* adding.
*/
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -366,7 +369,7 @@
*/
if (amap->am_maxslot >= slotneed) {
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
@@ -461,15 +464,12 @@
#ifdef UVM_AMAP_PPREF
newppref = NULL;
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
- newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
#endif
- newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
- newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
+ newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP, mflag);
newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ mflag);
if (newsl == NULL || newbck == NULL || newover == NULL) {
#ifdef UVM_AMAP_PPREF
if (newppref != NULL) {
@@ -495,12 +495,12 @@
*/
slotadded = slotalloc - amap->am_nslot;
- if (!forwards)
+ if (!(flags & AMAP_EXTEND_FORWARDS))
slotspace = slotalloc - slotmapped;
/* do am_slots */
oldsl = amap->am_slots;
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
else
for (i = 0; i < amap->am_nused; i++)
@@ -509,7 +509,7 @@
/* do am_anon */
oldover = amap->am_anon;
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
memcpy(newover, oldover,
sizeof(struct vm_anon *) * amap->am_nslot);
memset(newover + amap->am_nslot, 0,
@@ -524,7 +524,7 @@
/* do am_bckptr */
oldbck = amap->am_bckptr;
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
else
memcpy(newbck + slotspace, oldbck + slotoff,
@@ -535,7 +535,7 @@
/* do ppref */
oldppref = amap->am_ppref;
if (newppref) {
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
memcpy(newppref, oldppref,
sizeof(int) * amap->am_nslot);
memset(newppref + amap->am_nslot, 0,
@@ -545,10 +545,11 @@
sizeof(int) * slotmapped);
}
amap->am_ppref = newppref;
- if (forwards && (slotoff + slotmapped) < amap->am_nslot)
+ if ((flags & AMAP_EXTEND_FORWARDS) &&
+ (slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)), 1);
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
pp_setreflen(newppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
else {
@@ -564,7 +565,7 @@
#endif
/* update master values */
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
amap->am_nslot = slotneed;
else {
entry->aref.ar_pageoff = slotspace - slotadd;
diff -r 7cdb37b6b6ff -r 28487a0e3182 sys/uvm/uvm_amap.h
--- a/sys/uvm/uvm_amap.h Sat Nov 30 17:57:42 2002 +0000
+++ b/sys/uvm/uvm_amap.h Sat Nov 30 18:28:04 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.h,v 1.19 2002/11/14 17:58:48 atatat Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.20 2002/11/30 18:28:05 bouyer Exp $ */
/*
*
@@ -137,10 +137,11 @@
#define AMAP_REFALL 0x2 /* amap_ref: reference entire amap */
/*
- * amap_extend directions
+ * amap_extend flags
*/
-#define AMAP_EXTEND_BACKWARDS 0 /* add "size" to start of map */
-#define AMAP_EXTEND_FORWARDS 1 /* add "size" to end of map */
+#define AMAP_EXTEND_BACKWARDS 0x00 /* add "size" to start of map */
+#define AMAP_EXTEND_FORWARDS 0x01 /* add "size" to end of map */
+#define AMAP_EXTEND_NOWAIT 0x02 /* not allowed to sleep */
#endif /* _KERNEL */
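
(Sketch only, not from the diff: with the new flags parameter, a caller that
cannot sleep combines the direction flag with AMAP_EXTEND_NOWAIT and checks
the return value; "entry" and "addsize" stand for the caller's map entry and
the size being added.)

	int error;

	error = amap_extend(entry, addsize,
	    AMAP_EXTEND_FORWARDS | AMAP_EXTEND_NOWAIT);
	if (error != 0) {
		/* the M_NOWAIT malloc inside amap_extend() failed;
		 * back out instead of sleeping and retrying */
	}
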
diff -r 7cdb37b6b6ff -r 28487a0e3182 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Sat Nov 30 17:57:42 2002 +0000
+++ b/sys/uvm/uvm_km.c Sat Nov 30 18:28:04 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.59 2002/10/05 17:26:06 oster Exp $ */
+/* $NetBSD: uvm_km.c,v 1.60 2002/11/30 18:28:05 bouyer Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -134,7 +134,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.59 2002/10/05 17:26:06 oster Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.60 2002/11/30 18:28:05 bouyer Exp $");
#include "opt_uvmhist.h"
@@ -394,7 +394,8 @@
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
+ UVM_ADV_RANDOM,
+ (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))))
!= 0)) {
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
return(0);
@@ -745,7 +746,8 @@
*/
s = splvm();
- va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
+ va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
+ waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
splx(s);
return (va);
#endif /* PMAP_MAP_POOLPAGE */
diff -r 7cdb37b6b6ff -r 28487a0e3182 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Sat Nov 30 17:57:42 2002 +0000
+++ b/sys/uvm/uvm_map.c Sat Nov 30 18:28:04 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.125 2002/11/14 17:58:48 atatat Exp $ */
+/* $NetBSD: uvm_map.c,v 1.126 2002/11/30 18:28:06 bouyer Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.125 2002/11/14 17:58:48 atatat Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.126 2002/11/30 18:28:06 bouyer Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -190,7 +190,7 @@
* local prototypes
*/
-static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *));
+static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *, int));
static void uvm_mapent_copy __P((struct vm_map_entry *, struct vm_map_entry *));
static void uvm_mapent_free __P((struct vm_map_entry *));
static void uvm_map_entry_unwire __P((struct vm_map *, struct vm_map_entry *));
@@ -206,11 +206,13 @@
*/
static __inline struct vm_map_entry *
-uvm_mapent_alloc(map)
+uvm_mapent_alloc(map, flags)
struct vm_map *map;
+ int flags;
{
struct vm_map_entry *me;
int s;
+ int pflags = (flags & UVM_KMF_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
if (map->flags & VM_MAP_INTRSAFE || cold) {
@@ -220,17 +222,21 @@
if (me) uvm.kentry_free = me->next;
simple_unlock(&uvm.kentry_lock);
splx(s);
- if (me == NULL) {
+ if (__predict_false(me == NULL)) {
panic("uvm_mapent_alloc: out of static map entries, "
"check MAX_KMAPENT (currently %d)",
MAX_KMAPENT);
}
me->flags = UVM_MAP_STATIC;
} else if (map == kernel_map) {
- me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
+ me = pool_get(&uvm_map_entry_kmem_pool, pflags);
+ if (__predict_false(me == NULL))
+ return NULL;
me->flags = UVM_MAP_KMEM;
} else {
- me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
+ me = pool_get(&uvm_map_entry_pool, pflags);
+ if (__predict_false(me == NULL))
+ return NULL;
me->flags = 0;
}
@@ -421,7 +427,7 @@
* starting address.
*/
- new_entry = uvm_mapent_alloc(map);
+ new_entry = uvm_mapent_alloc(map, 0);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
new_entry->end = start;
@@ -471,7 +477,7 @@
* AFTER the specified entry
*/