Subject: Re: lock bug in getnewvnode, or uvm_km_kmemalloc/uvm_map ?
To: Andrew Brown <atatat@atatdot.net>
From: Manuel Bouyer <bouyer@antioche.eu.org>
List: tech-kern
Date: 11/26/2002 22:52:42
--EVF5PPMfhYS0aIcm
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
On Thu, Nov 21, 2002 at 01:53:48PM -0500, Andrew Brown wrote:
> [...]
> >> otoh, perhaps you mean like this (rather contrived) call path:
> >
> >No, I'm just looking at it from an interface point of view.
>
> understood. all i'm trying to do here is talk you out of expending
> the effort of reworking the amap_extend() interface. actually, if you
> were to do that, i'd suggest that instead of adding a new argument,
> you should alter the "forwards" argument (of which only one bit is
> used) to be the flags argument, and put your WAIT/NOWAIT bits in
> there.
OK, so what about the attached change?
--
Manuel Bouyer <bouyer@antioche.eu.org>
NetBSD: 23 ans d'experience feront toujours la difference
--
--EVF5PPMfhYS0aIcm
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename=diff
Index: uvm_amap.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_amap.c,v
retrieving revision 1.47
diff -u -r1.47 uvm_amap.c
--- uvm_amap.c 2002/11/15 17:30:35 1.47
+++ uvm_amap.c 2002/11/26 21:50:48
@@ -284,10 +284,10 @@
* one (thus it can't be shared)
*/
int
-amap_extend(entry, addsize, forwards)
+amap_extend(entry, addsize, flags)
struct vm_map_entry *entry;
vsize_t addsize;
- int forwards;
+ int flags;
{
struct vm_amap *amap = entry->aref.ar_amap;
int slotoff = entry->aref.ar_pageoff;
@@ -300,8 +300,8 @@
struct vm_anon **newover, **oldover;
UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x, forwards=%d)",
- entry, addsize, forwards, 0);
+ UVMHIST_LOG(maphist, " (entry=0x%x, addsize=0x%x, flags=%d)",
+ entry, addsize, flags, 0);
/*
* first, determine how many slots we need in the amap. don't
@@ -312,7 +312,7 @@
amap_lock(amap);
AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
AMAP_B2SLOT(slotadd, addsize); /* slots to add */
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
slotneed = slotoff + slotmapped + slotadd;
slotadj = 0;
slotspace = 0;
@@ -329,7 +329,7 @@
* adding.
*/
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
@@ -366,7 +366,7 @@
*/
if (amap->am_maxslot >= slotneed) {
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
if ((slotoff + slotmapped) < amap->am_nslot)
@@ -462,14 +462,15 @@
newppref = NULL;
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
newppref = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT :
+ (M_WAITOK | M_CANFAIL));
#endif
newsl = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT : (M_WAITOK | M_CANFAIL));
newbck = malloc(slotalloc * sizeof(int), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT : (M_WAITOK | M_CANFAIL));
newover = malloc(slotalloc * sizeof(struct vm_anon *), M_UVMAMAP,
- M_WAITOK | M_CANFAIL);
+ (flags & AMAP_EXTEND_NOWAIT) ? M_NOWAIT : (M_WAITOK | M_CANFAIL));
if (newsl == NULL || newbck == NULL || newover == NULL) {
#ifdef UVM_AMAP_PPREF
if (newppref != NULL) {
@@ -495,12 +496,12 @@
*/
slotadded = slotalloc - amap->am_nslot;
- if (!forwards)
+ if (!(flags & AMAP_EXTEND_FORWARDS))
slotspace = slotalloc - slotmapped;
/* do am_slots */
oldsl = amap->am_slots;
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
else
for (i = 0; i < amap->am_nused; i++)
@@ -509,7 +510,7 @@
/* do am_anon */
oldover = amap->am_anon;
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
memcpy(newover, oldover,
sizeof(struct vm_anon *) * amap->am_nslot);
memset(newover + amap->am_nslot, 0,
@@ -524,7 +525,7 @@
/* do am_bckptr */
oldbck = amap->am_bckptr;
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
else
memcpy(newbck + slotspace, oldbck + slotoff,
@@ -535,7 +536,7 @@
/* do ppref */
oldppref = amap->am_ppref;
if (newppref) {
- if (forwards) {
+ if (flags & AMAP_EXTEND_FORWARDS) {
memcpy(newppref, oldppref,
sizeof(int) * amap->am_nslot);
memset(newppref + amap->am_nslot, 0,
@@ -545,10 +546,11 @@
sizeof(int) * slotmapped);
}
amap->am_ppref = newppref;
- if (forwards && (slotoff + slotmapped) < amap->am_nslot)
+ if ((flags & AMAP_EXTEND_FORWARDS) &&
+ (slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
(amap->am_nslot - (slotoff + slotmapped)), 1);
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
pp_setreflen(newppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
else {
@@ -564,7 +566,7 @@
#endif
/* update master values */
- if (forwards)
+ if (flags & AMAP_EXTEND_FORWARDS)
amap->am_nslot = slotneed;
else {
entry->aref.ar_pageoff = slotspace - slotadd;
Index: uvm_amap.h
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_amap.h,v
retrieving revision 1.19
diff -u -r1.19 uvm_amap.h
--- uvm_amap.h 2002/11/14 17:58:48 1.19
+++ uvm_amap.h 2002/11/26 21:50:48
@@ -137,10 +137,11 @@
#define AMAP_REFALL 0x2 /* amap_ref: reference entire amap */
/*
- * amap_extend directions
+ * amap_extend flags
*/
-#define AMAP_EXTEND_BACKWARDS 0 /* add "size" to start of map */
-#define AMAP_EXTEND_FORWARDS 1 /* add "size" to end of map */
+#define AMAP_EXTEND_BACKWARDS 0x00 /* add "size" to start of map */
+#define AMAP_EXTEND_FORWARDS 0x01 /* add "size" to end of map */
+#define AMAP_EXTEND_NOWAIT 0x02 /* not allowed to sleep */
#endif /* _KERNEL */
Index: uvm_km.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_km.c,v
retrieving revision 1.59
diff -u -r1.59 uvm_km.c
--- uvm_km.c 2002/10/05 17:26:06 1.59
+++ uvm_km.c 2002/11/26 21:50:50
@@ -394,7 +394,8 @@
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
+ UVM_ADV_RANDOM,
+ (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))))
!= 0)) {
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
return(0);
@@ -745,7 +746,8 @@
*/
s = splvm();
- va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
+ va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
+ waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
splx(s);
return (va);
#endif /* PMAP_MAP_POOLPAGE */
Index: uvm_map.c
===================================================================
RCS file: /cvsroot/syssrc/sys/uvm/uvm_map.c,v
retrieving revision 1.125
diff -u -r1.125 uvm_map.c
--- uvm_map.c 2002/11/14 17:58:48 1.125
+++ uvm_map.c 2002/11/26 21:50:55
@@ -190,7 +190,7 @@
* local prototypes
*/
-static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *));
+static struct vm_map_entry *uvm_mapent_alloc __P((struct vm_map *, int));
static void uvm_mapent_copy __P((struct vm_map_entry *, struct vm_map_entry *));
static void uvm_mapent_free __P((struct vm_map_entry *));
static void uvm_map_entry_unwire __P((struct vm_map *, struct vm_map_entry *));
@@ -206,8 +206,9 @@
*/
static __inline struct vm_map_entry *
-uvm_mapent_alloc(map)
+uvm_mapent_alloc(map, flags)
struct vm_map *map;
+ int flags;
{
struct vm_map_entry *me;
int s;
@@ -220,17 +221,21 @@
if (me) uvm.kentry_free = me->next;
simple_unlock(&uvm.kentry_lock);
splx(s);
- if (me == NULL) {
+ if (__predict_false(me == NULL)) {
panic("uvm_mapent_alloc: out of static map entries, "
"check MAX_KMAPENT (currently %d)",
MAX_KMAPENT);
}
me->flags = UVM_MAP_STATIC;
} else if (map == kernel_map) {
- me = pool_get(&uvm_map_entry_kmem_pool, PR_WAITOK);
+ me = pool_get(&uvm_map_entry_kmem_pool, flags);
+ if (__predict_false(me == NULL))
+ return NULL;
me->flags = UVM_MAP_KMEM;
} else {
- me = pool_get(&uvm_map_entry_pool, PR_WAITOK);
+ me = pool_get(&uvm_map_entry_pool, flags);
+ if (__predict_false(me == NULL))
+ return NULL;
me->flags = 0;
}
@@ -421,7 +426,7 @@
* starting address.
*/
- new_entry = uvm_mapent_alloc(map);
+ new_entry = uvm_mapent_alloc(map, PR_WAITOK);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
new_entry->end = start;
@@ -471,7 +476,7 @@
* AFTER the specified entry
*/
- new_entry = uvm_mapent_alloc(map);
+ new_entry = uvm_mapent_alloc(map, PR_WAITOK);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
new_entry->start = entry->end = end;
@@ -572,7 +577,10 @@
new_entry = NULL;
if (map == pager_map) {
- new_entry = uvm_mapent_alloc(map);
+ new_entry = uvm_mapent_alloc(map,
+ (flags & UVM_KMF_NOWAIT) ? PR_NOWAIT : PR_WAITOK);
+ if (__predict_false(new_entry == NULL))
+ return ENOMEM;
}
/*
@@ -681,6 +689,8 @@
if (prev_entry->aref.ar_amap) {
error = amap_extend(prev_entry, size,
+ (flags & UVM_KMF_NOWAIT) ?
+ (AMAP_EXTEND_FORWARDS|AMAP_EXTEND_NOWAIT) :
AMAP_EXTEND_FORWARDS);
if (error) {
vm_map_unlock(map);
@@ -779,6 +789,8 @@
if (amap_extend(prev_entry,
prev_entry->next->end -
prev_entry->next->start,
+ (flags & UVM_KMF_NOWAIT) ?
+ (AMAP_EXTEND_FORWARDS|AMAP_EXTEND_NOWAIT) :
AMAP_EXTEND_FORWARDS))
goto nomerge;
}
@@ -797,7 +809,9 @@
if (amap_extend(prev_entry->next,
prev_entry->end -
prev_entry->start + size,
- AMAP_EXTEND_BACKWARDS))
+ (flags & UVM_KMF_NOWAIT) ?
+ (AMAP_EXTEND_BACKWARDS|AMAP_EXTEND_NOWAIT) :
+ AMAP_EXTEND_BACKWARDS))
goto nomerge;
}
} else {
@@ -807,7 +821,9 @@
*/
if (prev_entry->next->aref.ar_amap) {
error = amap_extend(prev_entry->next, size,
- AMAP_EXTEND_BACKWARDS);
+ (flags & UVM_KMF_NOWAIT) ?
+ (AMAP_EXTEND_BACKWARDS|AMAP_EXTEND_NOWAIT) :
+ AMAP_EXTEND_BACKWARDS);
if (error) {
vm_map_unlock(map);
if (new_entry) {
@@ -879,7 +895,12 @@
*/
if (new_entry == NULL) {
- new_entry = uvm_mapent_alloc(map);
+ new_entry = uvm_mapent_alloc(map,
+ (flags & UVM_KMF_NOWAIT) ? PR_NOWAIT : PR_WAITOK);
+ if (__predict_false(new_entry == NULL)) {
+ vm_map_unlock(map);
+ return ENOMEM;
+ }
}
new_entry->start = *startp;
new_entry->end = new_entry->start + size;
@@ -911,7 +932,14 @@
vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
- struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
+ struct vm_amap *amap = amap_alloc(size, to_add,
+ (flags & UVM_KMF_NOWAIT) ? M_NOWAIT : M_WAITOK);
+ if (__predict_false(amap == NULL)) {
+ vm_map_unlock(map);
+ if (new_entry)
+ uvm_mapent_free(new_entry);
+ return ENOMEM;
+ }
new_entry->aref.ar_pageoff = 0;
new_entry->aref.ar_amap = amap;
} else {
@@ -1697,7 +1725,7 @@
oldoffset = (entry->start + fudge) - start;
/* allocate a new map entry */
- newentry = uvm_mapent_alloc(dstmap);
+ newentry = uvm_mapent_alloc(dstmap, PR_WAITOK);
if (newentry == NULL) {
error = ENOMEM;
goto bad;
@@ -3178,7 +3206,7 @@
/* XXXCDC: WAITOK??? */
}
- new_entry = uvm_mapent_alloc(new_map);
+ new_entry = uvm_mapent_alloc(new_map, PR_WAITOK);
/* old_entry -> new_entry */
uvm_mapent_copy(old_entry, new_entry);
@@ -3215,7 +3243,7 @@
* (note that new references are read-only).
*/
- new_entry = uvm_mapent_alloc(new_map);
+ new_entry = uvm_mapent_alloc(new_map, PR_WAITOK);
/* old_entry -> new_entry */
uvm_mapent_copy(old_entry, new_entry);
--EVF5PPMfhYS0aIcm--