Subject: Re: kernel map entry merging and PR 24039
To: None <thorpej@shagadelic.org>
From: YAMAMOTO Takashi <yamt@mwd.biglobe.ne.jp>
List: tech-kern
Date: 12/04/2004 17:26:21
--NextPart-20041204171856-0539600
Content-Type: Text/Plain; charset=us-ascii
> >> Like maybe ... always allocate the map entry, but hang it off a free
> >> list if you end up merging?
> >
> > does "hang it off a free list" mean "keep it for later splitting" ?
>
> Yes. A private free list for that map.
I made diffs (attached).
They are mostly the same as the backed-out version.
The only difference is that, instead of disabling merging,
the map keeps merged entries for later use when UVM_FLAG_FIXEDSIZE (a new
flag) is specified.  Thus it increases memory usage, but it shouldn't
increase the entry-searching cost.
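
For illustration only (not part of the attached diffs), here is a minimal
sketch in plain C of the idea, with made-up names: when a new mapping merges
into an existing entry, the pre-allocated entry is pushed onto a per-map
free list instead of being freed, and a later split of a fixed-size entry
pops from that list, so the split never needs to allocate.

#include <assert.h>
#include <stddef.h>

/* illustrative stand-ins for struct vm_map_entry / struct vm_map */
struct entry {
	struct entry *next;
	int fixedsize;			/* created with the new flag */
};

struct map {
	struct entry *merged_entries;	/* kept for later splitting */
};

/* on merge: keep the now-unused entry instead of freeing it */
static void
keep_merged_entry(struct map *m, struct entry *e)
{

	e->next = m->merged_entries;
	m->merged_entries = e;
}

/* on clip/split of a fixed-size entry: reuse a kept entry, no allocation */
static struct entry *
take_entry_for_split(struct map *m, const struct entry *old)
{
	struct entry *e;

	assert(old->fixedsize);	/* only fixed-size entries take this path */
	e = m->merged_entries;
	assert(e != NULL);	/* entries kept by earlier merges are expected */
	m->merged_entries = e->next;
	return e;
}

In the diffs below this corresponds to map->merged_entries,
uvm_mapent_free_merged() and uvm_mapent_alloc_split().
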
YAMAMOTO Takashi
--NextPart-20041204171856-0539600
Content-Type: Text/Plain; charset=us-ascii
Content-Disposition: attachment; filename="uvm.mergeent.diff"
Index: uvm/uvm_io.c
===================================================================
--- uvm/uvm_io.c (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:02 2004
@@ -142,7 +142,8 @@ uvm_io(map, uio)
*/
vm_map_lock(kernel_map);
- uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries);
+ uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries,
+ NULL);
vm_map_unlock(kernel_map);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, AMAP_REFALL);
Index: uvm/uvm_km.c
===================================================================
--- uvm/uvm_km.c (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:02 2004
@@ -155,6 +155,7 @@ struct vm_map *kernel_map = NULL;
*/
static struct vm_map kernel_map_store;
+static struct vm_map_entry kernel_first_mapent_store;
/*
* uvm_km_init: init kernel maps and objects to reflect reality (i.e.
@@ -187,13 +188,26 @@ uvm_km_init(start, end)
uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
kernel_map_store.pmap = pmap_kernel();
- if (start != base &&
- uvm_map(&kernel_map_store, &base, start - base, NULL,
- UVM_UNKNOWN_OFFSET, 0,
+ if (start != base) {
+ int error;
+ struct uvm_map_args args;
+
+ error = uvm_map_prepare(&kernel_map_store, base, start - base,
+ NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
- panic("uvm_km_init: could not reserve space for kernel");
+ UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
+ if (!error) {
+ kernel_first_mapent_store.flags =
+ UVM_MAP_KERNEL | UVM_MAP_FIRST;
+ error = uvm_map_enter(&kernel_map_store, &args,
+ &kernel_first_mapent_store);
+ }
+ if (error)
+ panic(
+ "uvm_km_init: could not reserve space for kernel");
+ }
+
/*
* install!
*/
@@ -398,7 +412,8 @@ uvm_km_kmemalloc1(map, obj, size, align,
if (__predict_false(uvm_map(map, &kva, size, obj, prefer, align,
UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
UVM_ADV_RANDOM,
- (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))))
+ (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))
+ | UVM_FLAG_FIXEDSIZE))
!= 0)) {
UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
return(0);
@@ -509,7 +524,7 @@ uvm_km_free_wakeup(map, addr, size)
vm_map_lock(map);
uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
- &dead_entries);
+ &dead_entries, NULL);
wakeup(map);
vm_map_unlock(map);
if (dead_entries != NULL)
@@ -545,7 +560,7 @@ uvm_km_alloc1(map, size, zeroit)
if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
UVM_INH_NONE, UVM_ADV_RANDOM,
- 0)) != 0)) {
+ UVM_FLAG_FIXEDSIZE)) != 0)) {
UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
return(0);
}
@@ -632,6 +647,7 @@ uvm_km_valloc1(map, size, align, prefer,
if (size > vm_map_max(map) - vm_map_min(map))
return (0);
+ flags |= UVM_FLAG_FIXEDSIZE;
for (;;) {
kva = vm_map_min(map); /* hint */
Index: uvm/uvm_extern.h
===================================================================
--- uvm/uvm_extern.h (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:02 2004
@@ -143,6 +143,7 @@ typedef off_t voff_t; /* XXX: offset wi
#define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */
#define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */
#define UVM_FLAG_NOWAIT 0x400000 /* not allowed to sleep */
+#define UVM_FLAG_FIXEDSIZE 0x800000 /* entry will never be split later */
/* macros to extract info */
#define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK)
Index: uvm/uvm_map.c
===================================================================
--- uvm/uvm_map.c (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:03 2004
@@ -101,8 +101,6 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v
#include <uvm/uvm_ddb.h>
#endif
-extern struct vm_map *pager_map;
-
#ifndef UVMMAP_NOCOUNTERS
#include <sys/device.h>
struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
@@ -162,8 +160,6 @@ POOL_INIT(uvm_vmspace_pool, sizeof(struc
POOL_INIT(uvm_map_entry_pool, sizeof(struct vm_map_entry), 0, 0, 0, "vmmpepl",
&pool_allocator_nointr);
-POOL_INIT(uvm_map_entry_kmem_pool, sizeof(struct vm_map_entry), 0, 0, 0,
- "vmmpekpl", NULL);
MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
@@ -184,6 +180,17 @@ vaddr_t uvm_maxkaddr;
*/
/*
+ * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
+ * for the vm_map.
+ *
+ * we exclude pager_map because it needs pager_map_wanted handling
+ * when doing map/unmap.
+ */
+extern struct vm_map *pager_map; /* XXX */
+#define VM_MAP_USE_KMAPENT(map) \
+ (vm_map_pmap(map) == pmap_kernel() && (map) != pager_map)
+
+/*
* uvm_map_entry_link: insert entry into a map
*
* => map must be locked
@@ -243,8 +250,15 @@ vaddr_t uvm_maxkaddr;
static struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *, int);
+static struct vm_map_entry *
+ uvm_mapent_alloc_split(struct vm_map *,
+ const struct vm_map_entry *, int,
+ struct uvm_mapent_reservation *);
static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
static void uvm_mapent_free(struct vm_map_entry *);
+static struct vm_map_entry *
+ uvm_kmapent_alloc(struct vm_map *, int);
+static void uvm_kmapent_free(struct vm_map_entry *);
static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
static void uvm_map_reference_amap(struct vm_map_entry *, int);
static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
@@ -419,6 +433,8 @@ _uvm_tree_sanity(struct vm_map *map, con
* local inlines
*/
+static __inline struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
+
/*
* uvm_mapent_alloc: allocate a map entry
*/
@@ -427,29 +443,11 @@ static __inline struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
struct vm_map_entry *me;
- int s;
int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
- if (map->flags & VM_MAP_INTRSAFE || cold) {
- s = splvm();
- simple_lock(&uvm.kentry_lock);
- me = uvm.kentry_free;
- if (me)
- uvm.kentry_free = me->next;
- simple_unlock(&uvm.kentry_lock);
- splx(s);
- if (__predict_false(me == NULL)) {
- panic("uvm_mapent_alloc: out of static map entries, "
- "check MAX_KMAPENT (currently %d)",
- MAX_KMAPENT);
- }
- me->flags = UVM_MAP_STATIC;
- } else if (map == kernel_map) {
- me = pool_get(&uvm_map_entry_kmem_pool, pflags);
- if (__predict_false(me == NULL))
- return NULL;
- me->flags = UVM_MAP_KMEM;
+ if (VM_MAP_USE_KMAPENT(map)) {
+ me = uvm_kmapent_alloc(map, flags);
} else {
me = pool_get(&uvm_map_entry_pool, pflags);
if (__predict_false(me == NULL))
@@ -463,28 +461,84 @@ uvm_mapent_alloc(struct vm_map *map, int
}
/*
+ * uvm_mapent_alloc_split: allocate a map entry for clipping.
+ */
+
+static __inline struct vm_map_entry *
+uvm_mapent_alloc_split(struct vm_map *map,
+ const struct vm_map_entry *old_entry, int flags,
+ struct uvm_mapent_reservation *umr)
+{
+ struct vm_map_entry *me;
+
+ KASSERT(!VM_MAP_USE_KMAPENT(map) ||
+ (old_entry->flags & UVM_MAP_FIXEDSIZE) || !UMR_EMPTY(umr));
+
+ if (old_entry->flags & UVM_MAP_FIXEDSIZE) {
+ int s;
+
+ s = splvm();
+ simple_lock(&uvm.kentry_lock);
+ me = map->merged_entries;
+ KASSERT(me);
+ map->merged_entries = me->next;
+ simple_unlock(&uvm.kentry_lock);
+ splx(s);
+ KASSERT(me->flags & UVM_MAP_FIXEDSIZE);
+ } else {
+ me = uvm_mapent_alloc(map, flags);
+ }
+
+ return me;
+}
+
+/*
* uvm_mapent_free: free map entry
*/
static __inline void
uvm_mapent_free(struct vm_map_entry *me)
{
- int s;
UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
me, me->flags, 0, 0);
- if (me->flags & UVM_MAP_STATIC) {
+ if (me->flags & UVM_MAP_KERNEL) {
+ uvm_kmapent_free(me);
+ } else {
+ pool_put(&uvm_map_entry_pool, me);
+ }
+}
+
+/*
+ * uvm_mapent_free_merged: free merged map entry
+ *
+ * => keep the entry if needed.
+ * => caller shouldn't hold map locked.
+ */
+
+static __inline void
+uvm_mapent_free_merged(struct vm_map_entry *me)
+{
+
+ if (me->flags & UVM_MAP_FIXEDSIZE) {
+ /*
+ * keep this entry for later splitting.
+ */
+ struct vm_map *map;
+ int s;
+
+ KASSERT(me->flags & UVM_MAP_KERNEL);
+
+ map = uvm_kmapent_map(me);
s = splvm();
simple_lock(&uvm.kentry_lock);
- me->next = uvm.kentry_free;
- uvm.kentry_free = me;
+ me->next = map->merged_entries;
+ map->merged_entries = me;
simple_unlock(&uvm.kentry_lock);
splx(s);
- } else if (me->flags & UVM_MAP_KMEM) {
- pool_put(&uvm_map_entry_kmem_pool, me);
} else {
- pool_put(&uvm_map_entry_pool, me);
+ uvm_mapent_free(me);
}
}
@@ -547,12 +601,10 @@ uvm_map_unreference_amap(struct vm_map_e
void
uvm_map_init(void)
{
- static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
#if defined(UVMHIST)
static struct uvm_history_ent maphistbuf[100];
static struct uvm_history_ent pdhistbuf[100];
#endif
- int lcv;
/*
* first, init logging system.
@@ -565,15 +617,12 @@ uvm_map_init(void)
UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
/*
- * now set up static pool of kernel map entrys ...
+ * initialize the global lock for kernel map entries.
+ *
+ * XXX is it worth having a per-map lock instead?
*/
simple_lock_init(&uvm.kentry_lock);
- uvm.kentry_free = NULL;
- for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
- kernel_map_entry[lcv].next = uvm.kentry_free;
- uvm.kentry_free = &kernel_map_entry[lcv];
- }
}
/*
@@ -591,7 +640,7 @@ uvm_map_init(void)
void
uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
- vaddr_t start)
+ vaddr_t start, struct uvm_mapent_reservation *umr)
{
struct vm_map_entry *new_entry;
vaddr_t new_adj;
@@ -605,8 +654,7 @@ uvm_map_clip_start(struct vm_map *map, s
* entry BEFORE this one, so that this entry has the specified
* starting address.
*/
-
- new_entry = uvm_mapent_alloc(map, 0);
+ new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
new_entry->end = start;
@@ -647,18 +695,19 @@ uvm_map_clip_start(struct vm_map *map, s
*/
void
-uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
+uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
+ struct uvm_mapent_reservation *umr)
{
struct vm_map_entry * new_entry;
vaddr_t new_adj; /* #bytes we move start forward */
uvm_tree_sanity(map, "clip_end entry");
+
/*
* Create a new entry and insert it
* AFTER the specified entry
*/
-
- new_entry = uvm_mapent_alloc(map, 0);
+ new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
new_entry->start = entry->end = end;
@@ -720,19 +769,56 @@ int
uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
{
- struct vm_map_entry *prev_entry, *new_entry;
- const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
- AMAP_EXTEND_NOWAIT : 0;
- vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
- UVM_MAXPROTECTION(flags);
- vm_inherit_t inherit = UVM_INHERIT(flags);
- int advice = UVM_ADVICE(flags);
- int error, merged = 0, kmap = (vm_map_pmap(map) == pmap_kernel());
- UVMHIST_FUNC("uvm_map");
+ struct uvm_map_args args;
+ struct vm_map_entry *new_entry;
+ int error;
+
+ KASSERT((flags & UVM_FLAG_FIXEDSIZE) == 0 || VM_MAP_USE_KMAPENT(map));
+
+ /*
+ * for pager_map, allocate the new entry first to avoid sleeping
+ * for memory while we have the map locked.
+ *
+ * besides, because we allocate entries for in-kernel maps
+ * a bit differently (cf. uvm_kmapent_alloc/free), we need to
+ * allocate them before locking the map.
+ */
+
+ new_entry = NULL;
+ if (VM_MAP_USE_KMAPENT(map) || map == pager_map) {
+ new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
+ if (__predict_false(new_entry == NULL))
+ return ENOMEM;
+ if (flags & UVM_FLAG_FIXEDSIZE)
+ new_entry->flags |= UVM_MAP_FIXEDSIZE;
+ }
+ if (map == pager_map)
+ flags |= UVM_FLAG_NOMERGE;
+
+ error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
+ flags, &args);
+ if (!error) {
+ error = uvm_map_enter(map, &args, new_entry);
+ *startp = args.uma_start;
+ }
+
+ return error;
+}
+
+int
+uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
+ struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
+ struct uvm_map_args *args)
+{
+ struct vm_map_entry *prev_entry;
+ vm_prot_t prot = UVM_PROTECTION(flags);
+ vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
+
+ UVMHIST_FUNC("uvm_map_prepare");
UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
- map, *startp, size, flags);
+ UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
+ map, start, size, flags);
UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
/*
@@ -760,37 +846,19 @@ uvm_map(struct vm_map *map, vaddr_t *sta
}
/*
- * for pager_map, allocate the new entry first to avoid sleeping
- * for memory while we have the map locked.
- */
-
- new_entry = NULL;
- if (map == pager_map) {
- new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
- if (__predict_false(new_entry == NULL))
- return ENOMEM;
- }
-
- /*
* figure out where to put new VM range
*/
if (vm_map_lock_try(map) == FALSE) {
if (flags & UVM_FLAG_TRYLOCK) {
- if (new_entry) {
- uvm_mapent_free(new_entry);
- }
return EAGAIN;
}
vm_map_lock(map); /* could sleep here */
}
- if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
+ if ((prev_entry = uvm_map_findspace(map, start, size, &start,
uobj, uoffset, align, flags)) == NULL) {
UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
vm_map_unlock(map);
- if (new_entry) {
- uvm_mapent_free(new_entry);
- }
return ENOMEM;
}
@@ -799,8 +867,8 @@ uvm_map(struct vm_map *map, vaddr_t *sta
* If the kernel pmap can't map the requested space,
* then allocate more resources for it.
*/
- if (map == kernel_map && uvm_maxkaddr < (*startp + size))
- uvm_maxkaddr = pmap_growkernel(*startp + size);
+ if (map == kernel_map && uvm_maxkaddr < (start + size))
+ uvm_maxkaddr = pmap_growkernel(start + size);
#endif
UVMCNT_INCR(uvm_map_call);
@@ -826,10 +894,59 @@ uvm_map(struct vm_map *map, vaddr_t *sta
} else {
if (uoffset == UVM_UNKNOWN_OFFSET) {
KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
- uoffset = *startp - vm_map_min(kernel_map);
+ uoffset = start - vm_map_min(kernel_map);
}
}
+ args->uma_flags = flags;
+ args->uma_prev = prev_entry;
+ args->uma_start = start;
+ args->uma_size = size;
+ args->uma_uobj = uobj;
+ args->uma_uoffset = uoffset;
+
+ return 0;
+}
+
+int
+uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
+ struct vm_map_entry *new_entry)
+{
+ struct vm_map_entry *prev_entry = args->uma_prev;
+ struct vm_map_entry *dead = NULL;
+
+ const uvm_flag_t flags = args->uma_flags;
+ const vm_prot_t prot = UVM_PROTECTION(flags);
+ const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
+ const vm_inherit_t inherit = UVM_INHERIT(flags);
+ const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
+ AMAP_EXTEND_NOWAIT : 0;
+ const int advice = UVM_ADVICE(flags);
+ const int meflagmask = UVM_MAP_NOMERGE | UVM_MAP_FIXEDSIZE;
+ const int meflagval = (flags & UVM_FLAG_FIXEDSIZE) ?
+ UVM_MAP_FIXEDSIZE : 0;
+
+ vaddr_t start = args->uma_start;
+ vsize_t size = args->uma_size;
+ struct uvm_object *uobj = args->uma_uobj;
+ voff_t uoffset = args->uma_uoffset;
+
+ const int kmap = (vm_map_pmap(map) == pmap_kernel());
+ int merged = 0;
+ int error;
+
+ UVMHIST_FUNC("uvm_map_enter");
+ UVMHIST_CALLED(maphist);
+
+ UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
+ map, start, size, flags);
+ UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
+
+ if (flags & UVM_FLAG_FIXEDSIZE) {
+ KASSERT(new_entry);
+ KASSERT(new_entry->flags & UVM_MAP_FIXEDSIZE);
+ }
+
/*
* try and insert in map by extending previous entry, if possible.
* XXX: we don't try and pull back the next entry. might be useful
@@ -839,11 +956,11 @@ uvm_map(struct vm_map *map, vaddr_t *sta
if (flags & UVM_FLAG_NOMERGE)
goto nomerge;
- if (prev_entry->end == *startp &&
+ if (prev_entry->end == start &&
prev_entry != &map->header &&
prev_entry->object.uvm_obj == uobj) {
- if (prev_entry->flags & UVM_MAP_NOMERGE)
+ if ((prev_entry->flags & meflagmask) != meflagval)
goto forwardmerge;
if (uobj && prev_entry->offset +
@@ -879,13 +996,8 @@ uvm_map(struct vm_map *map, vaddr_t *sta
if (prev_entry->aref.ar_amap) {
error = amap_extend(prev_entry, size,
amapwaitflag | AMAP_EXTEND_FORWARDS);
- if (error) {
- vm_map_unlock(map);
- if (new_entry) {
- uvm_mapent_free(new_entry);
- }
- return error;
- }
+ if (error)
+ goto done;
}
if (kmap)
@@ -908,19 +1020,15 @@ uvm_map(struct vm_map *map, vaddr_t *sta
uvm_tree_sanity(map, "map backmerged");
UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
- if (new_entry) {
- uvm_mapent_free(new_entry);
- new_entry = NULL;
- }
merged++;
}
forwardmerge:
- if (prev_entry->next->start == (*startp + size) &&
+ if (prev_entry->next->start == (start + size) &&
prev_entry->next != &map->header &&
prev_entry->next->object.uvm_obj == uobj) {
- if (prev_entry->next->flags & UVM_MAP_NOMERGE)
+ if ((prev_entry->next->flags & meflagmask) != meflagval)
goto nomerge;
if (uobj && prev_entry->next->offset != uoffset + size)
@@ -1010,13 +1118,8 @@ forwardmerge:
if (prev_entry->next->aref.ar_amap) {
error = amap_extend(prev_entry->next, size,
amapwaitflag | AMAP_EXTEND_BACKWARDS);
- if (error) {
- vm_map_unlock(map);
- if (new_entry) {
- uvm_mapent_free(new_entry);
- }
- return error;
- }
+ if (error)
+ goto done;
}
}
@@ -1045,14 +1148,13 @@ forwardmerge:
uobj->pgops->pgo_detach(uobj);
if (merged) {
- struct vm_map_entry *dead = prev_entry->next;
+ dead = prev_entry->next;
prev_entry->end = dead->end;
uvm_map_entry_unlink(map, dead);
if (dead->aref.ar_amap != NULL) {
prev_entry->aref = dead->aref;
dead->aref.ar_amap = NULL;
}
- uvm_mapent_free(dead);
} else {
prev_entry->next->start -= size;
if (prev_entry != &map->header)
@@ -1064,10 +1166,6 @@ forwardmerge:
uvm_tree_sanity(map, "map forwardmerged");
UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
- if (new_entry) {
- uvm_mapent_free(new_entry);
- new_entry = NULL;
- }
merged++;
}
@@ -1087,11 +1185,11 @@ nomerge:
new_entry = uvm_mapent_alloc(map,
(flags & UVM_FLAG_NOWAIT));
if (__predict_false(new_entry == NULL)) {
- vm_map_unlock(map);
- return ENOMEM;
+ error = ENOMEM;
+ goto done;
}
}
- new_entry->start = *startp;
+ new_entry->start = start;
new_entry->end = new_entry->start + size;
new_entry->object.uvm_obj = uobj;
new_entry->offset = uoffset;
@@ -1127,9 +1225,8 @@ nomerge:
struct vm_amap *amap = amap_alloc(size, to_add,
(flags & UVM_FLAG_NOWAIT) ? M_NOWAIT : M_WAITOK);
if (__predict_false(amap == NULL)) {
- vm_map_unlock(map);
- uvm_mapent_free(new_entry);
- return ENOMEM;
+ error = ENOMEM;
+ goto done;
}
new_entry->aref.ar_pageoff = 0;
new_entry->aref.ar_amap = amap;
@@ -1146,13 +1243,30 @@ nomerge:
if ((map->first_free == prev_entry) &&
(prev_entry->end >= new_entry->start))
map->first_free = new_entry;
+
+ new_entry = NULL;
}
map->size += size;
UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
+
+ error = 0;
+done:
vm_map_unlock(map);
- return 0;
+ if (new_entry) {
+ if (error == 0) {
+ KDASSERT(merged);
+ uvm_mapent_free_merged(new_entry);
+ } else {
+ uvm_mapent_free(new_entry);
+ }
+ }
+ if (dead) {
+ KDASSERT(merged);
+ uvm_mapent_free_merged(dead);
+ }
+ return error;
}
/*
@@ -1683,7 +1797,8 @@ nextgap:
void
uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
- struct vm_map_entry **entry_list /* OUT */)
+ struct vm_map_entry **entry_list /* OUT */,
+ struct uvm_mapent_reservation *umr)
{
struct vm_map_entry *entry, *first_entry, *next;
vaddr_t len;
@@ -1702,7 +1817,7 @@ uvm_unmap_remove(struct vm_map *map, vad
if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
/* clip and go... */
entry = first_entry;
- UVM_MAP_CLIP_START(map, entry, start);
+ UVM_MAP_CLIP_START(map, entry, start, umr);
/* critical! prevents stale hint */
SAVE_HINT(map, entry, entry->prev);
} else {
@@ -1742,7 +1857,9 @@ uvm_unmap_remove(struct vm_map *map, vad
*/
while ((entry != &map->header) && (entry->start < end)) {
- UVM_MAP_CLIP_END(map, entry, end);
+ KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
+
+ UVM_MAP_CLIP_END(map, entry, end, umr);
next = entry->next;
len = entry->end - entry->start;
@@ -1763,8 +1880,11 @@ uvm_unmap_remove(struct vm_map *map, vad
* this is mostly used for kmem_map and mb_map.
*/
- uvm_km_pgremove_intrsafe(entry->start, entry->end);
- pmap_kremove(entry->start, len);
+ if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
+ uvm_km_pgremove_intrsafe(entry->start,
+ entry->end);
+ pmap_kremove(entry->start, len);
+ }
} else if (UVM_ET_ISOBJ(entry) &&
UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
KASSERT(vm_map_pmap(map) == pmap_kernel());
@@ -2160,7 +2280,7 @@ uvm_map_extract(struct vm_map *srcmap, v
* fudge is zero)
*/
- UVM_MAP_CLIP_START(srcmap, entry, start);
+ UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
SAVE_HINT(srcmap, srcmap->hint, entry->prev);
fudge = 0;
}
@@ -2189,7 +2309,7 @@ uvm_map_extract(struct vm_map *srcmap, v
/* if we are not doing a quick reference, clip it */
if ((flags & UVM_EXTRACT_QREF) == 0)
- UVM_MAP_CLIP_END(srcmap, entry, end);
+ UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
/* clear needs_copy (allow chunking) */
if (UVM_ET_ISNEEDSCOPY(entry)) {
@@ -2428,14 +2548,17 @@ uvm_map_submap(struct vm_map *map, vaddr
struct vm_map *submap)
{
struct vm_map_entry *entry;
+ struct uvm_mapent_reservation umr;
int error;
+ uvm_mapent_reserve(map, &umr, 2, 0);
+
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (uvm_map_lookup_entry(map, start, &entry)) {
- UVM_MAP_CLIP_START(map, entry, start);
- UVM_MAP_CLIP_END(map, entry, end); /* to be safe */
+ UVM_MAP_CLIP_START(map, entry, start, &umr);
+ UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
} else {
entry = NULL;
}
@@ -2453,6 +2576,9 @@ uvm_map_submap(struct vm_map *map, vaddr
error = EINVAL;
}
vm_map_unlock(map);
+
+ uvm_mapent_unreserve(map, &umr);
+
return error;
}
@@ -2480,7 +2606,7 @@ uvm_map_protect(struct vm_map *map, vadd
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
if (uvm_map_lookup_entry(map, start, &entry)) {
- UVM_MAP_CLIP_START(map, entry, start);
+ UVM_MAP_CLIP_START(map, entry, start, NULL);
} else {
entry = entry->next;
}
@@ -2524,7 +2650,7 @@ uvm_map_protect(struct vm_map *map, vadd
while ((current != &map->header) && (current->start < end)) {
vm_prot_t old_prot;
- UVM_MAP_CLIP_END(map, current, end);
+ UVM_MAP_CLIP_END(map, current, end, NULL);
old_prot = current->protection;
if (set_max)
current->protection =
@@ -2594,6 +2720,7 @@ uvm_map_protect(struct vm_map *map, vadd
out:
vm_map_unlock(map);
+
UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
return error;
}
@@ -2631,12 +2758,12 @@ uvm_map_inherit(struct vm_map *map, vadd
VM_MAP_RANGE_CHECK(map, start, end);
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
- UVM_MAP_CLIP_START(map, entry, start);
+ UVM_MAP_CLIP_START(map, entry, start, NULL);
} else {
entry = temp_entry->next;
}
while ((entry != &map->header) && (entry->start < end)) {
- UVM_MAP_CLIP_END(map, entry, end);
+ UVM_MAP_CLIP_END(map, entry, end, NULL);
entry->inheritance = new_inheritance;
entry = entry->next;
}
@@ -2663,7 +2790,7 @@ uvm_map_advice(struct vm_map *map, vaddr
VM_MAP_RANGE_CHECK(map, start, end);
if (uvm_map_lookup_entry(map, start, &temp_entry)) {
entry = temp_entry;
- UVM_MAP_CLIP_START(map, entry, start);
+ UVM_MAP_CLIP_START(map, entry, start, NULL);
} else {
entry = temp_entry->next;
}
@@ -2673,7 +2800,7 @@ uvm_map_advice(struct vm_map *map, vaddr
*/
while ((entry != &map->header) && (entry->start < end)) {
- UVM_MAP_CLIP_END(map, entry, end);
+ UVM_MAP_CLIP_END(map, entry, end, NULL);
switch (new_advice) {
case MADV_NORMAL:
@@ -2750,7 +2877,7 @@ uvm_map_pageable(struct vm_map *map, vad
*/
if (new_pageable) { /* unwire */
- UVM_MAP_CLIP_START(map, entry, start);
+ UVM_MAP_CLIP_START(map, entry, start, NULL);
/*
* unwiring. first ensure that the range to be unwired is
@@ -2778,7 +2905,7 @@ uvm_map_pageable(struct vm_map *map, vad
entry = start_entry;
while ((entry != &map->header) && (entry->start < end)) {
- UVM_MAP_CLIP_END(map, entry, end);
+ UVM_MAP_CLIP_END(map, entry, end, NULL);
if (VM_MAPENT_ISWIRED(entry))
uvm_map_entry_unwire(map, entry);
entry = entry->next;
@@ -2831,8 +2958,8 @@ uvm_map_pageable(struct vm_map *map, vad
}
}
}
- UVM_MAP_CLIP_START(map, entry, start);
- UVM_MAP_CLIP_END(map, entry, end);
+ UVM_MAP_CLIP_START(map, entry, start, NULL);
+ UVM_MAP_CLIP_END(map, entry, end, NULL);
entry->wired_count++;
/*
@@ -3598,7 +3725,7 @@ uvmspace_free(struct vmspace *vm)
#endif
if (map->nentries) {
uvm_unmap_remove(map, map->min_offset, map->max_offset,
- &dead_entries);
+ &dead_entries, NULL);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
}
@@ -3846,6 +3973,308 @@ uvmspace_fork(struct vmspace *vm1)
return (vm2);
}
+
+/*
+ * in-kernel map entry allocation.
+ */
+
+int ukh_alloc, ukh_free;
+int uke_alloc, uke_free;
+
+struct uvm_kmapent_hdr {
+ LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
+ int ukh_nused;
+ struct vm_map_entry *ukh_freelist;
+ struct vm_map *ukh_map;
+ struct vm_map_entry ukh_entries[];
+};
+
+#define UVM_KMAPENT_CHUNK \
+ ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
+ / sizeof(struct vm_map_entry))
+
+#define UVM_KHDR_FIND(entry) \
+ ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
+
+static __inline struct vm_map_entry *uvm_kmapent_get(struct uvm_kmapent_hdr *);
+static __inline void uvm_kmapent_put(struct uvm_kmapent_hdr *,
+ struct vm_map_entry *);
+
+static __inline struct vm_map *
+uvm_kmapent_map(struct vm_map_entry *entry)
+{
+ const struct uvm_kmapent_hdr *ukh;
+
+ ukh = UVM_KHDR_FIND(entry);
+ return ukh->ukh_map;
+}
+
+static __inline struct vm_map_entry *
+uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
+{
+ struct vm_map_entry *entry;
+
+ KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
+ KASSERT(ukh->ukh_nused >= 0);
+
+ entry = ukh->ukh_freelist;
+ if (entry) {
+ KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
+ == UVM_MAP_KERNEL);
+ ukh->ukh_freelist = entry->next;
+ ukh->ukh_nused++;
+ KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
+ } else {
+ KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
+ }
+
+ return entry;
+}
+
+static __inline void
+uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
+{
+
+ KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
+ == UVM_MAP_KERNEL);
+ KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
+ KASSERT(ukh->ukh_nused > 0);
+ KASSERT(ukh->ukh_freelist != NULL ||
+ ukh->ukh_nused == UVM_KMAPENT_CHUNK);
+ KASSERT(ukh->ukh_freelist == NULL ||
+ ukh->ukh_nused < UVM_KMAPENT_CHUNK);
+
+ ukh->ukh_nused--;
+ entry->next = ukh->ukh_freelist;
+ ukh->ukh_freelist = entry;
+}
+
+/*
+ * uvm_kmapent_alloc: allocate a map entry for in-kernel map
+ */
+
+static struct vm_map_entry *
+uvm_kmapent_alloc(struct vm_map *map, int flags)
+{
+ struct vm_page *pg;
+ struct uvm_map_args args;
+ struct uvm_kmapent_hdr *ukh;
+ struct vm_map_entry *entry;
+ uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
+ UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
+ vaddr_t va;
+ int error;
+ int i;
+ int s;
+
+ KDASSERT(UVM_KMAPENT_CHUNK > 2);
+ KDASSERT(kernel_map != NULL);
+ KASSERT(vm_map_pmap(map) == pmap_kernel());
+
+ uke_alloc++;
+ entry = NULL;
+again:
+ /*
+ * try to grab an entry from freelist.
+ */
+ s = splvm();
+ simple_lock(&uvm.kentry_lock);
+ ukh = LIST_FIRST(&map->kentry_free);
+ if (ukh) {
+ entry = uvm_kmapent_get(ukh);
+ if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
+ LIST_REMOVE(ukh, ukh_listq);
+ }
+ simple_unlock(&uvm.kentry_lock);
+ splx(s);
+
+ if (entry)
+ return entry;
+
+ /*
+ * there's no free entry for this vm_map.
+ * now we need to allocate some vm_map_entry.
+ * for simplicity, always allocate a one-page chunk of them at once.
+ */
+
+ pg = uvm_pagealloc(NULL, 0, NULL, 0);
+ if (__predict_false(pg == NULL)) {
+ if (flags & UVM_FLAG_NOWAIT)
+ return NULL;
+ uvm_wait("kme_alloc");
+ goto again;
+ }
+
+ error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, 0, 0, mapflags, &args);
+ if (error) {
+ uvm_pagefree(pg);
+ return NULL;
+ }
+
+ va = args.uma_start;
+
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+ pmap_update(vm_map_pmap(map));
+
+ ukh = (void *)va;
+
+ /*
+ * use the first entry for ukh itself.
+ */
+
+ entry = &ukh->ukh_entries[0];
+ entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
+ error = uvm_map_enter(map, &args, entry);
+ KASSERT(error == 0);
+
+ ukh->ukh_nused = UVM_KMAPENT_CHUNK;
+ ukh->ukh_map = map;
+ ukh->ukh_freelist = NULL;
+ for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
+ struct vm_map_entry *entry = &ukh->ukh_entries[i];
+
+ entry->flags = UVM_MAP_KERNEL;
+ uvm_kmapent_put(ukh, entry);
+ }
+ KASSERT(ukh->ukh_nused == 2);
+
+ s = splvm();
+ simple_lock(&uvm.kentry_lock);
+ LIST_INSERT_HEAD(&map->kentry_free, ukh, ukh_listq);
+ simple_unlock(&uvm.kentry_lock);
+ splx(s);
+
+ /*
+ * return second entry.
+ */
+
+ entry = &ukh->ukh_entries[1];
+ entry->flags = UVM_MAP_KERNEL;
+ ukh_alloc++;
+ return entry;
+}
+
+/*
+ * uvm_kmapent_free: free map entry for in-kernel map
+ */
+
+static void
+uvm_kmapent_free(struct vm_map_entry *entry)
+{
+ struct uvm_kmapent_hdr *ukh;
+ struct vm_page *pg;
+ struct vm_map *map;
+ struct pmap *pmap;
+ vaddr_t va;
+ paddr_t pa;
+ struct vm_map_entry *deadentry;
+ int s;
+
+ uke_free++;
+ ukh = UVM_KHDR_FIND(entry);
+ map = ukh->ukh_map;
+
+ s = splvm();
+ simple_lock(&uvm.kentry_lock);
+ uvm_kmapent_put(ukh, entry);
+ if (ukh->ukh_nused > 1) {
+ if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
+ LIST_INSERT_HEAD(&map->kentry_free, ukh, ukh_listq);
+ simple_unlock(&uvm.kentry_lock);
+ splx(s);
+ return;
+ }
+
+ /*
+ * now we can free this ukh.
+ *
+ * however, keep an empty ukh to avoid ping-pong.
+ */
+
+ if (LIST_FIRST(&map->kentry_free) == ukh &&
+ LIST_NEXT(ukh, ukh_listq) == NULL) {
+ simple_unlock(&uvm.kentry_lock);
+ splx(s);
+ return;
+ }
+ LIST_REMOVE(ukh, ukh_listq);
+ simple_unlock(&uvm.kentry_lock);
+ splx(s);
+
+ KASSERT(ukh->ukh_nused == 1);
+
+ /*
+ * remove map entry for ukh itself.
+ */
+
+ va = (vaddr_t)ukh;
+ KASSERT((va & PAGE_MASK) == 0);
+ uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL);
+ KASSERT(deadentry->flags & UVM_MAP_KERNEL);
+ KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
+ KASSERT(deadentry->next == NULL);
+ KASSERT(deadentry == &ukh->ukh_entries[0]);
+
+ /*
+ * unmap the page from pmap and free it.
+ */
+
+ pmap = vm_map_pmap(map);
+ KASSERT(pmap == pmap_kernel());
+ if (!pmap_extract(pmap, va, &pa))
+ panic("%s: no mapping", __func__);
+ pmap_kremove(va, PAGE_SIZE);
+ pg = PHYS_TO_VM_PAGE(pa);
+ uvm_pagefree(pg);
+ ukh_free++;
+}
+
+/*
+ * map entry reservation
+ */
+
+/*
+ * uvm_mapent_reserve: reserve map entries for clipping before locking map.
+ *
+ * => needed when unmapping entries allocated without UVM_FLAG_FIXEDSIZE.
+ * => caller shouldn't hold map locked.
+ */
+int
+uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
+ int nentries, int flags)
+{
+
+ umr->umr_nentries = 0;
+
+ if (!VM_MAP_USE_KMAPENT(map))
+ return 0;
+
+ while (nentries--) {
+ struct vm_map_entry *ent;
+ ent = uvm_kmapent_alloc(map, flags);
+ if (!ent) {
+ uvm_mapent_unreserve(map, umr);
+ return ENOMEM;
+ }
+ UMR_PUTENTRY(umr, ent);
+ }
+
+ return 0;
+}
+
+/*
+ * uvm_mapent_unreserve: release unused reserved map entries.
+ *
+ * => caller shouldn't hold map locked.
+ * => never fail or sleep.
+ */
+void
+uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
+{
+
+ while (!UMR_EMPTY(umr))
+ uvm_kmapent_free(UMR_GETENTRY(umr));
+}
#if defined(DDB)
Index: uvm/uvm_map.h
===================================================================
--- uvm/uvm_map.h (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:04 2004
@@ -86,8 +86,8 @@
* => map must be locked by caller
*/
-#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
- if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }
+#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
+ if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }
/*
* UVM_MAP_CLIP_END: ensure that the entry ends at or before
@@ -96,8 +96,8 @@
* => map must be locked by caller
*/
-#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
- if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }
+#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
+ if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }
/*
* extract flags
@@ -142,8 +142,11 @@ struct vm_map_entry {
#define uvm_map_entry_stop_copy flags
u_int8_t flags; /* flags */
-#define UVM_MAP_STATIC 0x01 /* static map entry */
-#define UVM_MAP_KMEM 0x02 /* from kmem entry pool */
+#define UVM_MAP_KERNEL 0x01 /* kernel map entry */
+#define UVM_MAP_KMAPENT 0x02 /* contains map entries */
+#define UVM_MAP_FIRST 0x04 /* the first special entry */
+#define UVM_MAP_FIXEDSIZE 0x08 /* allocated with
+ * UVM_FLAG_FIXEDSIZE */
#define UVM_MAP_NOMERGE 0x10 /* this entry is not mergable */
};
@@ -226,6 +229,11 @@ struct vm_map {
int flags; /* flags */
struct simplelock flags_lock; /* Lock for flags field */
unsigned int timestamp; /* Version number */
+ LIST_HEAD(, uvm_kmapent_hdr) kentry_free; /* Freelist of map entries */
+
+ struct vm_map_entry *merged_entries;/* Merged entries, kept for
+ * later splitting */
+
#define min_offset header.end
#define max_offset header.start
};
@@ -239,16 +247,29 @@ struct vm_map {
#define VM_MAP_DYING 0x20 /* rw: map is being destroyed */
#define VM_MAP_TOPDOWN 0x40 /* ro: arrange map top-down */
-/* XXX: number of kernel maps and entries to statically allocate */
+#ifdef _KERNEL
+struct uvm_mapent_reservation {
+ struct vm_map_entry *umr_entries[2];
+ int umr_nentries;
+};
+#define UMR_EMPTY(umr) ((umr) == NULL || (umr)->umr_nentries == 0)
+#define UMR_GETENTRY(umr) ((umr)->umr_entries[--(umr)->umr_nentries])
+#define UMR_PUTENTRY(umr, ent) \
+ (umr)->umr_entries[(umr)->umr_nentries++] = (ent)
-#if !defined(MAX_KMAPENT)
-#if (50 + (2 * NPROC) > 1000)
-#define MAX_KMAPENT (50 + (2 * NPROC))
-#else
-#define MAX_KMAPENT 1000 /* XXXCDC: no crash */
-#endif
-#endif /* !defined MAX_KMAPENT */
+struct uvm_map_args {
+ struct vm_map_entry *uma_prev;
+ vaddr_t uma_start;
+ vsize_t uma_size;
+
+ struct uvm_object *uma_uobj;
+ voff_t uma_uoffset;
+
+ uvm_flag_t uma_flags;
+};
+#endif /* _KERNEL */
+
#ifdef _KERNEL
#define vm_map_modflags(map, set, clear) \
do { \
@@ -287,9 +308,9 @@ void uvm_map_deallocate(struct vm_map *
int uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
- vaddr_t);
+ vaddr_t, struct uvm_mapent_reservation *);
void uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
- vaddr_t);
+ vaddr_t, struct uvm_mapent_reservation *);
MAP_INLINE
struct vm_map *uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
@@ -316,8 +337,20 @@ MAP_INLINE
void uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void uvm_unmap_detach(struct vm_map_entry *,int);
void uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
- struct vm_map_entry **);
+ struct vm_map_entry **, struct uvm_mapent_reservation *);
+int uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
+ struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
+ struct uvm_map_args *);
+int uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
+ struct vm_map_entry *);
+
+int uvm_mapent_reserve(struct vm_map *,
+ struct uvm_mapent_reservation *, int, int);
+void uvm_mapent_unreserve(struct vm_map *,
+ struct uvm_mapent_reservation *);
+
+
#endif /* _KERNEL */
/*
@@ -464,6 +497,7 @@ do { \
if (oflags & VM_MAP_WANTLOCK) \
wakeup(&(map)->flags); \
} while (/*CONSTCOND*/ 0)
+
#endif /* _KERNEL */
/*
Index: uvm/uvm_amap.c
===================================================================
--- uvm/uvm_amap.c (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:04 2004
@@ -741,10 +741,10 @@ amap_copy(map, entry, waitf, canchunk, s
UVMHIST_LOG(maphist, " chunk amap ==> clip 0x%x->0x%x"
"to 0x%x->0x%x", entry->start, entry->end, startva,
endva);
- UVM_MAP_CLIP_START(map, entry, startva);
+ UVM_MAP_CLIP_START(map, entry, startva, NULL);
/* watch out for endva wrap-around! */
if (endva >= startva)
- UVM_MAP_CLIP_END(map, entry, endva);
+ UVM_MAP_CLIP_END(map, entry, endva, NULL);
}
UVMHIST_LOG(maphist, "<- done [creating new amap 0x%x->0x%x]",
Index: uvm/uvm.h
===================================================================
--- uvm/uvm.h (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:04 2004
@@ -110,8 +110,6 @@ struct uvm {
struct vm_anon *afree; /* anon free list */
struct simplelock afreelock; /* lock on anon free list */
- /* static kernel map entry pool */
- struct vm_map_entry *kentry_free; /* free page pool */
struct simplelock kentry_lock;
/* aio_done is locked by uvm.pagedaemon_lock and splbio! */
Index: uvm/uvm_map_i.h
===================================================================
--- uvm/uvm_map_i.h (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:04 2004
@@ -120,6 +120,8 @@ uvm_map_setup(struct vm_map *map, vaddr_
simple_lock_init(&map->ref_lock);
simple_lock_init(&map->hint_lock);
simple_lock_init(&map->flags_lock);
+ LIST_INIT(&map->kentry_free);
+ map->merged_entries = NULL;
}
@@ -138,6 +140,7 @@ MAP_INLINE void
uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
{
struct vm_map_entry *dead_entries;
+ struct uvm_mapent_reservation umr;
UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
@@ -146,9 +149,11 @@ uvm_unmap(struct vm_map *map, vaddr_t st
* work now done by helper functions. wipe the pmap's and then
* detach from the dead entries...
*/
+ uvm_mapent_reserve(map, &umr, 2, 0);
vm_map_lock(map);
- uvm_unmap_remove(map, start, end, &dead_entries);
+ uvm_unmap_remove(map, start, end, &dead_entries, &umr);
vm_map_unlock(map);
+ uvm_mapent_unreserve(map, &umr);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
Index: uvm/uvm_mmap.c
===================================================================
--- uvm/uvm_mmap.c (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:04 2004
@@ -686,7 +686,7 @@ sys_munmap(l, v, retval)
return (EINVAL);
}
#endif
- uvm_unmap_remove(map, addr, addr + size, &dead_entries);
+ uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL);
vm_map_unlock(map);
if (dead_entries != NULL)
uvm_unmap_detach(dead_entries, 0);
Index: uvm/uvm_pager.c
===================================================================
--- uvm/uvm_pager.c (revision 968)
+++ /tmp/tmp.2 Sat Dec 4 17:18:05 2004
@@ -224,7 +224,7 @@ uvm_pagermapout(kva, npages)
}
vm_map_lock(pager_map);
- uvm_unmap_remove(pager_map, kva, kva + size, &entries);
+ uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL);
simple_lock(&pager_map_wanted_lock);
if (pager_map_wanted) {
pager_map_wanted = FALSE;
--NextPart-20041204171856-0539600
Content-Type: Text/Plain; charset=us-ascii
Content-Disposition: attachment; filename="i386.diff"
Index: arch/i386/i386/pmap.c
===================================================================
--- arch/i386/i386/pmap.c (working copy)
+++ arch/i386/i386/pmap.c Thu Dec 2 17:53:30 2004
@@ -1492,7 +1492,7 @@ pmap_free_pvpage()
/* unmap the page */
dead_entries = NULL;
uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
- &dead_entries);
+ &dead_entries, NULL);
vm_map_unlock(map);
if (dead_entries != NULL)
--NextPart-20041204171856-0539600--