Source-Changes-HG archive
[src/trunk]: src/sys in the case of !PMAP_MAP_POOLPAGE, gather pool backend a...
details: https://anonhg.NetBSD.org/src/rev/f19bf4adb40a
branches: trunk
changeset: 572390:f19bf4adb40a
user: yamt <yamt%NetBSD.org@localhost>
date: Sat Jan 01 21:08:02 2005 +0000
description:
in the case of !PMAP_MAP_POOLPAGE, gather pool backend allocations into
large chunks for kernel_map and kmem_map to ease kva fragmentation.
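For context: without PMAP_MAP_POOLPAGE, each pool backing page used to be its
own PAGE_SIZE allocation out of kernel_map or kmem_map, leaving page-sized
holes scattered through kernel virtual address space. With this change the
pool backends draw their pages from larger cached chunks (KM_VACACHE_SIZE,
32 pages in this revision), so the maps see far fewer, larger entries. The
following is only a minimal userland sketch of that idea; the names, the
chunk size and the missing free path are illustrative, not the kernel code.

/*
 * Toy userland model of the kva-cache idea: instead of taking each
 * page-sized allocation straight from the (simulated) map, carve pages
 * out of 32-page chunks so the map only sees a few large reservations.
 * All names and sizes here are stand-ins, not UVM's.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE   4096
#define TOY_CHUNK_PAGES 32              /* mirrors KM_VACACHE_SIZE / PAGE_SIZE */

static char *cur_chunk;                 /* current 32-page chunk, if any */
static size_t cur_used;                 /* pages already handed out from it */
static unsigned chunks_reserved;        /* how many "map" reservations were made */

/* hand out one page, reserving a new chunk only when the old one is used up */
static void *
toy_poolpage_alloc(void)
{
        if (cur_chunk == NULL || cur_used == TOY_CHUNK_PAGES) {
                cur_chunk = malloc((size_t)TOY_CHUNK_PAGES * TOY_PAGE_SIZE);
                if (cur_chunk == NULL)
                        return NULL;
                cur_used = 0;
                chunks_reserved++;
        }
        return cur_chunk + (cur_used++ * TOY_PAGE_SIZE);
}

int
main(void)
{
        /* 1000 pool pages cost only ceil(1000/32) = 32 chunk reservations */
        for (int i = 0; i < 1000; i++)
                if (toy_poolpage_alloc() == NULL)
                        return 1;
        printf("%u chunk reservations instead of 1000 single-page ones\n",
            chunks_reserved);
        return 0;
}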
diffstat:
sys/kern/kern_malloc.c | 5 +-
sys/kern/subr_pool.c | 47 ++++++---
sys/uvm/uvm_extern.h | 7 +-
sys/uvm/uvm_km.c | 241 +++++++++++++++++++++++++++++++++++++++++++-----
sys/uvm/uvm_map.h | 9 +-
5 files changed, 262 insertions(+), 47 deletions(-)
diffs (truncated from 506 to 300 lines):
diff -r cd29ff8ce4ca -r f19bf4adb40a sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c Sat Jan 01 21:04:39 2005 +0000
+++ b/sys/kern/kern_malloc.c Sat Jan 01 21:08:02 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_malloc.c,v 1.92 2005/01/01 21:02:13 yamt Exp $ */
+/* $NetBSD: kern_malloc.c,v 1.93 2005/01/01 21:08:02 yamt Exp $ */
/*
* Copyright (c) 1987, 1991, 1993
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.92 2005/01/01 21:02:13 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.93 2005/01/01 21:08:02 yamt Exp $");
#include "opt_lockdebug.h"
@@ -857,6 +857,7 @@
kmem_map = uvm_km_suballoc(kernel_map, &kmb,
&kml, (vsize_t)(nkmempages << PAGE_SHIFT),
VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
+ uvm_km_vacache_init(kmem_map, "kvakmem", 0);
kmembase = (char *)kmb;
kmemlimit = (char *)kml;
#ifdef KMEMSTATS
diff -r cd29ff8ce4ca -r f19bf4adb40a sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c Sat Jan 01 21:04:39 2005 +0000
+++ b/sys/kern/subr_pool.c Sat Jan 01 21:08:02 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $ */
+/* $NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.98 2005/01/01 21:08:02 yamt Exp $");
#include "opt_pool.h"
#include "opt_poollog.h"
@@ -82,6 +82,14 @@
static struct pool psppool;
#endif
+static void *pool_page_alloc_meta(struct pool *, int);
+static void pool_page_free_meta(struct pool *, void *);
+
+/* allocator for pool metadata */
+static struct pool_allocator pool_allocator_meta = {
+ pool_page_alloc_meta, pool_page_free_meta
+};
+
/* # of seconds to retain page after last use */
int pool_inactive_time = 10;
@@ -634,13 +642,7 @@
* XXX LOCKING.
*/
if (phpool[0].pr_size == 0) {
- struct pool_allocator *pa;
int idx;
-#ifdef POOL_SUBPAGE
- pa = &pool_allocator_kmem;
-#else
- pa = NULL;
-#endif
for (idx = 0; idx < PHPOOL_MAX; idx++) {
static char phpool_names[PHPOOL_MAX][6+1+6+1];
int nelem;
@@ -655,14 +657,14 @@
+ nelem * sizeof(uint16_t);
}
pool_init(&phpool[idx], sz, 0, 0, 0,
- phpool_names[idx], pa);
+ phpool_names[idx], &pool_allocator_meta);
}
#ifdef POOL_SUBPAGE
pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
- PR_RECURSIVE, "psppool", &pool_allocator_kmem);
+ PR_RECURSIVE, "psppool", &pool_allocator_meta);
#endif
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
- 0, "pcgpool", NULL);
+ 0, "pcgpool", &pool_allocator_meta);
}
/* Insert into the list of all pools. */
@@ -2240,14 +2242,29 @@
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
- return ((void *) uvm_km_alloc_poolpage(waitok));
+ return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, NULL, waitok));
}
void
pool_page_free(struct pool *pp, void *v)
{
- uvm_km_free_poolpage((vaddr_t) v);
+ uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
+}
+
+static void *
+pool_page_alloc_meta(struct pool *pp, int flags)
+{
+ boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
+
+ return ((void *) uvm_km_alloc_poolpage1(kmem_map, NULL, waitok));
+}
+
+static void
+pool_page_free_meta(struct pool *pp, void *v)
+{
+
+ uvm_km_free_poolpage1(kmem_map, (vaddr_t) v);
}
#ifdef POOL_SUBPAGE
@@ -2292,7 +2309,7 @@
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
- return ((void *) uvm_km_alloc_poolpage1(kernel_map,
+ return ((void *) uvm_km_alloc_poolpage_cache(kernel_map,
uvm.kernel_object, waitok));
}
@@ -2300,6 +2317,6 @@
pool_page_free_nointr(struct pool *pp, void *v)
{
- uvm_km_free_poolpage1(kernel_map, (vaddr_t) v);
+ uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
}
#endif /* POOL_SUBPAGE */
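The subr_pool.c part above does two things: the ordinary pool backends
(pool_page_alloc/pool_page_free and the non-INTRSAFE variants) now go through
the new uvm_km_alloc_poolpage_cache/uvm_km_free_poolpage_cache interface,
while a new pool_allocator_meta keeps the pools holding pool metadata (the
page-header pools, psppool and pcgpool) on the plain uvm_km_alloc_poolpage1
path, presumably so they cannot recurse into the kva cache pool that they
themselves back. A rough userland analogue of that allocator indirection,
with purely illustrative names, might look like:

/*
 * Userland analogue of the pool_allocator indirection used above: each pool
 * picks its backing-page allocator through a pair of function pointers, so
 * "meta" pools can be pointed at a plain allocator while ordinary pools go
 * through the cached one.  Names here are stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_allocator {
        void *(*ta_alloc)(size_t);
        void  (*ta_free)(void *);
};

static void *plain_alloc(size_t sz)  { printf("plain alloc\n");  return malloc(sz); }
static void  plain_free(void *p)     { free(p); }
static void *cached_alloc(size_t sz) { printf("cached alloc\n"); return malloc(sz); }
static void  cached_free(void *p)    { free(p); }

/* analogue of pool_allocator_meta vs. the default cached allocator */
static const struct toy_allocator meta_allocator   = { plain_alloc,  plain_free };
static const struct toy_allocator cached_allocator = { cached_alloc, cached_free };

int
main(void)
{
        void *a = meta_allocator.ta_alloc(4096);    /* metadata pool path */
        void *b = cached_allocator.ta_alloc(4096);  /* ordinary pool path */
        meta_allocator.ta_free(a);
        cached_allocator.ta_free(b);
        return 0;
}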
diff -r cd29ff8ce4ca -r f19bf4adb40a sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Sat Jan 01 21:04:39 2005 +0000
+++ b/sys/uvm/uvm_extern.h Sat Jan 01 21:08:02 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.95 2005/01/01 21:02:13 yamt Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.96 2005/01/01 21:08:02 yamt Exp $ */
/*
*
@@ -609,6 +609,11 @@
vaddr_t uvm_km_alloc_poolpage1(struct vm_map *,
struct uvm_object *, boolean_t);
void uvm_km_free_poolpage1(struct vm_map *, vaddr_t);
+vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *,
+ struct uvm_object *, boolean_t);
+void uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t);
+void uvm_km_vacache_init(struct vm_map *,
+ const char *, size_t);
extern __inline__ vaddr_t
uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t sz, int flags)
diff -r cd29ff8ce4ca -r f19bf4adb40a sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Sat Jan 01 21:04:39 2005 +0000
+++ b/sys/uvm/uvm_km.c Sat Jan 01 21:08:02 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $ */
+/* $NetBSD: uvm_km.c,v 1.72 2005/01/01 21:08:02 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -134,7 +134,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.72 2005/01/01 21:08:02 yamt Exp $");
#include "opt_uvmhist.h"
@@ -142,6 +142,7 @@
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
+#include <sys/pool.h>
#include <uvm/uvm.h>
@@ -158,6 +159,120 @@
static struct vm_map_kernel kernel_map_store;
static struct vm_map_entry kernel_first_mapent_store;
+#if !defined(PMAP_MAP_POOLPAGE)
+
+/*
+ * kva cache
+ *
+ * XXX maybe it's better to do this at the uvm_map layer.
+ */
+
+#define KM_VACACHE_SIZE (32 * PAGE_SIZE) /* XXX tune */
+
+static void *km_vacache_alloc(struct pool *, int);
+static void km_vacache_free(struct pool *, void *);
+static void km_vacache_init(struct vm_map *, const char *, size_t);
+
+/* XXX */
+#define KM_VACACHE_POOL_TO_MAP(pp) \
+ ((struct vm_map *)((char *)(pp) - \
+ offsetof(struct vm_map_kernel, vmk_vacache)))
+
+static void *
+km_vacache_alloc(struct pool *pp, int flags)
+{
+ vaddr_t va;
+ size_t size;
+ struct vm_map *map;
+#if defined(DEBUG)
+ vaddr_t loopva;
+#endif
+ size = pp->pr_alloc->pa_pagesz;
+
+ map = KM_VACACHE_POOL_TO_MAP(pp);
+
+ if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
+ ((flags & PR_WAITOK) ? 0 : UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
+ return NULL;
+
+#if defined(DEBUG)
+ for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
+ if (pmap_extract(pmap_kernel(), loopva, NULL))
+ panic("km_vacache_free: has mapping");
+ }
+#endif
+
+ return (void *)va;
+}
+
+static void
+km_vacache_free(struct pool *pp, void *v)
+{
+ vaddr_t va = (vaddr_t)v;
+ size_t size = pp->pr_alloc->pa_pagesz;
+ struct vm_map *map;
+#if defined(DEBUG)
+ vaddr_t loopva;
+
+ for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
+ if (pmap_extract(pmap_kernel(), loopva, NULL))
+ panic("km_vacache_free: has mapping");
+ }
+#endif
+ map = KM_VACACHE_POOL_TO_MAP(pp);
+ uvm_unmap(map, va, va + size);
+}
+
+/*
+ * km_vacache_init: initialize kva cache.
+ */
+
+static void
+km_vacache_init(struct vm_map *map, const char *name, size_t size)
+{
+ struct vm_map_kernel *vmk;
+ struct pool *pp;
+ struct pool_allocator *pa;
+
+ KASSERT(VM_MAP_IS_KERNEL(map));
+ KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
+
+ vmk = vm_map_to_kernel(map);
+ pp = &vmk->vmk_vacache;
+ pa = &vmk->vmk_vacache_allocator;
+ memset(pa, 0, sizeof(*pa));
+ pa->pa_alloc = km_vacache_alloc;
+ pa->pa_free = km_vacache_free;
+ pa->pa_pagesz = (unsigned int)size;
+ pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);
+
+ /* XXX for now.. */
+ pool_sethiwat(pp, 0);
+}
+
+void
+uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
+{
+
+ map->flags |= VM_MAP_VACACHE;
+ if (size == 0)
+ size = KM_VACACHE_SIZE;
+ km_vacache_init(map, name, size);
+}
+
+#else /* !defined(PMAP_MAP_POOLPAGE) */
+
+void
+uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
+{
+
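The uvm_km.c hunk is cut off here by the 300-line truncation noted above, but
one detail worth spelling out is the KM_VACACHE_POOL_TO_MAP macro: the pool
allocator callbacks only receive a struct pool *, so km_vacache_alloc and
km_vacache_free recover the owning map by subtracting the offset of the
embedded vmk_vacache pool from that pointer. A self-contained sketch of the
same offsetof() idiom, using stand-in types rather than UVM's, is:

/*
 * Standalone illustration of the offsetof() back-pointer idiom behind
 * KM_VACACHE_POOL_TO_MAP: given a pointer to a member embedded in a larger
 * structure, subtract the member's offset to recover the container.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_pool {
        int tp_pagesz;
};

struct toy_map_kernel {
        int             tmk_flags;
        struct toy_pool tmk_vacache;    /* embedded, like vmk_vacache */
};

#define TOY_POOL_TO_MAP(pp) \
        ((struct toy_map_kernel *)((char *)(pp) - \
            offsetof(struct toy_map_kernel, tmk_vacache)))

int
main(void)
{
        struct toy_map_kernel mk = { .tmk_flags = 42 };
        struct toy_pool *pp = &mk.tmk_vacache;

        /* the pool callback only sees pp, yet can still find its map */
        assert(TOY_POOL_TO_MAP(pp) == &mk);
        printf("recovered flags = %d\n", TOY_POOL_TO_MAP(pp)->tmk_flags);
        return 0;
}

This only works because the cache pool really is embedded at a fixed offset
inside the per-map vm_map_kernel structure, which is why km_vacache_init sets
up one pool and one pool_allocator per kernel map rather than sharing them.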