Source-Changes-HG archive
[src/trunk]: src/sys revert previous commit; not yet fully functional, sorry
details: https://anonhg.NetBSD.org/src/rev/18920b990719
branches: trunk
changeset: 784346:18920b990719
user: para <para%NetBSD.org@localhost>
date: Sat Jan 26 15:18:00 2013 +0000
description:
revert previous commit; not yet fully functional, sorry
diffstat:
sys/kern/subr_vmem.c | 236 ++++++++++++++++++++++++++++++----------
sys/rump/librump/rumpkern/vm.c | 7 +-
sys/sys/vmem.h | 8 +-
sys/uvm/uvm_km.c | 15 +-
4 files changed, 190 insertions(+), 76 deletions(-)
diffs (truncated from 556 to 300 lines):
diff -r a87f345b846c -r 18920b990719 sys/kern/subr_vmem.c
--- a/sys/kern/subr_vmem.c Sat Jan 26 13:50:33 2013 +0000
+++ b/sys/kern/subr_vmem.c Sat Jan 26 15:18:00 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $ */
+/* $NetBSD: subr_vmem.c,v 1.79 2013/01/26 15:18:00 para Exp $ */
/*-
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.78 2013/01/26 13:50:33 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.79 2013/01/26 15:18:00 para Exp $");
#if defined(_KERNEL)
#include "opt_ddb.h"
@@ -53,7 +53,6 @@
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
-#include <sys/vmem_impl.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
@@ -62,13 +61,7 @@
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
-#include <stdio.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
#include "../sys/vmem.h"
-#include "../sys/vmem_impl.h"
#endif /* defined(_KERNEL) */
@@ -85,23 +78,28 @@
VMEM_EVCNT_DEFINE(bt_count)
VMEM_EVCNT_DEFINE(bt_inuse)
-#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
-#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
-#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
-#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
+#define LOCK_DECL(name) \
+ kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
+
+#define CONDVAR_DECL(name) \
+ kcondvar_t name
#else /* defined(_KERNEL) */
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
#define VMEM_EVCNT_INCR(ev) /* nothing */
#define VMEM_EVCNT_DECR(ev) /* nothing */
-#define VMEM_CONDVAR_INIT(vm, wchan) /* nothing */
-#define VMEM_CONDVAR_DESTROY(vm) /* nothing */
-#define VMEM_CONDVAR_WAIT(vm) /* nothing */
-#define VMEM_CONDVAR_BROADCAST(vm) /* nothing */
-
#define UNITTEST
#define KASSERT(a) assert(a)
+#define LOCK_DECL(name) /* nothing */
+#define CONDVAR_DECL(name) /* nothing */
+#define VMEM_CONDVAR_INIT(vm, wchan) /* nothing */
+#define VMEM_CONDVAR_BROADCAST(vm) /* nothing */
#define mutex_init(a, b, c) /* nothing */
#define mutex_destroy(a) /* nothing */
#define mutex_enter(a) /* nothing */
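
The LOCK_DECL() macro reintroduced above pads the mutex out to a full COHERENCY_UNIT so that the arena's heavily used lock sits on its own cache line. As a hedged sketch (illustration only, not part of the commit), the CONDVAR_DECL(vm_cv) and LOCK_DECL(vm_lock) lines at the head of struct vmem further down expand in the kernel build to roughly:

	struct vmem {
		kcondvar_t vm_cv;	/* CONDVAR_DECL(vm_cv) */
		kmutex_t vm_lock;	/* LOCK_DECL(vm_lock)... */
		char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)];
		/* ...remaining members as in the diff below... */
	};

In the !_KERNEL (UNITTEST) build both macros expand to nothing, matching the stubbed-out mutex_*() macros above.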
@@ -112,25 +110,74 @@
#define panic(...) printf(__VA_ARGS__); abort()
#endif /* defined(_KERNEL) */
+struct vmem;
+struct vmem_btag;
+
#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define vmem_check(vm) /* nothing */
#endif /* defined(VMEM_SANITY) */
+#define VMEM_MAXORDER (sizeof(vmem_size_t) * CHAR_BIT)
+
#define VMEM_HASHSIZE_MIN 1 /* XXX */
#define VMEM_HASHSIZE_MAX 65536 /* XXX */
#define VMEM_HASHSIZE_INIT 1
#define VM_FITMASK (VM_BESTFIT | VM_INSTANTFIT)
-#if defined(_KERNEL)
-static bool vmem_bootstrapped = false;
-static kmutex_t vmem_list_lock;
-static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
-#endif /* defined(_KERNEL) */
+CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
+LIST_HEAD(vmem_freelist, vmem_btag);
+LIST_HEAD(vmem_hashlist, vmem_btag);
+
+#if defined(QCACHE)
+#define VMEM_QCACHE_IDX_MAX 32
+
+#define QC_NAME_MAX 16
+
+struct qcache {
+ pool_cache_t qc_cache;
+ vmem_t *qc_vmem;
+ char qc_name[QC_NAME_MAX];
+};
+typedef struct qcache qcache_t;
+#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
+#endif /* defined(QCACHE) */
+
+#define VMEM_NAME_MAX 16
-/* ---- misc */
+/* vmem arena */
+struct vmem {
+ CONDVAR_DECL(vm_cv);
+ LOCK_DECL(vm_lock);
+ vm_flag_t vm_flags;
+ vmem_import_t *vm_importfn;
+ vmem_release_t *vm_releasefn;
+ size_t vm_nfreetags;
+ LIST_HEAD(, vmem_btag) vm_freetags;
+ void *vm_arg;
+ struct vmem_seglist vm_seglist;
+ struct vmem_freelist vm_freelist[VMEM_MAXORDER];
+ size_t vm_hashsize;
+ size_t vm_nbusytag;
+ struct vmem_hashlist *vm_hashlist;
+ struct vmem_hashlist vm_hash0;
+ size_t vm_quantum_mask;
+ int vm_quantum_shift;
+ size_t vm_size;
+ size_t vm_inuse;
+ char vm_name[VMEM_NAME_MAX+1];
+ LIST_ENTRY(vmem) vm_alllist;
+
+#if defined(QCACHE)
+ /* quantum cache */
+ size_t vm_qcache_max;
+ struct pool_allocator vm_qcache_allocator;
+ qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
+ qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
+#endif /* defined(QCACHE) */
+};
#define VMEM_LOCK(vm) mutex_enter(&vm->vm_lock)
#define VMEM_TRYLOCK(vm) mutex_tryenter(&vm->vm_lock)
@@ -139,6 +186,44 @@
#define VMEM_LOCK_DESTROY(vm) mutex_destroy(&vm->vm_lock)
#define VMEM_ASSERT_LOCKED(vm) KASSERT(mutex_owned(&vm->vm_lock))
+#if defined(_KERNEL)
+#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
+#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
+#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
+#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
+#endif /* defined(_KERNEL) */
+
+/* boundary tag */
+struct vmem_btag {
+ CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
+ union {
+ LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
+ LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
+ } bt_u;
+#define bt_hashlist bt_u.u_hashlist
+#define bt_freelist bt_u.u_freelist
+ vmem_addr_t bt_start;
+ vmem_size_t bt_size;
+ int bt_type;
+};
+
+#define BT_TYPE_SPAN 1
+#define BT_TYPE_SPAN_STATIC 2
+#define BT_TYPE_FREE 3
+#define BT_TYPE_BUSY 4
+#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
+
+#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
+
+typedef struct vmem_btag bt_t;
+
+#if defined(_KERNEL)
+static kmutex_t vmem_list_lock;
+static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
+#endif /* defined(_KERNEL) */
+
+/* ---- misc */
+
#define VMEM_ALIGNUP(addr, align) \
(-(-(addr) & -(align)))
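
VMEM_ALIGNUP() relies on two's-complement arithmetic: when align is a power of two, -(align) is a mask that clears the low bits, so -(addr) & -(align) rounds the negated address down and the outer negation turns that into rounding addr up. A small standalone illustration (plain userland C, not part of the commit; the names here are hypothetical):

	#include <assert.h>
	#include <stdint.h>

	/* Same expression as VMEM_ALIGNUP(addr, align) above. */
	static uintptr_t
	align_up(uintptr_t addr, uintptr_t align)
	{
		return -(-addr & -align);
	}

	int
	main(void)
	{
		assert(align_up(0x1001, 0x1000) == 0x2000);
		assert(align_up(0x1000, 0x1000) == 0x1000);	/* already aligned */
		assert(align_up(7, 8) == 8);
		return 0;
	}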
@@ -156,26 +241,36 @@
#else /* defined(_KERNEL) */
#define xmalloc(sz, flags) \
- kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
-#define xfree(p, sz) kmem_free(p, sz);
+ kmem_intr_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
+#define xfree(p, sz) kmem_intr_free(p, sz);
/*
+ * Memory for arenas initialized during bootstrap.
+ * There is memory for STATIC_VMEM_COUNT bootstrap arenas.
+ *
* BT_RESERVE calculation:
* we allocate memory for boundry tags with vmem, therefor we have
* to keep a reserve of bts used to allocated memory for bts.
* This reserve is 4 for each arena involved in allocating vmems memory.
* BT_MAXFREE: don't cache excessive counts of bts in arenas
*/
+#define STATIC_VMEM_COUNT 4
#define STATIC_BT_COUNT 200
#define BT_MINRESERVE 4
#define BT_MAXFREE 64
+/* must be equal or greater then qcache multiplier for kmem_va_arena */
+#define STATIC_QC_POOL_COUNT 8
+
+static struct vmem static_vmems[STATIC_VMEM_COUNT];
+static int static_vmem_count = STATIC_VMEM_COUNT;
static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;
-static struct vmem kmem_va_meta_arena_store;
+static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
+static int static_qc_pool_count = STATIC_QC_POOL_COUNT;
+
vmem_t *kmem_va_meta_arena;
-static struct vmem kmem_meta_arena_store;
vmem_t *kmem_meta_arena;
static kmutex_t vmem_refill_lock;
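
The static_vmems[] and static_qc_pools[] arrays added above follow the same bootstrap pattern already used for the static boundary tags: while an arena is created with VM_BOOTSTRAP, storage is handed out of a static array by decrementing the matching *_count, and only later allocations go through xmalloc()/pool_cache_init(). A rough standalone sketch of that pattern (hypothetical names, not the commit's code):

	#include <stdio.h>
	#include <stdlib.h>

	struct resource { int id; };

	#define STATIC_RESOURCE_COUNT 4
	static struct resource static_resources[STATIC_RESOURCE_COUNT];
	static int static_resource_count = STATIC_RESOURCE_COUNT;

	/* Hand out static storage while bootstrapping; fall back to the
	 * regular allocator once the normal allocation path is usable. */
	static struct resource *
	resource_alloc(int bootstrap)
	{
		if (bootstrap) {
			if (static_resource_count <= 0)
				return NULL;	/* static reserve exhausted */
			return &static_resources[--static_resource_count];
		}
		return malloc(sizeof(struct resource));
	}

	int
	main(void)
	{
		struct resource *r = resource_alloc(1);
		printf("bootstrap entry %p, %d left in reserve\n",
		    (void *)r, static_resource_count);
		return 0;
	}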
@@ -557,17 +652,30 @@
snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
vm->vm_name, size);
- pc = pool_cache_init(size,
- ORDER2SIZE(vm->vm_quantum_shift), 0,
- PR_NOALIGN | PR_NOTOUCH /* XXX */,
- qc->qc_name, pa, ipl, NULL, NULL, NULL);
-
+ if (vm->vm_flags & VM_BOOTSTRAP) {
+ KASSERT(static_qc_pool_count > 0);
+ pc = &static_qc_pools[--static_qc_pool_count];
+ pool_cache_bootstrap(pc, size,
+ ORDER2SIZE(vm->vm_quantum_shift), 0,
+ PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
+ qc->qc_name, pa, ipl, NULL, NULL, NULL);
+ } else {
+ pc = pool_cache_init(size,
+ ORDER2SIZE(vm->vm_quantum_shift), 0,
+ PR_NOALIGN | PR_NOTOUCH /* XXX */,
+ qc->qc_name, pa, ipl, NULL, NULL, NULL);
+ }
qc->qc_cache = pc;
KASSERT(qc->qc_cache != NULL); /* XXX */
if (prevqc != NULL &&
qc->qc_cache->pc_pool.pr_itemsperpage ==
prevqc->qc_cache->pc_pool.pr_itemsperpage) {
- pool_cache_destroy(qc->qc_cache);
+ if (vm->vm_flags & VM_BOOTSTRAP) {
+ pool_cache_bootstrap_destroy(pc);
+ //static_qc_pool_count++;
+ } else {
+ pool_cache_destroy(qc->qc_cache);
+ }
vm->vm_qcache[i - 1] = prevqc;
continue;
}
@@ -592,14 +700,18 @@
if (prevqc == qc) {
continue;
}
- pool_cache_destroy(qc->qc_cache);
+ if (vm->vm_flags & VM_BOOTSTRAP) {
+ pool_cache_bootstrap_destroy(qc->qc_cache);
+ } else {
+ pool_cache_destroy(qc->qc_cache);
+ }
prevqc = qc;
}
}
#endif
#if defined(_KERNEL)
-static void
+void