Source-Changes-HG archive
[src/trunk]: src/sys Merge from yamt-pagecache: use radixtree for page lookup.
details: https://anonhg.NetBSD.org/src/rev/7f65f7002980
branches: trunk
changeset: 466188:7f65f7002980
user: ad <ad%NetBSD.org@localhost>
date: Sat Dec 14 17:28:58 2019 +0000
description:
Merge from yamt-pagecache: use radixtree for page lookup.
rbtree page lookup was introduced during the NetBSD 5.0 development cycle to
bypass lock contention problems with the (then) global page hash, and was a
temporary solution to allow us to make progress. radixtree is the intended
replacement.
Ok yamt@.
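The substantive change is the lookup key: the rbtree compared byte
offsets through uvm_page_tree_ops, while the radix tree is indexed
directly by page number. A minimal sketch of the new lookup, using
only the radixtree(9) call that appears in the diff below (the
example_ wrapper is illustrative, not NetBSD source):

	/*
	 * Sketch only: pages are stored under their page index,
	 * i.e. the byte offset shifted down by PAGE_SHIFT, so a
	 * lookup is a direct radix descent rather than a
	 * comparison-based tree search.
	 */
	static struct vm_page *
	example_pagelookup(struct uvm_object *uobj, voff_t off)
	{

		return radix_tree_lookup_node(&uobj->uo_pages,
		    off >> PAGE_SHIFT);
	}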
diffstat:
sys/rump/librump/rumpkern/vm.c | 47 ++++++--------------
sys/uvm/uvm_km.c | 35 ++++++++++----
sys/uvm/uvm_object.c | 12 ++--
sys/uvm/uvm_object.h | 12 ++---
sys/uvm/uvm_page.c | 93 +++++++++++++++--------------------------
5 files changed, 84 insertions(+), 115 deletions(-)
diffs (truncated from 461 to 300 lines):
diff -r b28be7d5ca3f -r 7f65f7002980 sys/rump/librump/rumpkern/vm.c
--- a/sys/rump/librump/rumpkern/vm.c Sat Dec 14 17:24:43 2019 +0000
+++ b/sys/rump/librump/rumpkern/vm.c Sat Dec 14 17:28:58 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vm.c,v 1.174 2019/12/13 20:10:21 ad Exp $ */
+/* $NetBSD: vm.c,v 1.175 2019/12/14 17:28:58 ad Exp $ */
/*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.174 2019/12/13 20:10:21 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.175 2019/12/14 17:28:58 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@@ -52,6 +52,7 @@
#include <sys/mman.h>
#include <sys/null.h>
#include <sys/vnode.h>
+#include <sys/radixtree.h>
#include <machine/pmap.h>
@@ -125,34 +126,6 @@
static struct pglist vmpage_lruqueue;
static unsigned vmpage_onqueue;
-static int
-pg_compare_key(void *ctx, const void *n, const void *key)
-{
- voff_t a = ((const struct vm_page *)n)->offset;
- voff_t b = *(const voff_t *)key;
-
- if (a < b)
- return -1;
- else if (a > b)
- return 1;
- else
- return 0;
-}
-
-static int
-pg_compare_nodes(void *ctx, const void *n1, const void *n2)
-{
-
- return pg_compare_key(ctx, n1, &((const struct vm_page *)n2)->offset);
-}
-
-const rb_tree_ops_t uvm_page_tree_ops = {
- .rbto_compare_nodes = pg_compare_nodes,
- .rbto_compare_key = pg_compare_key,
- .rbto_node_offset = offsetof(struct vm_page, rb_node),
- .rbto_context = NULL
-};
-
/*
* vm pages
*/
@@ -204,7 +177,11 @@
}
TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
- (void)rb_tree_insert_node(&uobj->rb_tree, pg);
+ if (radix_tree_insert_node(&uobj->uo_pages, off >> PAGE_SHIFT,
+ pg) != 0) {
+ pool_cache_put(&pagecache, pg);
+ return NULL;
+ }
/*
* Don't put anons on the LRU page queue. We can't flush them
@@ -232,6 +209,7 @@
uvm_pagefree(struct vm_page *pg)
{
struct uvm_object *uobj = pg->uobject;
+ struct vm_page *pg2 __unused;
KASSERT(mutex_owned(uobj->vmobjlock));
@@ -241,7 +219,8 @@
TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
uobj->uo_npages--;
- rb_tree_remove_node(&uobj->rb_tree, pg);
+ pg2 = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
+ KASSERT(pg == pg2);
if (!UVM_OBJ_IS_AOBJ(uobj)) {
mutex_enter(&vmpage_lruqueue_lock);
@@ -396,6 +375,8 @@
pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
"page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);
+ radix_tree_init();
+
/* create vmspace used by local clients */
rump_vmspace_local = kmem_zalloc(sizeof(*rump_vmspace_local), KM_SLEEP);
uvmspace_init(rump_vmspace_local, &rump_pmap_local, 0, 0, false);
@@ -618,7 +599,7 @@
struct vm_page *pg;
bool ispagedaemon = curlwp == uvm.pagedaemon_lwp;
- pg = rb_tree_find_node(&uobj->rb_tree, &off);
+ pg = radix_tree_lookup_node(&uobj->uo_pages, off >> PAGE_SHIFT);
if (pg && !UVM_OBJ_IS_AOBJ(pg->uobject) && !ispagedaemon) {
mutex_enter(&vmpage_lruqueue_lock);
TAILQ_REMOVE(&vmpage_lruqueue, pg, pageq.queue);
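Two API differences stand out in the vm.c hunks above. Unlike
rb_tree_insert_node(), radix_tree_insert_node() can fail, because
interior radix nodes are allocated on demand; and
radix_tree_remove_node() returns the node it removed, which permits a
cheap consistency assertion. A condensed sketch of both patterns,
assuming the names from the rump code above (the example_ wrappers
are illustrative, not NetBSD source):

	static struct vm_page *
	example_insert(struct uvm_object *uobj, struct vm_page *pg,
	    voff_t off)
	{

		/* May fail: interior nodes are allocated on demand. */
		if (radix_tree_insert_node(&uobj->uo_pages,
		    off >> PAGE_SHIFT, pg) != 0) {
			pool_cache_put(&pagecache, pg);	/* undo alloc */
			return NULL;
		}
		return pg;
	}

	static void
	example_remove(struct uvm_object *uobj, struct vm_page *pg)
	{
		struct vm_page *pg2;

		/* The return value lets the caller assert that the
		 * tree really held the page it expected. */
		pg2 = radix_tree_remove_node(&uobj->uo_pages,
		    pg->offset >> PAGE_SHIFT);
		KASSERT(pg == pg2);
	}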
diff -r b28be7d5ca3f -r 7f65f7002980 sys/uvm/uvm_km.c
--- a/sys/uvm/uvm_km.c Sat Dec 14 17:24:43 2019 +0000
+++ b/sys/uvm/uvm_km.c Sat Dec 14 17:28:58 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_km.c,v 1.151 2019/12/13 20:10:22 ad Exp $ */
+/* $NetBSD: uvm_km.c,v 1.152 2019/12/14 17:28:58 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.151 2019/12/13 20:10:22 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.152 2019/12/14 17:28:58 ad Exp $");
#include "opt_uvmhist.h"
@@ -545,9 +545,7 @@
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
- struct vm_page *pg;
vaddr_t va;
- paddr_t pa;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KDASSERT(VM_MAP_IS_KERNEL(map));
@@ -556,17 +554,32 @@
KDASSERT(end <= vm_map_max(map));
for (va = start; va < end; va += PAGE_SIZE) {
+ paddr_t pa;
+
if (pmap_extract(pmap_kernel(), va, &pa)) {
panic("uvm_km_check_empty: va %p has pa 0x%llx",
(void *)va, (long long)pa);
}
- mutex_enter(uvm_kernel_object->vmobjlock);
- pg = uvm_pagelookup(uvm_kernel_object,
- va - vm_map_min(kernel_map));
- mutex_exit(uvm_kernel_object->vmobjlock);
- if (pg) {
- panic("uvm_km_check_empty: "
- "has page hashed at %p", (const void *)va);
+ /*
+ * kernel_object should not have pages for the corresponding
+ * region. check it.
+ *
+ * why trylock? because:
+ * - caller might not want to block.
+ * - we can recurse when allocating radix_node for
+ * kernel_object.
+ */
+ if (mutex_tryenter(uvm_kernel_object->vmobjlock)) {
+ struct vm_page *pg;
+
+ pg = uvm_pagelookup(uvm_kernel_object,
+ va - vm_map_min(kernel_map));
+ mutex_exit(uvm_kernel_object->vmobjlock);
+ if (pg) {
+ panic("uvm_km_check_empty: "
+ "has page hashed at %p",
+ (const void *)va);
+ }
}
}
}
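The switch from mutex_enter() to mutex_tryenter() in
uvm_km_check_empty() is the subtle part of this hunk: allocating
radix tree interior nodes for kernel_object can itself recurse into
kernel memory allocation, so blocking on the object lock here could
deadlock. Since this is a debug-only emptiness check, skipping it
when the lock is unavailable is acceptable. A condensed restatement
of the pattern from the hunk above (a sketch, not the committed code):

	/* Trylock: the caller may not tolerate blocking, and a
	 * recursive allocation for kernel_object may already hold
	 * the lock. Missing an occasional check is harmless for a
	 * debug assertion. */
	if (mutex_tryenter(uvm_kernel_object->vmobjlock)) {
		struct vm_page *pg;

		pg = uvm_pagelookup(uvm_kernel_object,
		    va - vm_map_min(kernel_map));
		mutex_exit(uvm_kernel_object->vmobjlock);
		if (pg != NULL)
			panic("kernel_object has a page at %p",
			    (const void *)va);
	}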
diff -r b28be7d5ca3f -r 7f65f7002980 sys/uvm/uvm_object.c
--- a/sys/uvm/uvm_object.c Sat Dec 14 17:24:43 2019 +0000
+++ b/sys/uvm/uvm_object.c Sat Dec 14 17:28:58 2019 +0000
@@ -1,7 +1,7 @@
-/* $NetBSD: uvm_object.c,v 1.16 2019/12/13 20:10:22 ad Exp $ */
+/* $NetBSD: uvm_object.c,v 1.17 2019/12/14 17:28:58 ad Exp $ */
/*
- * Copyright (c) 2006, 2010 The NetBSD Foundation, Inc.
+ * Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.16 2019/12/13 20:10:22 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.17 2019/12/14 17:28:58 ad Exp $");
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
@@ -46,7 +46,6 @@
#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/queue.h>
-#include <sys/rbtree.h>
#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>
@@ -77,7 +76,7 @@
LIST_INIT(&uo->uo_ubc);
uo->uo_npages = 0;
uo->uo_refs = refs;
- rb_tree_init(&uo->rb_tree, &uvm_page_tree_ops);
+ radix_tree_init_tree(&uo->uo_pages);
}
/*
@@ -87,7 +86,7 @@
uvm_obj_destroy(struct uvm_object *uo, bool dlock)
{
- KASSERT(rb_tree_iterate(&uo->rb_tree, NULL, RB_DIR_LEFT) == NULL);
+ KASSERT(radix_tree_empty_tree_p(&uo->uo_pages));
/* Purge any UBC entries associated with this object. */
ubc_purge(uo);
@@ -96,6 +95,7 @@
if (dlock) {
mutex_obj_free(uo->vmobjlock);
}
+ radix_tree_fini_tree(&uo->uo_pages);
}
/*
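The uvm_object.c changes pair the tree's lifecycle hooks:
radix_tree_init_tree() when the object is initialized, and an
emptiness assertion followed by radix_tree_fini_tree() when it is
destroyed, so any interior nodes are returned to the allocator. A
condensed sketch of the pairing, using only the calls visible above
(the example_ wrappers are illustrative, not NetBSD source):

	static void
	example_obj_init(struct uvm_object *uo)
	{

		/* The embedded tree starts out empty. */
		radix_tree_init_tree(&uo->uo_pages);
	}

	static void
	example_obj_destroy(struct uvm_object *uo)
	{

		/* Every page must already have been removed. */
		KASSERT(radix_tree_empty_tree_p(&uo->uo_pages));
		radix_tree_fini_tree(&uo->uo_pages);	/* free interior nodes */
	}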
diff -r b28be7d5ca3f -r 7f65f7002980 sys/uvm/uvm_object.h
--- a/sys/uvm/uvm_object.h Sat Dec 14 17:24:43 2019 +0000
+++ b/sys/uvm/uvm_object.h Sat Dec 14 17:28:58 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_object.h,v 1.33 2012/09/14 22:20:50 rmind Exp $ */
+/* $NetBSD: uvm_object.h,v 1.34 2019/12/14 17:28:58 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -31,7 +31,7 @@
#define _UVM_UVM_OBJECT_H_
#include <sys/queue.h>
-#include <sys/rbtree.h>
+#include <sys/radixtree.h>
#include <uvm/uvm_pglist.h>
/*
@@ -54,12 +54,12 @@
*/
struct uvm_object {
- kmutex_t * vmobjlock; /* lock on memq */
+ kmutex_t * vmobjlock; /* lock on object */
const struct uvm_pagerops *pgops; /* pager ops */
struct pglist memq; /* pages in this object */
- int uo_npages; /* # of pages in memq */
+ int uo_npages; /* # of pages in uo_pages */
unsigned uo_refs; /* reference count */
- struct rb_tree rb_tree; /* tree of pages */
+ struct radix_tree uo_pages; /* tree of pages */
LIST_HEAD(,ubc_map) uo_ubc; /* ubc mappings */
};
@@ -112,8 +112,6 @@
#define UVM_OBJ_IS_AOBJ(uobj) \
((uobj)->pgops == &aobj_pager)
-extern const rb_tree_ops_t uvm_page_tree_ops;
-
#endif /* _KERNEL */
#endif /* _UVM_UVM_OBJECT_H_ */
diff -r b28be7d5ca3f -r 7f65f7002980 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Sat Dec 14 17:24:43 2019 +0000
+++ b/sys/uvm/uvm_page.c Sat Dec 14 17:28:58 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.201 2019/12/13 20:10:22 ad Exp $ */
+/* $NetBSD: uvm_page.c,v 1.202 2019/12/14 17:28:58 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.201 2019/12/13 20:10:22 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.202 2019/12/14 17:28:58 ad Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -79,6 +79,7 @@
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>
+#include <sys/radixtree.h>