Source-Changes-HG archive
[src/trunk]: src an implementation of radix tree. the idea from linux.
details: https://anonhg.NetBSD.org/src/rev/465eedfdbddb
branches: trunk
changeset: 762546:465eedfdbddb
user: yamt <yamt%NetBSD.org@localhost>
date: Tue Feb 22 21:31:15 2011 +0000
description:
an implementation of radix tree. the idea from linux.
diffstat:
common/lib/libc/gen/radixtree.c | 1122 +++++++++++++++++++++++++++++++++++++++
sys/sys/radixtree.h | 82 ++
2 files changed, 1204 insertions(+), 0 deletions(-)
diffs (truncated from 1212 to 300 lines):
diff -r e6593b73263b -r 465eedfdbddb common/lib/libc/gen/radixtree.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/common/lib/libc/gen/radixtree.c Tue Feb 22 21:31:15 2011 +0000
@@ -0,0 +1,1122 @@
+/* $NetBSD: radixtree.c,v 1.1 2011/02/22 21:31:15 yamt Exp $ */
+
+/*-
+ * Copyright (c)2011 YAMAMOTO Takashi,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * radix tree
+ *
+ * it's designed to work efficiently with a dense index distribution.
+ * the memory consumption (the number of intermediate nodes needed)
+ * depends heavily on the index distribution; basically, a denser index
+ * distribution consumes fewer nodes per item.
+ * approximately,
+ * the best case: about RADIX_TREE_PTR_PER_NODE items per node.
+ * the worst case: RADIX_TREE_MAX_HEIGHT nodes per item.
+ */
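As a rough illustration of where those best/worst case bounds come from, here is a standalone sketch (not part of the diff) assuming the 4-bits-per-level layout defined further down, i.e. 16 child pointers per node and at most 16 levels for a 64-bit index:

#include <stdint.h>
#include <stdio.h>

/* number of tree levels needed to reach an entry at index "idx",
 * with 4 index bits consumed per level (16 children per node) */
unsigned int
height_for_index(uint64_t idx)
{
	unsigned int height = 0;

	while (idx != 0) {
		idx >>= 4;
		height++;
	}
	return height;
}

int
main(void)
{
	/* dense indexes 0..15 all fit under one height-1 node:
	 * about 16 items per intermediate node (the best case). */
	printf("%u\n", height_for_index(15));		/* 1 */
	/* a lone item with a huge index needs one node per level:
	 * 16 nodes for that single item (the worst case). */
	printf("%u\n", height_for_index(UINT64_MAX));	/* 16 */
	return 0;
}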
+
+#include <sys/cdefs.h>
+
+#if defined(_KERNEL)
+__KERNEL_RCSID(0, "$NetBSD: radixtree.c,v 1.1 2011/02/22 21:31:15 yamt Exp $");
+#include <sys/param.h>
+#include <sys/null.h>
+#include <sys/pool.h>
+#include <sys/radixtree.h>
+#else /* defined(_KERNEL) */
+__RCSID("$NetBSD: radixtree.c,v 1.1 2011/02/22 21:31:15 yamt Exp $");
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#if 1
+#define KASSERT assert
+#else
+#define KASSERT(a) /* nothing */
+#endif
+/* XXX */
+#if !defined(CTASSERT)
+#define CTASSERT(x) /* nothing */
+#endif
+#endif /* defined(_KERNEL) */
+
+#include <sys/radixtree.h>
+
+#define RADIX_TREE_BITS_PER_HEIGHT 4 /* XXX tune */
+#define RADIX_TREE_PTR_PER_NODE (1 << RADIX_TREE_BITS_PER_HEIGHT)
+#define RADIX_TREE_MAX_HEIGHT (64 / RADIX_TREE_BITS_PER_HEIGHT)
+CTASSERT((64 % RADIX_TREE_BITS_PER_HEIGHT) == 0);
+
+CTASSERT(((1 << RADIX_TREE_TAG_ID_MAX) & (sizeof(int) - 1)) == 0);
+#define RADIX_TREE_TAG_MASK ((1 << RADIX_TREE_TAG_ID_MAX) - 1)
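Given these constants, the child slot that an index selects at a given level is just a shift and a mask. A minimal sketch of that decomposition (my own illustration, not code from the commit):

#include <stdint.h>

#define BITS_PER_HEIGHT	4			/* RADIX_TREE_BITS_PER_HEIGHT */
#define PTR_PER_NODE	(1 << BITS_PER_HEIGHT)	/* RADIX_TREE_PTR_PER_NODE */

/* slot within the node that is "level" levels above the leaves */
unsigned int
slot_at_level(uint64_t idx, unsigned int level)
{

	return (idx >> (BITS_PER_HEIGHT * level)) & (PTR_PER_NODE - 1);
}

/* e.g. idx 0x2a5: slot_at_level(0x2a5, 0) == 0x5,
 * slot_at_level(0x2a5, 1) == 0xa, slot_at_level(0x2a5, 2) == 0x2 */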
+
+static inline void *
+entry_ptr(void *p)
+{
+
+ return (void *)((uintptr_t)p & ~RADIX_TREE_TAG_MASK);
+}
+
+static inline unsigned int
+entry_tagmask(void *p)
+{
+
+ return (uintptr_t)p & RADIX_TREE_TAG_MASK;
+}
+
+static inline void *
+entry_compose(void *p, unsigned int tagmask)
+{
+
+ return (void *)((uintptr_t)p | tagmask);
+}
+
+static inline bool
+entry_match_p(void *p, unsigned int tagmask)
+{
+
+ KASSERT(entry_ptr(p) != NULL || entry_tagmask(p) == 0);
+ if (p == NULL) {
+ return false;
+ }
+ if (tagmask == 0) {
+ return true;
+ }
+ return (entry_tagmask(p) & tagmask) != 0;
+}
+
+static inline unsigned int
+tagid_to_mask(radix_tree_tagid_t id)
+{
+
+ return 1U << id;
+}
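These helpers pack the per-entry tag bits into the low bits of each stored pointer, which is why NULL is the only entry allowed to carry no pointer. A small userland sketch of the same trick, using an assumed two-bit tag mask (the actual width follows RADIX_TREE_TAG_ID_MAX from the header):

#include <assert.h>
#include <stdint.h>

#define TAG_MASK	0x3	/* assumed: two tag bits in the pointer's low bits */

int
main(void)
{
	static int item;	/* int alignment leaves the low bits free */
	void *entry;

	/* compose: pointer | tag bits */
	entry = (void *)((uintptr_t)&item | 0x1);
	/* decompose: mask the tag bits back out to recover the pointer */
	assert((void *)((uintptr_t)entry & ~(uintptr_t)TAG_MASK) == &item);
	/* and the tag bits themselves */
	assert(((uintptr_t)entry & TAG_MASK) == 0x1);
	return 0;
}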
+
+/*
+ * radix_tree_node: an intermediate node
+ *
+ * we don't care about the type of leaf nodes; they are just void *.
+ */
+
+struct radix_tree_node {
+ void *n_ptrs[RADIX_TREE_PTR_PER_NODE];
+ unsigned int n_nptrs; /* # of non-NULL pointers in n_ptrs */
+};
+
+static unsigned int
+any_children_tagmask(struct radix_tree_node *n)
+{
+ unsigned int mask;
+ int i;
+
+ mask = 0;
+ for (i = 0; i < RADIX_TREE_PTR_PER_NODE; i++) {
+ mask |= (unsigned int)(uintptr_t)n->n_ptrs[i];
+ }
+ return mask & RADIX_TREE_TAG_MASK;
+}
+
+/*
+ * p_refs[0].pptr == &t->t_root
+ * :
+ * p_refs[n].pptr == &(*p_refs[n-1])->n_ptrs[x]
+ * :
+ * :
+ * p_refs[t->t_height].pptr == &leaf_pointer
+ */
+
+struct radix_tree_path {
+ struct radix_tree_node_ref {
+ void **pptr;
+ } p_refs[RADIX_TREE_MAX_HEIGHT + 1]; /* +1 for the root ptr */
+ int p_lastidx;
+};
+
+static inline void **
+path_pptr(struct radix_tree *t, struct radix_tree_path *p,
+ unsigned int height)
+{
+
+ KASSERT(height <= t->t_height);
+ return p->p_refs[height].pptr;
+}
+
+static inline struct radix_tree_node *
+path_node(struct radix_tree *t, struct radix_tree_path *p, unsigned int height)
+{
+
+ KASSERT(height <= t->t_height);
+ return entry_ptr(*path_pptr(t, p, height));
+}
+
+static inline unsigned int
+path_idx(struct radix_tree *t, struct radix_tree_path *p, unsigned int height)
+{
+
+ KASSERT(height <= t->t_height);
+ return path_pptr(t, p, height + 1) - path_node(t, p, height)->n_ptrs;
+}
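path_idx recovers the child slot by plain pointer arithmetic: the pptr recorded for height + 1 points into the n_ptrs array of the node at height, so subtracting the array base yields the slot number. A tiny standalone illustration of that identity (hypothetical values, not from the commit):

#include <assert.h>

int
main(void)
{
	void *slots[16] = { 0 };	/* stands in for n_ptrs */
	void **pptr = &slots[7];	/* a reference into the array */

	/* subtracting the array base recovers the slot index */
	assert(pptr - slots == 7);
	return 0;
}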
+
+/*
+ * radix_tree_init_tree:
+ *
+ * initialize a tree.
+ */
+
+void
+radix_tree_init_tree(struct radix_tree *t)
+{
+
+ t->t_height = 0;
+ t->t_root = NULL;
+}
+
+/*
+ * radix_tree_fini_tree:
+ *
+ * clean up a tree.
+ */
+
+void
+radix_tree_fini_tree(struct radix_tree *t)
+{
+
+ KASSERT(t->t_root == NULL);
+ KASSERT(t->t_height == 0);
+}
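A minimal userland usage sketch of the lifecycle around these two functions, assuming the insert/lookup/remove entry points declared in sys/sys/radixtree.h keep the shapes this file suggests (their prototypes sit in the truncated part of the diff, so treat the exact signatures as assumptions):

#include <sys/radixtree.h>
#include <assert.h>

int
main(void)
{
	struct radix_tree t;
	int item;

	radix_tree_init_tree(&t);
	/* assumed prototypes: insert/lookup/remove keyed by a 64-bit index */
	if (radix_tree_insert_node(&t, 123, &item) == 0) {
		assert(radix_tree_lookup_node(&t, 123) == &item);
		assert(radix_tree_remove_node(&t, 123) == &item);
	}
	/* the tree must be empty again before fini (see the KASSERTs above) */
	radix_tree_fini_tree(&t);
	return 0;
}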
+
+#if defined(_KERNEL)
+pool_cache_t radix_tree_node_cache;
+
+static int
+radix_tree_node_ctor(void *dummy, void *item, int flags)
+{
+ struct radix_tree_node *n = item;
+
+ KASSERT(dummy == NULL);
+ memset(n, 0, sizeof(*n));
+ return 0;
+}
+
+/*
+ * radix_tree_init:
+ *
+ * initialize the subsystem.
+ */
+
+void
+radix_tree_init(void)
+{
+
+ radix_tree_node_cache = pool_cache_init(sizeof(struct radix_tree_node),
+ 0, 0, 0, "radix_tree_node", NULL, IPL_NONE, radix_tree_node_ctor,
+ NULL, NULL);
+ KASSERT(radix_tree_node_cache != NULL);
+}
+#endif /* defined(_KERNEL) */
+
+static bool __unused
+radix_tree_node_clean_p(const struct radix_tree_node *n)
+{
+ unsigned int i;
+
+ if (n->n_nptrs != 0) {
+ return false;
+ }
+ for (i = 0; i < RADIX_TREE_PTR_PER_NODE; i++) {
+ if (n->n_ptrs[i] != NULL) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static struct radix_tree_node *
+radix_tree_alloc_node(void)
+{
+ struct radix_tree_node *n;
+
+#if defined(_KERNEL)
+ n = pool_cache_get(radix_tree_node_cache, PR_NOWAIT);
+#else /* defined(_KERNEL) */
+ n = calloc(1, sizeof(*n));
+#endif /* defined(_KERNEL) */
+ KASSERT(n == NULL || radix_tree_node_clean_p(n));
+ return n;
+}
+
+static void
+radix_tree_free_node(struct radix_tree_node *n)
+{
+
+ KASSERT(radix_tree_node_clean_p(n));
+#if defined(_KERNEL)
+ pool_cache_put(radix_tree_node_cache, n);
+#else /* defined(_KERNEL) */
+ free(n);
+#endif /* defined(_KERNEL) */
+}
+
+static int
+radix_tree_grow(struct radix_tree *t, unsigned int newheight)
+{
+ const unsigned int tagmask = entry_tagmask(t->t_root);
+
+ KASSERT(newheight <= 64 / RADIX_TREE_BITS_PER_HEIGHT);
+ if (t->t_root == NULL) {
+ t->t_height = newheight;
+ return 0;
+ }
+ while (t->t_height < newheight) {
+ struct radix_tree_node *n;
+
+ n = radix_tree_alloc_node();