Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys introduce a new flag for pool_init, PR_NOTOUCH.
details: https://anonhg.NetBSD.org/src/rev/cd29ff8ce4ca
branches: trunk
changeset: 572389:cd29ff8ce4ca
user: yamt <yamt%NetBSD.org@localhost>
date: Sat Jan 01 21:04:39 2005 +0000
description:
introduce a new flag for pool_init, PR_NOTOUCH.
If it is specified, free items are not used as storage for internal state,
so that pools can be used for objects that are not memory-backed.
Inspired by Solaris's KMC_NOTOUCH.
diffstat:
sys/kern/subr_pool.c | 270 ++++++++++++++++++++++++++++++++++++++------------
sys/sys/pool.h | 4 +-
2 files changed, 209 insertions(+), 65 deletions(-)
diffs (truncated from 468 to 300 lines):
diff -r 0913c34572b3 -r cd29ff8ce4ca sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c Sat Jan 01 21:02:12 2005 +0000
+++ b/sys/kern/subr_pool.c Sat Jan 01 21:04:39 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $ */
+/* $NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.96 2004/06/20 18:19:27 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.97 2005/01/01 21:04:39 yamt Exp $");
#include "opt_pool.h"
#include "opt_poollog.h"
@@ -73,7 +73,9 @@
TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
/* Private pool for page header structures */
-static struct pool phpool;
+#define PHPOOL_MAX 8
+static struct pool phpool[PHPOOL_MAX];
+#define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
#ifdef POOL_SUBPAGE
/* Pool of subpages for use by normal pools. */
@@ -93,13 +95,29 @@
/* Page headers */
LIST_ENTRY(pool_item_header)
ph_pagelist; /* pool page list */
- TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */
SPLAY_ENTRY(pool_item_header)
ph_node; /* Off-page page headers */
- unsigned int ph_nmissing; /* # of chunks in use */
caddr_t ph_page; /* this page's address */
struct timeval ph_time; /* last referenced */
+ union {
+ /* !PR_NOTOUCH */
+ struct {
+ TAILQ_HEAD(, pool_item)
+ phu_itemlist; /* chunk list for this page */
+ } phu_normal;
+ /* PR_NOTOUCH */
+ struct {
+ uint16_t
+ phu_off; /* start offset in page */
+ uint16_t
+ phu_firstfree; /* first free item */
+ } phu_notouch;
+ } ph_u;
+ uint16_t ph_nmissing; /* # of chunks in use */
};
+#define ph_itemlist ph_u.phu_normal.phu_itemlist
+#define ph_off ph_u.phu_notouch.phu_off
+#define ph_firstfree ph_u.phu_notouch.phu_firstfree
struct pool_item {
#ifdef DIAGNOSTIC
@@ -152,7 +170,7 @@
void *pool_allocator_alloc(struct pool *, int);
void pool_allocator_free(struct pool *, void *);
-static void pool_print_pagelist(struct pool_pagelist *,
+static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
void (*)(const char *, ...));
static void pool_print1(struct pool *, const char *,
void (*)(const char *, ...));
@@ -279,6 +297,49 @@
#endif /* POOL_DIAGNOSTIC */
static __inline int
+pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
+ const void *v)
+{
+ const char *cp = v;
+ int idx;
+
+ KASSERT(pp->pr_roflags & PR_NOTOUCH);
+ idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
+ KASSERT(idx < pp->pr_itemsperpage);
+ return idx;
+}
+
+#define PR_FREELIST_ALIGN(p) roundup((uintptr_t)(p), sizeof(uint16_t))
+#define PR_FREELIST(ph) ((uint16_t *)PR_FREELIST_ALIGN((ph) + 1))
+#define PR_INDEX_USED ((uint16_t)-1)
+#define PR_INDEX_EOL ((uint16_t)-2)
+
+static __inline void
+pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
+ void *obj)
+{
+ int idx = pr_item_notouch_index(pp, ph, obj);
+ uint16_t *freelist = PR_FREELIST(ph);
+
+ KASSERT(freelist[idx] == PR_INDEX_USED);
+ freelist[idx] = ph->ph_firstfree;
+ ph->ph_firstfree = idx;
+}
+
+static __inline void *
+pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
+{
+ int idx = ph->ph_firstfree;
+ uint16_t *freelist = PR_FREELIST(ph);
+
+ KASSERT(freelist[idx] != PR_INDEX_USED);
+ ph->ph_firstfree = freelist[idx];
+ freelist[idx] = PR_INDEX_USED;
+
+ return ph->ph_page + ph->ph_off + idx * pp->pr_size;
+}
+
+static __inline int
phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
{
if (a->ph_page < b->ph_page)
@@ -346,7 +407,7 @@
pool_allocator_free(pp, ph->ph_page);
if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
s = splvm();
- pool_put(&phpool, ph);
+ pool_put(pp->pr_phpool, ph);
splx(s);
}
}
@@ -490,8 +551,9 @@
/* See the comment below about reserved bytes. */
trysize = palloc->pa_pagesz - ((align - ioff) % align);
phsize = ALIGN(sizeof(struct pool_item_header));
- if (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
- trysize / pp->pr_size == (trysize - phsize) / pp->pr_size) {
+ if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
+ (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
+ trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
/* Use the end of the page for the page header */
pp->pr_roflags |= PR_PHINPAGE;
pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
@@ -509,6 +571,30 @@
*/
pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
KASSERT(pp->pr_itemsperpage != 0);
+ if ((pp->pr_roflags & PR_NOTOUCH)) {
+ int idx;
+
+ for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
+ idx++) {
+ /* nothing */
+ }
+ if (idx >= PHPOOL_MAX) {
+ /*
+ * if you see this panic, consider to tweak
+ * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
+ */
+ panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
+ pp->pr_wchan, pp->pr_itemsperpage);
+ }
+ pp->pr_phpool = &phpool[idx];
+ } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
+ pp->pr_phpool = &phpool[0];
+ }
+#if defined(DIAGNOSTIC)
+ else {
+ pp->pr_phpool = NULL;
+ }
+#endif
/*
* Use the slack between the chunks and the page header
@@ -547,15 +633,33 @@
* haven't done so yet.
* XXX LOCKING.
*/
- if (phpool.pr_size == 0) {
+ if (phpool[0].pr_size == 0) {
+ struct pool_allocator *pa;
+ int idx;
#ifdef POOL_SUBPAGE
- pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
- "phpool", &pool_allocator_kmem);
+ pa = &pool_allocator_kmem;
+#else
+ pa = NULL;
+#endif
+ for (idx = 0; idx < PHPOOL_MAX; idx++) {
+ static char phpool_names[PHPOOL_MAX][6+1+6+1];
+ int nelem;
+ size_t sz;
+
+ nelem = PHPOOL_FREELIST_NELEM(idx);
+ snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
+ "phpool-%d", nelem);
+ sz = sizeof(struct pool_item_header);
+ if (nelem) {
+ sz = PR_FREELIST_ALIGN(sz)
+ + nelem * sizeof(uint16_t);
+ }
+ pool_init(&phpool[idx], sz, 0, 0, 0,
+ phpool_names[idx], pa);
+ }
+#ifdef POOL_SUBPAGE
pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
PR_RECURSIVE, "psppool", &pool_allocator_kmem);
-#else
- pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
- 0, "phpool", NULL);
#endif
pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
0, "pcgpool", NULL);
@@ -648,7 +752,7 @@
ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
else {
s = splvm();
- ph = pool_get(&phpool, flags);
+ ph = pool_get(pp->pr_phpool, flags);
splx(s);
}
@@ -815,38 +919,53 @@
/* Start the allocation process over. */
goto startover;
}
- if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
- pr_leave(pp);
- simple_unlock(&pp->pr_slock);
- panic("pool_get: %s: page empty", pp->pr_wchan);
- }
+ if (pp->pr_roflags & PR_NOTOUCH) {
#ifdef DIAGNOSTIC
- if (__predict_false(pp->pr_nitems == 0)) {
- pr_leave(pp);
- simple_unlock(&pp->pr_slock);
- printf("pool_get: %s: items on itemlist, nitems %u\n",
- pp->pr_wchan, pp->pr_nitems);
- panic("pool_get: nitems inconsistent");
- }
+ if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
+ pr_leave(pp);
+ simple_unlock(&pp->pr_slock);
+ panic("pool_get: %s: page empty", pp->pr_wchan);
+ }
+#endif
+ v = pr_item_notouch_get(pp, ph);
+#ifdef POOL_DIAGNOSTIC
+ pr_log(pp, v, PRLOG_GET, file, line);
+#endif
+ } else {
+ v = pi = TAILQ_FIRST(&ph->ph_itemlist);
+ if (__predict_false(v == NULL)) {
+ pr_leave(pp);
+ simple_unlock(&pp->pr_slock);
+ panic("pool_get: %s: page empty", pp->pr_wchan);
+ }
+#ifdef DIAGNOSTIC
+ if (__predict_false(pp->pr_nitems == 0)) {
+ pr_leave(pp);
+ simple_unlock(&pp->pr_slock);
+ printf("pool_get: %s: items on itemlist, nitems %u\n",
+ pp->pr_wchan, pp->pr_nitems);
+ panic("pool_get: nitems inconsistent");
+ }
#endif
#ifdef POOL_DIAGNOSTIC
- pr_log(pp, v, PRLOG_GET, file, line);
+ pr_log(pp, v, PRLOG_GET, file, line);
#endif
#ifdef DIAGNOSTIC
- if (__predict_false(pi->pi_magic != PI_MAGIC)) {
- pr_printlog(pp, pi, printf);
- panic("pool_get(%s): free list modified: magic=%x; page %p;"
- " item addr %p\n",
- pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
- }
+ if (__predict_false(pi->pi_magic != PI_MAGIC)) {
+ pr_printlog(pp, pi, printf);
+ panic("pool_get(%s): free list modified: "
+ "magic=%x; page %p; item addr %p\n",
+ pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
+ }
#endif
- /*
- * Remove from item list.
- */
- TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
+ /*
+ * Remove from item list.
+ */
+ TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list);
+ }
pp->pr_nitems--;
pp->pr_nout++;
if (ph->ph_nmissing == 0) {
@@ -864,9 +983,10 @@
LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
Home |
Main Index |
Thread Index |
Old Index