Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/kern Don't use PR_URGENT to allocate page header. We do...
details: https://anonhg.NetBSD.org/src/rev/48819f48aca9
branches: trunk
changeset: 503028:48819f48aca9
user: enami <enami%NetBSD.org@localhost>
date: Mon Jan 29 02:38:02 2001 +0000
description:
Don't use PR_URGENT to allocate the page header. We don't want to just panic
on memory shortage. Instead, use the same wait/nowait condition as the
item requested, and simply clean up and return failure if we can't allocate a
page header while we aren't allowed to wait.
diffstat:
sys/kern/subr_pool.c | 41 ++++++++++++++++++++++++++++++++---------
1 files changed, 32 insertions(+), 9 deletions(-)
diffs (108 lines):
diff -r 5287ac9909d4 -r 48819f48aca9 sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c Mon Jan 29 01:51:05 2001 +0000
+++ b/sys/kern/subr_pool.c Mon Jan 29 02:38:02 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_pool.c,v 1.49 2001/01/14 02:06:21 thorpej Exp $ */
+/* $NetBSD: subr_pool.c,v 1.50 2001/01/29 02:38:02 enami Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -145,7 +145,7 @@
static void pool_cache_reclaim(struct pool_cache *);
static int pool_catchup(struct pool *);
-static void pool_prime_page(struct pool *, caddr_t);
+static int pool_prime_page(struct pool *, caddr_t, int);
static void *pool_page_alloc(unsigned long, int, int);
static void pool_page_free(void *, unsigned long, int);
@@ -714,8 +714,18 @@
}
/* We have more memory; add it to the pool */
+ if (pool_prime_page(pp, v, flags & PR_WAITOK) != 0) {
+ /*
+ * Probably we aren't allowed to wait and
+ * couldn't allocate a page header.
+ */
+ (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+ pp->pr_nfail++;
+ pr_leave(pp);
+ simple_unlock(&pp->pr_slock);
+ return (NULL);
+ }
pp->pr_npagealloc++;
- pool_prime_page(pp, v);
/* Start the allocation process over. */
goto startover;
@@ -962,7 +972,7 @@
pool_prime(struct pool *pp, int n, caddr_t storage)
{
caddr_t cp;
- int newnitems, newpages;
+ int error, newnitems, newpages;
#ifdef DIAGNOSTIC
if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))
@@ -992,8 +1002,14 @@
return (ENOMEM);
}
+ if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
+ if ((pp->pr_roflags & PR_STATIC) == 0)
+ (*pp->pr_free)(cp, pp->pr_pagesz,
+ pp->pr_mtype);
+ simple_unlock(&pp->pr_slock);
+ return (error);
+ }
pp->pr_npagealloc++;
- pool_prime_page(pp, cp);
pp->pr_minpages++;
}
@@ -1011,8 +1027,8 @@
*
* Note, we must be called with the pool descriptor LOCKED.
*/
-static void
-pool_prime_page(struct pool *pp, caddr_t storage)
+static int
+pool_prime_page(struct pool *pp, caddr_t storage, int flags)
{
struct pool_item *pi;
struct pool_item_header *ph;
@@ -1028,8 +1044,10 @@
ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
} else {
s = splhigh();
- ph = pool_get(&phpool, PR_URGENT);
+ ph = pool_get(&phpool, flags);
splx(s);
+ if (ph == NULL)
+ return (ENOMEM);
LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
ph, ph_hashlist);
}
@@ -1083,6 +1101,8 @@
if (++pp->pr_npages > pp->pr_hiwat)
pp->pr_hiwat = pp->pr_npages;
+
+ return (0);
}
/*
@@ -1129,8 +1149,11 @@
error = ENOMEM;
break;
}
+ if ((error = pool_prime_page(pp, cp, PR_NOWAIT)) != 0) {
+ (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+ break;
+ }
pp->pr_npagealloc++;
- pool_prime_page(pp, cp);
}
return (error);
Home |
Main Index |
Thread Index |
Old Index