Source-Changes-HG archive
[src/trunk]: src/sys pool_page_alloc, pool_page_alloc_meta: avoid extra compa...
details: https://anonhg.NetBSD.org/src/rev/a7089a46d00b
branches: trunk
changeset: 773089:a7089a46d00b
user: rmind <rmind@NetBSD.org>
date: Sat Jan 28 00:00:06 2012 +0000
description:
pool_page_alloc, pool_page_alloc_meta: avoid extra compare, use const.
ffs_mountfs,sys_swapctl: replace memset with kmem_zalloc.
sys_swapctl: move kmem_free outside the lock path.
uvm_init: fix comment, remove pointless numeration of steps.
uvm_map_enter: remove meflagval variable.
Fix some indentation.
diffstat:
sys/kern/subr_pool.c | 58 +++++++++++++++++++++----------------------
sys/kern/vfs_bio.c | 17 +++++-------
sys/ufs/ffs/ffs_vfsops.c | 7 ++---
sys/uvm/uvm_init.c | 51 ++++++++++++++++++--------------------
sys/uvm/uvm_kmguard.c | 9 +++---
sys/uvm/uvm_map.c | 11 +++----
sys/uvm/uvm_pdpolicy_clock.c | 11 +++----
sys/uvm/uvm_swap.c | 10 +++----
8 files changed, 80 insertions(+), 94 deletions(-)
diffs (truncated from 502 to 300 lines):
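Editor's note: the sys_swapctl and uvm_map_enter hunks fall in the truncated portion of the diff below. As a minimal sketch of the "move kmem_free outside the lock path" idiom named in the description, assuming a hypothetical structure and lock (this is illustrative, not the actual sys_swapctl code):

#include <sys/kmem.h>
#include <sys/mutex.h>

/*
 * Hypothetical example of the pattern: detach the buffer while holding
 * the lock, then defer kmem_free() until the lock has been dropped, so
 * the critical section stays short.
 */
struct example {
	kmutex_t	e_lock;
	char		*e_path;
	size_t		e_pathlen;
};

static void
example_clear_path(struct example *e)
{
	char *path;
	size_t len;

	mutex_enter(&e->e_lock);
	path = e->e_path;
	len = e->e_pathlen;
	e->e_path = NULL;
	e->e_pathlen = 0;
	mutex_exit(&e->e_lock);

	/* Freeing happens outside the lock path. */
	if (path != NULL)
		kmem_free(path, len);
}
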
diff -r 6293e9687018 -r a7089a46d00b sys/kern/subr_pool.c
--- a/sys/kern/subr_pool.c Fri Jan 27 21:53:50 2012 +0000
+++ b/sys/kern/subr_pool.c Sat Jan 28 00:00:06 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_pool.c,v 1.191 2012/01/27 19:48:40 para Exp $ */
+/* $NetBSD: subr_pool.c,v 1.192 2012/01/28 00:00:06 rmind Exp $ */
/*-
* Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.191 2012/01/27 19:48:40 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.192 2012/01/28 00:00:06 rmind Exp $");
#include "opt_ddb.h"
#include "opt_pool.h"
@@ -531,8 +531,8 @@
void
pool_subsystem_init(void)
{
+ size_t size;
int idx;
- size_t size;
mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
@@ -2718,7 +2718,9 @@
#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_kmem_fullpage = {
- pool_page_alloc, pool_page_free, 0
+ .pa_alloc = pool_page_alloc,
+ .pa_free = pool_page_free,
+ .pa_pagesz = 0
};
#else
struct pool_allocator pool_allocator_kmem = {
@@ -2733,7 +2735,9 @@
#ifdef POOL_SUBPAGE
struct pool_allocator pool_allocator_nointr_fullpage = {
- pool_page_alloc_nointr, pool_page_free_nointr, 0,
+ .pa_alloc = pool_page_alloc_nointr,
+ .pa_free = pool_page_free_nointr,
+ .pa_pagesz = 0
};
#else
struct pool_allocator pool_allocator_nointr = {
@@ -2755,7 +2759,9 @@
void pool_subpage_free_nointr(struct pool *, void *);
struct pool_allocator pool_allocator_nointr = {
- pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
+ .pa_alloc = pool_subpage_alloc,
+ .pa_free = pool_subpage_free,
+ .pa_pagesz = POOL_SUBPAGE
};
#endif /* POOL_SUBPAGE */
@@ -2791,18 +2797,14 @@
void *
pool_page_alloc(struct pool *pp, int flags)
{
- bool waitok = (flags & PR_WAITOK) ? true : false;
- int rc;
+ const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
vmem_addr_t va;
-
- rc = uvm_km_kmem_alloc(kmem_va_arena,
- pp->pr_alloc->pa_pagesz,
- ((waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT), &va);
-
- if (rc != 0)
- return NULL;
- else
- return (void *)va;
+ int ret;
+
+ ret = uvm_km_kmem_alloc(kmem_va_arena, pp->pr_alloc->pa_pagesz,
+ vflags | VM_INSTANTFIT, &va);
+
+ return ret ? NULL : (void *)va;
}
void
@@ -2815,25 +2817,21 @@
static void *
pool_page_alloc_meta(struct pool *pp, int flags)
{
- bool waitok = (flags & PR_WAITOK) ? true : false;
- int rc;
- vmem_addr_t addr;
-
- rc = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
- (waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT, &addr);
-
- if (rc != 0)
- return 0;
- else
- return (void *)addr;
+ const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
+ vmem_addr_t va;
+ int ret;
+
+ ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
+ vflags | VM_INSTANTFIT, &va);
+
+ return ret ? NULL : (void *)va;
}
static void
pool_page_free_meta(struct pool *pp, void *v)
{
- vmem_free(kmem_meta_arena, (vmem_addr_t)v,
- pp->pr_alloc->pa_pagesz);
+ vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
}
#ifdef POOL_SUBPAGE
diff -r 6293e9687018 -r a7089a46d00b sys/kern/vfs_bio.c
--- a/sys/kern/vfs_bio.c Fri Jan 27 21:53:50 2012 +0000
+++ b/sys/kern/vfs_bio.c Sat Jan 28 00:00:06 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_bio.c,v 1.234 2012/01/27 19:48:40 para Exp $ */
+/* $NetBSD: vfs_bio.c,v 1.235 2012/01/28 00:00:06 rmind Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -123,7 +123,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.234 2012/01/27 19:48:40 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.235 2012/01/28 00:00:06 rmind Exp $");
#include "opt_bufcache.h"
@@ -231,17 +231,14 @@
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{
- int rc;
+ const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP;
vmem_addr_t va;
+ int ret;
- rc = uvm_km_kmem_alloc(kmem_va_arena, MAXBSIZE,
- ((flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT,
- &va);
+ ret = uvm_km_kmem_alloc(kmem_va_arena, MAXBSIZE,
+ vflags | VM_INSTANTFIT, &va);
- if (rc != 0)
- return NULL;
- else
- return (void *)va;
+ return ret ? NULL : (void *)va;
}
static void
diff -r 6293e9687018 -r a7089a46d00b sys/ufs/ffs/ffs_vfsops.c
--- a/sys/ufs/ffs/ffs_vfsops.c Fri Jan 27 21:53:50 2012 +0000
+++ b/sys/ufs/ffs/ffs_vfsops.c Sat Jan 28 00:00:06 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: ffs_vfsops.c,v 1.273 2012/01/27 19:22:49 para Exp $ */
+/* $NetBSD: ffs_vfsops.c,v 1.274 2012/01/28 00:00:06 rmind Exp $ */
/*-
* Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.273 2012/01/27 19:22:49 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.274 2012/01/28 00:00:06 rmind Exp $");
#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
@@ -890,8 +890,7 @@
if (error)
return error;
- ump = kmem_alloc(sizeof(*ump), KM_SLEEP);
- memset(ump, 0, sizeof *ump);
+ ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
error = ffs_snapshot_init(ump);
if (error)
diff -r 6293e9687018 -r a7089a46d00b sys/uvm/uvm_init.c
--- a/sys/uvm/uvm_init.c Fri Jan 27 21:53:50 2012 +0000
+++ b/sys/uvm/uvm_init.c Sat Jan 28 00:00:06 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_init.c,v 1.42 2012/01/27 19:48:41 para Exp $ */
+/* $NetBSD: uvm_init.c,v 1.43 2012/01/28 00:00:06 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.42 2012/01/27 19:48:41 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.43 2012/01/28 00:00:06 rmind Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -78,62 +78,59 @@
vaddr_t kvm_start, kvm_end;
/*
- * step 0: ensure that the hardware set the page size
+ * Ensure that the hardware set the page size, zero the UVM structure.
*/
if (uvmexp.pagesize == 0) {
panic("uvm_init: page size not set");
}
- /*
- * step 1: zero the uvm structure
- */
-
memset(&uvm, 0, sizeof(uvm));
averunnable.fscale = FSCALE;
/*
- * step 2: init the page sub-system. this includes allocating the
- * vm_page structures, and setting up all the page queues (and
- * locks). available memory will be put in the "free" queue.
- * kvm_start and kvm_end will be set to the area of kernel virtual
- * memory which is available for general use.
+ * Init the page sub-system. This includes allocating the vm_page
+ * structures, and setting up all the page queues (and locks).
+ * Available memory will be put in the "free" queue, kvm_start and
+ * kvm_end will be set to the area of kernel virtual memory which
+ * is available for general use.
*/
uvm_page_init(&kvm_start, &kvm_end);
/*
- * step 3: init the map sub-system.
+ * Init the map sub-system.
*/
uvm_map_init();
/*
- * step 4: setup the kernel's virtual memory data structures. this
- * includes setting up the kernel_map/kernel_object.
- * Bootstrap all kernel memory allocators.
+ * Setup the kernel's virtual memory data structures. This includes
+ * setting up the kernel_map/kernel_object. Bootstrap all kernel
+ * memory allocators.
*/
uao_init();
uvm_km_bootstrap(kvm_start, kvm_end);
- /*
- * step 5: setup uvm_map pool_caches and init the amap.
+ /*
+ * Setup uvm_map caches and init the amap.
*/
uvm_map_init_caches();
uvm_amap_init();
/*
- * step 5: init the pmap module. the pmap module is free to allocate
+ * Init the pmap module. The pmap module is free to allocate
* memory for its private use (e.g. pvlists).
*/
pmap_init();
- /* step 6: init the kernel maps virtual address caches.
- * make kernel memory allocator ready for use.
- * After this call the pool/kmem memory allocators can be used.
+ /*
+ * Init the kernel maps virtual address caches. Make kernel memory
+ * allocator ready for use. After this call the pool/kmem memory
+ * allocators can be used.
*/
uvm_km_init();
@@ -143,7 +140,7 @@
#endif
/*
- * step 6: init all pagers and the pager_map.
+ * Init all pagers and the pager_map.
*/
uvm_pager_init();
@@ -155,13 +152,13 @@
uvm_loan_init();