Source-Changes-HG archive
[src/trunk]: src/sys/uvm If we are building a small kernel [1], don't inline ...
details: https://anonhg.NetBSD.org/src/rev/1b542d4ac308
branches: trunk
changeset: 581936:1b542d4ac308
user: dsl <dsl%NetBSD.org@localhost>
date: Fri Jun 10 22:00:52 2005 +0000
description:
If we are building a small kernel [1], don't inline all these functions.
Saves over 2k and lets i386 rescue_tiny build again.
[1] if MALLOC_NOINLINE is defined - not ideal but...
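The change replaces the hard-coded __inline keyword on a number of static helpers with a __INLINE macro that expands to nothing when MALLOC_NOINLINE is defined. A minimal sketch of that pattern, outside the uvm_map.c context (the example_lookup function is hypothetical, and __inline assumes a GCC-style compiler):

/*
 * When MALLOC_NOINLINE is defined, __INLINE expands to nothing, so each
 * tagged helper is emitted once out of line instead of being duplicated
 * at every call site; otherwise the compiler is free to inline as before.
 */
#ifdef MALLOC_NOINLINE
#define __INLINE			/* small kernel: keep one copy */
#else
#define __INLINE __inline		/* normal build: allow inlining */
#endif

static __INLINE int
example_lookup(const int *table, int n, int key)
{
	int i;

	for (i = 0; i < n; i++)
		if (table[i] == key)
			return (i);
	return (-1);
}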
diffstat:
sys/uvm/uvm_map.c | 46 +++++++++++++++++++++++++++-------------------
1 files changed, 27 insertions(+), 19 deletions(-)
diffs (170 lines):
diff -r 41c78e4966db -r 1b542d4ac308 sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c Fri Jun 10 20:48:59 2005 +0000
+++ b/sys/uvm/uvm_map.c Fri Jun 10 22:00:52 2005 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.200 2005/06/02 17:01:44 matt Exp $ */
+/* $NetBSD: uvm_map.c,v 1.201 2005/06/10 22:00:52 dsl Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -71,7 +71,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.200 2005/06/02 17:01:44 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.201 2005/06/10 22:00:52 dsl Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -101,6 +101,13 @@
#include <uvm/uvm_ddb.h>
#endif
+/* If we are trying to build a small kernel, don't inline much here. */
+#ifdef MALLOC_NOINLINE
+#define __INLINE
+#else
+#define __INLINE __inline
+#endif
+
#ifndef UVMMAP_NOCOUNTERS
#include <sys/device.h>
struct evcnt map_ubackmerge = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
@@ -293,7 +300,7 @@
return (0);
}
-static __inline void
+static __INLINE void
uvm_rb_augment(struct vm_map_entry *entry)
{
@@ -334,7 +341,7 @@
return (space);
}
-static __inline void
+static __INLINE void
uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
{
/* We need to traverse to the very top */
@@ -344,7 +351,7 @@
} while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
}
-static __inline void
+static __INLINE void
uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
{
vaddr_t space = uvm_rb_space(map, entry);
@@ -361,7 +368,7 @@
uvm_rb_fixup(map, entry->prev);
}
-static __inline void
+static __INLINE void
uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
{
struct vm_map_entry *parent;
@@ -446,13 +453,15 @@
* local inlines
*/
-static __inline struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
+#ifdef DIAGNOSTIC
+static __INLINE struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
+#endif
/*
* uvm_mapent_alloc: allocate a map entry
*/
-static __inline struct vm_map_entry *
+static __INLINE struct vm_map_entry *
uvm_mapent_alloc(struct vm_map *map, int flags)
{
struct vm_map_entry *me;
@@ -477,7 +486,7 @@
* uvm_mapent_alloc_split: allocate a map entry for clipping.
*/
-static __inline struct vm_map_entry *
+static __INLINE struct vm_map_entry *
uvm_mapent_alloc_split(struct vm_map *map,
const struct vm_map_entry *old_entry, int flags,
struct uvm_mapent_reservation *umr)
@@ -510,7 +519,7 @@
* uvm_mapent_free: free map entry
*/
-static __inline void
+static __INLINE void
uvm_mapent_free(struct vm_map_entry *me)
{
UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
@@ -531,7 +540,7 @@
* => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
*/
-static __inline void
+static __INLINE void
uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
{
@@ -564,7 +573,7 @@
* uvm_mapent_copy: copy a map entry, preserving flags
*/
-static __inline void
+static __INLINE void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{
@@ -578,7 +587,7 @@
* => map should be locked by caller
*/
-static __inline void
+static __INLINE void
uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
{
@@ -590,7 +599,7 @@
/*
* wrapper for calling amap_ref()
*/
-static __inline void
+static __INLINE void
uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
{
@@ -1426,7 +1435,7 @@
* entry->next->start and entry->end. Returns 1 if fits, 0 if doesn't
* fit, and -1 address wraps around.
*/
-static __inline int
+static __INLINE int
uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
vsize_t align, int topdown, struct vm_map_entry *entry)
{
@@ -4075,10 +4084,8 @@
#define UVM_KHDR_FIND(entry) \
((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
-static __inline struct vm_map_entry *uvm_kmapent_get(struct uvm_kmapent_hdr *);
-static __inline void uvm_kmapent_put(struct uvm_kmapent_hdr *,
- struct vm_map_entry *);
-
+
+#ifdef DIAGNOSTIC
static __inline struct vm_map *
uvm_kmapent_map(struct vm_map_entry *entry)
{
@@ -4087,6 +4094,7 @@
ukh = UVM_KHDR_FIND(entry);
return ukh->ukh_map;
}
+#endif
static __inline struct vm_map_entry *
uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)