Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch Don't inline pmap_kenter_pa(). It doesn't buy us much to do so
details: https://anonhg.NetBSD.org/src/rev/41d2900b503f
branches: trunk
changeset: 474964:41d2900b503f
user: thorpej <thorpej%NetBSD.org@localhost>
date: Wed Jul 28 01:07:52 1999 +0000
description:
Don't inline pmap_kenter_pa(). It doesn't buy us much to do so, and
it's nice to have it show up in stack traces.
diffstat:
sys/arch/i386/i386/pmap.c | 34 +++++++++++++++++++++++++++++++++-
sys/arch/i386/include/pmap.h | 35 +----------------------------------
sys/arch/pc532/include/pmap.h | 35 +----------------------------------
sys/arch/pc532/pc532/pmap.c | 34 +++++++++++++++++++++++++++++++++-
4 files changed, 68 insertions(+), 70 deletions(-)
diffs (208 lines):
diff -r 5ce984c3c962 -r 41d2900b503f sys/arch/i386/i386/pmap.c
--- a/sys/arch/i386/i386/pmap.c Wed Jul 28 00:58:14 1999 +0000
+++ b/sys/arch/i386/i386/pmap.c Wed Jul 28 01:07:52 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.74 1999/07/18 21:33:20 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.75 1999/07/28 01:07:58 thorpej Exp $ */
/*
*
@@ -3543,6 +3543,38 @@
}
/*
+ * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
+ *
+ * => no need to lock anything, assume va is already allocated
+ * => should be faster than normal pmap enter function
+ */
+
+void
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+{
+ struct pmap *pm = pmap_kernel();
+ pt_entry_t *pte, opte;
+ int s;
+
+ s = splimp();
+ simple_lock(&pm->pm_obj.vmobjlock);
+ pm->pm_stats.resident_count++;
+ pm->pm_stats.wired_count++;
+ simple_unlock(&pm->pm_obj.vmobjlock);
+ splx(s);
+
+ pte = vtopte(va);
+ opte = *pte;
+ *pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+ PG_V | pmap_pg_g; /* zap! */
+ if (pmap_valid_entry(opte))
+ pmap_update_pg(va);
+}
+
+/*
* pmap_growkernel: increase usage of KVM space
*
* => we allocate new PTPs for the kernel and install them in all
diff -r 5ce984c3c962 -r 41d2900b503f sys/arch/i386/include/pmap.h
--- a/sys/arch/i386/include/pmap.h Wed Jul 28 00:58:14 1999 +0000
+++ b/sys/arch/i386/include/pmap.h Wed Jul 28 01:07:52 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.41 1999/07/18 21:33:21 chs Exp $ */
+/* $NetBSD: pmap.h,v 1.42 1999/07/28 01:07:59 thorpej Exp $ */
/*
*
@@ -391,7 +391,6 @@
void pmap_bootstrap __P((vaddr_t));
boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
void pmap_deactivate __P((struct proc *));
-static void pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t));
static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
void pmap_page_remove __P((struct vm_page *));
static void pmap_protect __P((struct pmap *, vaddr_t,
@@ -495,38 +494,6 @@
}
}
-/*
- * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- */
-
-__inline static void
-pmap_kenter_pa(va, pa, prot)
- vaddr_t va;
- paddr_t pa;
- vm_prot_t prot;
-{
- struct pmap *pm = pmap_kernel();
- pt_entry_t *pte, opte;
- int s;
-
- s = splimp();
- simple_lock(&pm->pm_obj.vmobjlock);
- pm->pm_stats.resident_count++;
- pm->pm_stats.wired_count++;
- simple_unlock(&pm->pm_obj.vmobjlock);
- splx(s);
-
- pte = vtopte(va);
- opte = *pte;
- *pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
- PG_V | pmap_pg_g; /* zap! */
- if (pmap_valid_entry(opte))
- pmap_update_pg(va);
-}
-
vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
#if defined(USER_LDT)
diff -r 5ce984c3c962 -r 41d2900b503f sys/arch/pc532/include/pmap.h
--- a/sys/arch/pc532/include/pmap.h Wed Jul 28 00:58:14 1999 +0000
+++ b/sys/arch/pc532/include/pmap.h Wed Jul 28 01:07:52 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.26 1999/07/18 21:33:22 chs Exp $ */
+/* $NetBSD: pmap.h,v 1.27 1999/07/28 01:07:53 thorpej Exp $ */
/*
*
@@ -371,7 +371,6 @@
void pmap_bootstrap __P((vaddr_t));
boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
void pmap_deactivate __P((struct proc *));
-static void pmap_kenter_pa __P((vaddr_t, paddr_t, vm_prot_t));
static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
void pmap_page_remove __P((struct vm_page *));
static void pmap_protect __P((struct pmap *, vaddr_t,
@@ -463,38 +462,6 @@
}
}
-/*
- * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- */
-
-__inline static void
-pmap_kenter_pa(va, pa, prot)
- vaddr_t va;
- paddr_t pa;
- vm_prot_t prot;
-{
- struct pmap *pm = pmap_kernel();
- pt_entry_t *pte, opte;
- int s;
-
- s = splimp();
- simple_lock(&pm->pm_obj.vmobjlock);
- pm->pm_stats.resident_count++;
- pm->pm_stats.wired_count++;
- simple_unlock(&pm->pm_obj.vmobjlock);
- splx(s);
-
- pte = vtopte(va);
- opte = *pte;
- *pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
- PG_V; /* zap! */
- if (pmap_valid_entry(opte))
- pmap_update_pg(va);
-}
-
vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
#endif /* _KERNEL */
diff -r 5ce984c3c962 -r 41d2900b503f sys/arch/pc532/pc532/pmap.c
--- a/sys/arch/pc532/pc532/pmap.c Wed Jul 28 00:58:14 1999 +0000
+++ b/sys/arch/pc532/pc532/pmap.c Wed Jul 28 01:07:52 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.40 1999/07/18 21:33:22 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.41 1999/07/28 01:07:52 thorpej Exp $ */
/*
*
@@ -3308,6 +3308,38 @@
}
/*
+ * pmap_kenter_pa: enter a kernel mapping without R/M (pv_entry) tracking
+ *
+ * => no need to lock anything, assume va is already allocated
+ * => should be faster than normal pmap enter function
+ */
+
+void
+pmap_kenter_pa(va, pa, prot)
+ vaddr_t va;
+ paddr_t pa;
+ vm_prot_t prot;
+{
+ struct pmap *pm = pmap_kernel();
+ pt_entry_t *pte, opte;
+ int s;
+
+ s = splimp();
+ simple_lock(&pm->pm_obj.vmobjlock);
+ pm->pm_stats.resident_count++;
+ pm->pm_stats.wired_count++;
+ simple_unlock(&pm->pm_obj.vmobjlock);
+ splx(s);
+
+ pte = vtopte(va);
+ opte = *pte;
+ *pte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+ PG_V; /* zap! */
+ if (pmap_valid_entry(opte))
+ pmap_update_pg(va);
+}
+
+/*
* pmap_growkernel: increase usage of KVM space
*
* => we allocate new PTPs for the kernel and install them in all
Home |
Main Index |
Thread Index |
Old Index