Source-Changes-HG archive
[src/trunk]: src/sys/arch/powerpc Add evcnts for common pmap activities when...
details: https://anonhg.NetBSD.org/src/rev/0a4db72964a8
branches: trunk
changeset: 535435:0a4db72964a8
user: matt <matt%NetBSD.org@localhost>
date: Sun Aug 18 19:18:33 2002 +0000
description:
Add evcnts for common pmap activities when PMAPCOUNTERS is defined
in the config file.
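For readers unfamiliar with the kernel's event counters, a minimal sketch of
the evcnt(9) pattern this change follows is shown below; the counter and
group names here are illustrative, not taken from the commit itself.

	#include <sys/device.h>		/* struct evcnt, EVCNT_INITIALIZER */

	/* Statically initialize a miscellaneous counter in group "example". */
	struct evcnt example_evcnt =
	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "example", "things counted");

	void
	example_init(void)
	{
		/* Attach once at initialization so the counter is listed. */
		evcnt_attach_static(&example_evcnt);
	}

	void
	example_hot_path(void)
	{
		/* Bump on the hot path; the commit wraps this in PMAPCOUNT(). */
		example_evcnt.ev_count++;
	}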
diffstat:
sys/arch/powerpc/mpc6xx/pmap.c | 114 ++++++++++++++++++++++++++++++++--
sys/arch/powerpc/powerpc/pmap_subr.c | 51 ++++++++++++++-
2 files changed, 151 insertions(+), 14 deletions(-)
diffs (truncated from 351 to 300 lines):
diff -r 088c18ce627e -r 0a4db72964a8 sys/arch/powerpc/mpc6xx/pmap.c
--- a/sys/arch/powerpc/mpc6xx/pmap.c Sun Aug 18 17:17:59 2002 +0000
+++ b/sys/arch/powerpc/mpc6xx/pmap.c Sun Aug 18 19:18:33 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.55 2002/08/14 14:25:16 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.56 2002/08/18 19:18:33 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -67,12 +67,14 @@
*/
#include "opt_altivec.h"
+#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/queue.h>
+#include <sys/device.h> /* for evcnt */
#include <sys/systm.h>
#if __NetBSD_Version__ < 105010000
@@ -92,8 +94,6 @@
#include <powerpc/bat.h>
#endif
-/*#define PMAPCHECK*/
-
#if defined(DEBUG) || defined(PMAPCHECK)
#define STATIC
#else
@@ -270,6 +270,64 @@
# define DPRINTFN(n, x)
#endif
+
+#ifdef PMAPCOUNTERS
+#define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
+struct evcnt pmap_evcnt_mappings =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
+ "pmap", "pages mapped");
+struct evcnt pmap_evcnt_unmappings =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
+ "pmap", "pages unmapped");
+
+struct evcnt pmap_evcnt_kernel_mappings =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
+ "pmap", "kernel pages mapped");
+struct evcnt pmap_evcnt_kernel_unmappings =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
+ "pmap", "kernel pages unmapped");
+
+struct evcnt pmap_evcnt_mappings_replaced =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
+ "pmap", "pages mappings replaced");
+
+struct evcnt pmap_evcnt_exec_mappings =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
+ "pmap", "exec pages mapped");
+struct evcnt pmap_evcnt_exec_cached =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
+ "pmap", "exec pages cached");
+
+struct evcnt pmap_evcnt_exec_synced =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
+ "pmap", "exec pages synced");
+struct evcnt pmap_evcnt_exec_synced_clear_modify =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
+ "pmap", "exec pages synced (CM)");
+
+struct evcnt pmap_evcnt_exec_uncached_page_protect =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
+ "pmap", "exec pages uncached (PP)");
+struct evcnt pmap_evcnt_exec_uncached_clear_modify =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
+ "pmap", "exec pages uncached (CM)");
+struct evcnt pmap_evcnt_exec_uncached_zero_page =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
+ "pmap", "exec pages uncached (ZP)");
+struct evcnt pmap_evcnt_exec_uncached_copy_page =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
+ "pmap", "exec pages uncached (CP)");
+
+/*
+ * From pmap_subr.c
+ */
+extern struct evcnt pmap_evcnt_zeroed_pages;
+extern struct evcnt pmap_evcnt_copied_pages;
+extern struct evcnt pmap_evcnt_idlezeroed_pages;
+#else
+#define PMAPCOUNT(ev) ((void) 0)
+#endif
+
#define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va))
#define TLBSYNC() __asm __volatile("tlbsync")
#define SYNC() __asm __volatile("sync")
@@ -791,6 +849,29 @@
pmap_initialized = 1;
splx(s);
+
+#ifdef PMAPCOUNTERS
+ evcnt_attach_static(&pmap_evcnt_mappings);
+ evcnt_attach_static(&pmap_evcnt_mappings_replaced);
+ evcnt_attach_static(&pmap_evcnt_unmappings);
+
+ evcnt_attach_static(&pmap_evcnt_kernel_mappings);
+ evcnt_attach_static(&pmap_evcnt_kernel_unmappings);
+
+ evcnt_attach_static(&pmap_evcnt_exec_mappings);
+ evcnt_attach_static(&pmap_evcnt_exec_cached);
+ evcnt_attach_static(&pmap_evcnt_exec_synced);
+ evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify);
+
+ evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect);
+ evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify);
+ evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page);
+ evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page);
+
+ evcnt_attach_static(&pmap_evcnt_zeroed_pages);
+ evcnt_attach_static(&pmap_evcnt_copied_pages);
+ evcnt_attach_static(&pmap_evcnt_idlezeroed_pages);
+#endif
}
/*
@@ -1182,7 +1263,6 @@
panic("pmap_pvo_enter: called recursively!");
#endif
- pmap_pvo_enter_calls++;
/*
* Compute the PTE Group index.
*/
@@ -1213,6 +1293,7 @@
#endif
}
#endif
+ PMAPCOUNT(mappings_replaced);
pmap_pvo_remove(pvo, -1);
break;
}
@@ -1245,12 +1326,18 @@
pvo->pvo_pmap = pm;
LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
pvo->pvo_vaddr &= ~ADDR_POFF;
- if (flags & VM_PROT_EXECUTE)
+ if (flags & VM_PROT_EXECUTE) {
+ PMAPCOUNT(exec_mappings);
pvo->pvo_vaddr |= PVO_EXECUTABLE;
+ }
if (flags & PMAP_WIRED)
pvo->pvo_vaddr |= PVO_WIRED;
- if (pvo_head != &pmap_pvo_kunmanaged)
+ if (pvo_head != &pmap_pvo_kunmanaged) {
pvo->pvo_vaddr |= PVO_MANAGED;
+ PMAPCOUNT(mappings);
+ } else {
+ PMAPCOUNT(kernel_mappings);
+ }
pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
@@ -1334,12 +1421,14 @@
* ... if we aren't going to reuse it.
*/
LIST_REMOVE(pvo, pvo_olink);
+ if (pvo->pvo_vaddr & PVO_MANAGED)
+ PMAPCOUNT(unmappings);
+ else
+ PMAPCOUNT(kernel_unmappings);
pool_put(pvo->pvo_vaddr & PVO_MANAGED
? &pmap_mpvo_pool
: &pmap_upvo_pool,
pvo);
- pmap_pvo_entries--;
- pmap_pvo_remove_calls++;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
pmap_pvo_remove_depth--;
#endif
@@ -1449,9 +1538,11 @@
(pte_lo & PTE_I) == 0 &&
was_exec == 0) {
DPRINTFN(ENTER, (" syncicache"));
+ PMAPCOUNT(exec_synced);
pmap_syncicache(pa, NBPG);
if (pg != NULL) {
pmap_attr_save(pg, PTE_EXEC);
+ PMAPCOUNT(exec_cached);
#if defined(DEBUG) || defined(PMAPDEBUG)
if (pmapdebug & PMAPDEBUG_ENTER)
printf(" marked-as-exec"));
@@ -1709,7 +1800,10 @@
if ((prot & VM_PROT_READ) == 0) {
DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
pg->phys_addr));
- pmap_attr_clear(pg, PTE_EXEC);
+ if (pmap_attr_fetch(pg) & PTE_EXEC) {
+ PMAPCOUNT(exec_uncached_page_protect);
+ pmap_attr_clear(pg, PTE_EXEC);
+ }
}
pvo_head = vm_page_to_pvoh(pg);
@@ -1924,10 +2018,12 @@
DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
pg->phys_addr));
pmap_attr_clear(pg, PTE_EXEC);
+ PMAPCOUNT(exec_uncached_clear_modify);
} else {
DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
pg->phys_addr));
pmap_syncicache(pg->phys_addr, NBPG);
+ PMAPCOUNT(exec_synced_clear_modify);
}
}
return (rv & ptebit) != 0;
diff -r 088c18ce627e -r 0a4db72964a8 sys/arch/powerpc/powerpc/pmap_subr.c
--- a/sys/arch/powerpc/powerpc/pmap_subr.c Sun Aug 18 17:17:59 2002 +0000
+++ b/sys/arch/powerpc/powerpc/pmap_subr.c Sun Aug 18 19:18:33 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_subr.c,v 1.4 2002/08/14 14:25:15 matt Exp $ */
+/* $NetBSD: pmap_subr.c,v 1.5 2002/08/18 19:18:33 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -36,10 +36,12 @@
*/
#include "opt_altivec.h"
+#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sched.h>
+#include <sys/device.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>
@@ -54,6 +56,22 @@
#define MFMSR() mfmsr()
#define MTMSR(psl) __asm __volatile("sync; mtmsr %0; isync" :: "r"(psl))
+#ifdef PMAPCOUNTERS
+struct evcnt pmap_evcnt_zeroed_pages =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "pages zeroed");
+struct evcnt pmap_evcnt_copied_pages =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "pages copied");
+struct evcnt pmap_evcnt_idlezeroed_pages =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "pages idle zeroed");
+#ifdef PPC_MPC6XX
+extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
+extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
+#endif
+#endif /* PMAPCOUNTERS */
+
/*
* This file uses a sick & twisted method to deal with the common pmap
* operations of zero'ing, copying, and syncing the page with the
@@ -73,7 +91,7 @@
* However while relocation is off, we MUST not access the kernel stack in
* any manner since it will probably no longer be mapped. This means no
* calls while relocation is off. The AltiVEC routines need to handle the
- * MSR fiddling themselves so they save things on the stack.
+ * MSR fiddling themselves so they can save things on the stack.
*/
/*
@@ -85,7 +103,7 @@
size_t linewidth;
register_t msr;
-#if defined(PPC_MPC6XX) && !defined(OLDPMAP)
+#if defined(PPC_MPC6XX)
{
/*
* If we are zeroing this page, we must clear the EXEC-ness
@@ -94,9 +112,17 @@
struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
KDASSERT(pg != NULL);
KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
+#ifdef PMAPCOUNTERS
+ if (pg->mdpage.mdpg_attrs & PTE_EXEC) {
+ pmap_evcnt_exec_uncached_zero_page.ev_count++;
+ }
+#endif
pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
}
#endif
+#ifdef PMAPCOUNTERS
+ pmap_evcnt_zeroed_pages.ev_count++;
+#endif
#ifdef ALTIVEC
if (pmap_use_altivec) {
vzeropage(pa);
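A usage note: counters attached with evcnt_attach_static() are exported to
userland, so on a kernel configured with "options PMAPCOUNTERS" the new
statistics should appear under the "pmap" group in the event-counter
listing, e.g.:

	$ vmstat -e | grep '^pmap'

Each line of that output corresponds to one of the description strings
above ("pages mapped", "pages unmapped", "exec pages synced", and so on).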