Source-Changes-HG archive
[src/trunk]: src/sys/arch/sh5 - Be more consistent about using ptel_t where it matters.
details: https://anonhg.NetBSD.org/src/rev/98ba89bab4c8
branches: trunk
changeset: 536216:98ba89bab4c8
user: scw <scw%NetBSD.org@localhost>
date: Wed Sep 11 11:08:45 2002 +0000
description:
- Be more consistent about using ptel_t where it matters.
- Add event counters for some key pmap events (similar to mpc6xx pmap);
  see the evcnt(9) sketch after this description.
- Use the cache-friendly, optimised copy/zero page functions.
- Add the necessary cache management code to enable WriteBack caching
of KSEG1 mappings. Seems to work fine so far.
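The event counters added in this change use the standard evcnt(9)
static-attach pattern that appears in the diff below: a statically
initialised struct evcnt, registered once with evcnt_attach_static(),
and bumped via ev_count at the event site. A minimal sketch follows;
the "example" group/name strings and function names are placeholders
for illustration, not identifiers from this change.

    #include <sys/device.h>

    /*
     * Minimal evcnt(9) sketch: initialise a counter statically,
     * attach it once at initialisation time, then increment
     * ev_count wherever the event of interest occurs.  The
     * "example"/"events" strings are placeholders.
     */
    static struct evcnt example_events =
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "example", "events");

    void
    example_init(void)
    {

            /* Make the counter visible to "vmstat -e". */
            evcnt_attach_static(&example_events);
    }

    void
    example_event(void)
    {

            example_events.ev_count++;
    }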
diffstat:
sys/arch/sh5/include/pmap.h | 6 +-
sys/arch/sh5/sh5/pmap.c | 238 ++++++++++++++++++++++++++++++++-----------
2 files changed, 179 insertions(+), 65 deletions(-)
diffs (truncated from 530 to 300 lines):
diff -r eb55567baa92 -r 98ba89bab4c8 sys/arch/sh5/include/pmap.h
--- a/sys/arch/sh5/include/pmap.h Wed Sep 11 11:03:08 2002 +0000
+++ b/sys/arch/sh5/include/pmap.h Wed Sep 11 11:08:45 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.5 2002/09/10 11:11:44 scw Exp $ */
+/* $NetBSD: pmap.h,v 1.6 2002/09/11 11:08:45 scw Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
@@ -82,8 +82,8 @@
#define pmap_kernel() (&kernel_pmap_store)
extern int pmap_write_trap(int, vaddr_t);
-extern boolean_t pmap_clear_bit(struct vm_page *, int);
-extern boolean_t pmap_query_bit(struct vm_page *, int);
+extern boolean_t pmap_clear_bit(struct vm_page *, ptel_t);
+extern boolean_t pmap_query_bit(struct vm_page *, ptel_t);
extern vaddr_t pmap_map_poolpage(paddr_t);
extern paddr_t pmap_unmap_poolpage(vaddr_t);
diff -r eb55567baa92 -r 98ba89bab4c8 sys/arch/sh5/sh5/pmap.c
--- a/sys/arch/sh5/sh5/pmap.c Wed Sep 11 11:03:08 2002 +0000
+++ b/sys/arch/sh5/sh5/pmap.c Wed Sep 11 11:08:45 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.6 2002/09/10 12:42:03 scw Exp $ */
+/* $NetBSD: pmap.c,v 1.7 2002/09/11 11:08:45 scw Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
@@ -114,6 +114,7 @@
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
+#include <sys/device.h>
#include <uvm/uvm.h>
@@ -302,6 +303,30 @@
*/
struct pvo_head *pmap_upvo_table; /* pvo entries by ptegroup index */
+static struct evcnt pmap_pteg_idx_events[SH5_PTEG_SIZE] = {
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [0]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [1]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [2]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [3]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [4]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [5]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [6]"),
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap",
+ "ptes added at pteg group [7]")
+};
+
+static struct evcnt pmap_pte_spill_events =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "spills");
+static struct evcnt pmap_pte_spill_evict_events =
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "spill evictions");
+
/*
* This array contains one entry per kernel IPT entry.
*/
@@ -321,8 +346,8 @@
static struct pvo_entry * pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static void pmap_pinit(pmap_t);
static void pmap_release(pmap_t);
-static void pmap_pa_map_kva(vaddr_t, paddr_t);
-static ptel_t pmap_pa_unmap_kva(vaddr_t);
+static void pmap_pa_map_kva(vaddr_t, paddr_t, ptel_t);
+static ptel_t pmap_pa_unmap_kva(vaddr_t, ptel_t *);
static int pmap_pvo_enter(pmap_t, struct pvo_head *,
vaddr_t, paddr_t, ptel_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int);
@@ -332,6 +357,9 @@
static u_int pmap_asid_generation;
static void pmap_asid_alloc(pmap_t);
+extern void pmap_asm_zero_page(vaddr_t);
+extern void pmap_asm_copy_page(vaddr_t, vaddr_t);
+
#define NPMAPS 16384
#define VSID_NBPW (sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
@@ -524,11 +552,48 @@
}
static __inline void
-pmap_kpte_clear_bit(int idx, struct pvo_entry *pvo, u_int ptebit)
+pmap_kpte_clear_bit(int idx, struct pvo_entry *pvo, ptel_t ptebit)
{
ptel_t ptel;
ptel = pmap_kernel_ipt[idx];
+
+ switch ((ptel & ptebit) & (SH5_PTEL_PR_W | SH5_PTEL_PR_X)) {
+ case SH5_PTEL_PR_W | SH5_PTEL_PR_X:
+ /*
+ * The page is being made no-exec, rd-only.
+ * Purge the data cache and invalidate insn cache.
+ */
+ __cpu_cache_dpurge_iinv(PVO_VADDR(pvo), NBPG);
+ break;
+
+ case SH5_PTEL_PR_W:
+ /*
+ * The page is being made read-only.
+ * Purge the data-cache.
+ */
+ __cpu_cache_dpurge(PVO_VADDR(pvo), NBPG);
+ break;
+
+ case SH5_PTEL_PR_X:
+ /*
+ * The page is being made no-exec.
+ * Invalidate the instruction cache.
+ */
+ __cpu_cache_iinv(PVO_VADDR(pvo), NBPG);
+ break;
+
+ case 0:
+ /*
+ * The page already has the required protection.
+ * No need to touch the cache.
+ */
+ break;
+ }
+
+ /*
+ * It's now safe to echo the change in the TLB.
+ */
pmap_kernel_ipt[idx] &= ~ptebit;
__cpu_tlbinv(PVO_VADDR(pvo) | SH5_PTEH_SH,
@@ -652,6 +717,8 @@
source_pvo = NULL;
victim_pvo = NULL;
+ pmap_pte_spill_events.ev_count++;
+
LIST_FOREACH(pvo, &pmap_upvo_table[ptegidx], pvo_olink) {
if (source_pvo == NULL && pmap_pteh_match(pvo, vsid, va)) {
/*
@@ -669,6 +736,7 @@
if (j >= 0) {
/* Excellent. No need to evict anyone! */
PVO_PTEGIDX_SET(pvo, j);
+ pmap_pteg_idx_events[j].ev_count++;
return (&ptg->pte[j]);
}
@@ -731,6 +799,8 @@
pmap_pteg_set(pt, source_pvo);
PVO_PTEGIDX_SET(source_pvo, idx);
+ pmap_pte_spill_evict_events.ev_count++;
+
return (pt);
}
@@ -926,7 +996,7 @@
void
pmap_init(void)
{
- int s;
+ int s, i;
s = splvm();
@@ -948,6 +1018,11 @@
pmap_initialized = 1;
splx(s);
+
+ evcnt_attach_static(&pmap_pte_spill_events);
+ evcnt_attach_static(&pmap_pte_spill_evict_events);
+ for (i = 0; i < SH5_PTEG_SIZE; i++)
+ evcnt_attach_static(&pmap_pteg_idx_events[i]);
}
/*
@@ -1126,11 +1201,11 @@
if (!pmap_initialized)
panic("pmap_zero_page: pmap_initialized is false!");
- pmap_pa_map_kva(pmap_zero_page_kva, pa);
+ pmap_pa_map_kva(pmap_zero_page_kva, pa, SH5_PTEL_PR_W);
- memset((void *)pmap_zero_page_kva, 0, NBPG);
+ pmap_asm_zero_page(pmap_zero_page_kva);
- (void) pmap_pa_unmap_kva(pmap_zero_page_kva);
+ (void) pmap_pa_unmap_kva(pmap_zero_page_kva, NULL);
}
/*
@@ -1144,14 +1219,13 @@
PMPRINTF(("pmap_copy_page: copying 0x%08lx -> 0x%08lx\n", src, dst));
- pmap_pa_map_kva(pmap_copy_page_src_kva, src);
- pmap_pa_map_kva(pmap_copy_page_dst_kva, dst);
+ pmap_pa_map_kva(pmap_copy_page_src_kva, src, 0);
+ pmap_pa_map_kva(pmap_copy_page_dst_kva, dst, SH5_PTEL_PR_W);
- memcpy((void *)pmap_copy_page_dst_kva,
- (void *)pmap_copy_page_src_kva, NBPG);
+ pmap_asm_copy_page(pmap_copy_page_dst_kva, pmap_copy_page_src_kva);
- (void) pmap_pa_unmap_kva(pmap_copy_page_src_kva);
- (void) pmap_pa_unmap_kva(pmap_copy_page_dst_kva);
+ (void) pmap_pa_unmap_kva(pmap_copy_page_src_kva, NULL);
+ (void) pmap_pa_unmap_kva(pmap_copy_page_dst_kva, NULL);
}
/*
@@ -1226,10 +1300,9 @@
* enabled,
*/
static void
-pmap_pa_map_kva(vaddr_t kva, paddr_t pa)
+pmap_pa_map_kva(vaddr_t kva, paddr_t pa, ptel_t wprot)
{
- ptel_t *ptel;
- u_int prot;
+ ptel_t prot;
int idx;
/*
@@ -1238,17 +1311,9 @@
if ((idx = kva_to_iptidx(kva)) < 0)
panic("pmap_pa_map_kva: Invalid KVA %p", (void *)kva);
- ptel = &pmap_kernel_ipt[idx];
-#if 0
- prot = SH5_PTEL_CB_WRITEBACK | SH5_PTEL_SZ_4KB |
- SH5_PTEL_PR_R | SH5_PTEL_PR_W;
-#else
- prot = SH5_PTEL_CB_NOCACHE | SH5_PTEL_SZ_4KB |
- SH5_PTEL_PR_R | SH5_PTEL_PR_W;
-#endif
- pa &= SH5_PTEL_PPN_MASK;
+ prot = SH5_PTEL_CB_WRITEBACK | SH5_PTEL_SZ_4KB | SH5_PTEL_PR_R;
- *ptel = (pa & SH5_PTEL_PPN_MASK) | prot;
+ pmap_kernel_ipt[idx] = (ptel_t)(pa & SH5_PTEL_PPN_MASK) | prot | wprot;
}
/*
@@ -1258,18 +1323,57 @@
* The contents of the PTEL which described the mapping are returned.
*/
static ptel_t
-pmap_pa_unmap_kva(vaddr_t kva)
+pmap_pa_unmap_kva(vaddr_t kva, ptel_t *ptel)
{
- ptel_t *ptel;
ptel_t oldptel;
int idx;
- if ((idx = kva_to_iptidx(kva)) < 0)
- panic("pmap_pa_unmap_kva: Invalid KVA %p", (void *)kva);
+ if (ptel == NULL) {
+ if ((idx = kva_to_iptidx(kva)) < 0)
+ panic("pmap_pa_unmap_kva: Invalid KVA %p", (void *)kva);
- ptel = &pmap_kernel_ipt[idx];
+ ptel = &pmap_kernel_ipt[idx];
+ }
oldptel = *ptel;
+
+ switch (oldptel & (SH5_PTEL_PR_W | SH5_PTEL_PR_X)) {
+ case SH5_PTEL_PR_W | SH5_PTEL_PR_X:
+ /*
+ * The page was mapped writable/executable.
+ * Purge the data cache and invalidate insn cache.
+ */
+ __cpu_cache_dpurge_iinv(kva, NBPG);
+ break;
+
+ case SH5_PTEL_PR_W:
+ /*
+ * The page was writable.
+ * Purge the data-cache.
+ */
+ __cpu_cache_dpurge(kva, NBPG);
+ break;
+
+ case SH5_PTEL_PR_X:
+ /*
+ * The page was executable.
+ * Invalidate the data and instruction cache.
+ */
+ __cpu_cache_dpurge_iinv(kva, NBPG);