Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/sh5/sh5 As per TODO item #13, clear the Referenced ...
details: https://anonhg.NetBSD.org/src/rev/fd6d99d21b61
branches: trunk
changeset: 538543:fd6d99d21b61
user: scw <scw%NetBSD.org@localhost>
date: Tue Oct 22 13:10:28 2002 +0000
description:
As per TODO item #13, clear the Referenced bit in the PTE after purging
a mapping from the TLB/cache to prevent duplicate purges for pages
which were not touched since the last purge.
diffstat:
sys/arch/sh5/sh5/pmap.c | 41 +++++++++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 14 deletions(-)
diffs (106 lines):
diff -r f23534e851ea -r fd6d99d21b61 sys/arch/sh5/sh5/pmap.c
--- a/sys/arch/sh5/sh5/pmap.c Tue Oct 22 12:25:18 2002 +0000
+++ b/sys/arch/sh5/sh5/pmap.c Tue Oct 22 13:10:28 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.20 2002/10/22 09:30:27 scw Exp $ */
+/* $NetBSD: pmap.c,v 1.21 2002/10/22 13:10:28 scw Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
@@ -706,9 +706,16 @@
pm = pvo->pvo_pmap;
pteh = pt->pteh;
ptel = pt->ptel;
- pt->ptel = ptel & ~ptebit;
-
- if (pm->pm_asid != PMAP_ASID_UNASSIGNED &&
+
+ /*
+ * Note:
+ * We clear the Referenced bit here so that subsequent calls to
+ * pmap_cache_sync_*() will only purge the cache for the page
+ * if it has been accessed between now and then.
+ */
+ pt->ptel = ptel & ~(ptebit | SH5_PTEL_R);
+
+ if ((ptel & SH5_PTEL_R) != 0 && pm->pm_asid != PMAP_ASID_UNASSIGNED &&
pm->pm_asidgen == pmap_asid_generation) {
/*
* The mapping may be cached in the TLB. Call cpu-specific
@@ -736,10 +743,12 @@
{
ptel_t ptel;
- __cpu_tlbinv((pteh_t)PVO_VADDR(pvo) | SH5_PTEH_SH,
- SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
-
ptel = pmap_kernel_ipt[idx];
+
+ if ((ptel & SH5_PTEL_R) != 0)
+ __cpu_tlbinv((pteh_t)PVO_VADDR(pvo) | SH5_PTEH_SH,
+ SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
+
pmap_pteg_synch(ptel, pvo);
/*
@@ -749,8 +758,11 @@
/*
* It's now safe to change the page table.
+ * We clear the Referenced bit here so that subsequent calls to
+ * pmap_cache_sync_*() will only purge the cache for the page
+ * if it has been accessed between now and then.
*/
- pmap_kernel_ipt[idx] = ptel & ~ptebit;
+ pmap_kernel_ipt[idx] = ptel & ~(ptebit | SH5_PTEL_R);
}
/*
@@ -768,7 +780,7 @@
pmap_pteg_set(volatile pte_t *pt, struct pvo_entry *pvo)
{
- pt->ptel = pvo->pvo_ptel;
+ pt->ptel = pvo->pvo_ptel & ~SH5_PTEL_R;
pt->pteh = (pteh_t) PVO_VADDR(pvo);
pt->vsid = pvo->pvo_pmap->pm_vsid;
}
@@ -792,7 +804,7 @@
pteh = pt->pteh;
pt->pteh = 0;
- if (pm->pm_asid != PMAP_ASID_UNASSIGNED &&
+ if ((ptel & SH5_PTEL_R) != 0 && pm->pm_asid != PMAP_ASID_UNASSIGNED &&
pm->pm_asidgen == pmap_asid_generation) {
/*
* The mapping may be in the TLB. Call cpu-specific
@@ -1579,8 +1591,9 @@
oldptel = *ptel;
*ptel = 0;
- __cpu_tlbinv(((pteh_t)kva & SH5_PTEH_EPN_MASK) | SH5_PTEH_SH,
- SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
+ if ((oldptel & SH5_PTEL_R) != 0)
+ __cpu_tlbinv(((pteh_t)kva & SH5_PTEH_EPN_MASK) | SH5_PTEH_SH,
+ SH5_PTEH_EPN_MASK | SH5_PTEH_SH);
pmap_cache_sync_unmap(kva, oldptel);
@@ -1642,7 +1655,7 @@
pvo->pvo_ptel |= new_mode;
/* Re-insert it back into the page table */
- pmap_kernel_ipt[idx] = pvo->pvo_ptel;
+ pmap_kernel_ipt[idx] = pvo->pvo_ptel & ~SH5_PTEL_R;
}
PMPRINTF(("pmap_change_cache_attr: done\n"));
@@ -1827,7 +1840,7 @@
PVO_PTEGIDX_SET(pvo, i);
}
} else {
- pmap_kernel_ipt[idx] = ptel;
+ pmap_kernel_ipt[idx] = ptel & ~SH5_PTEL_R;
PMPRINTF((
"pmap_pvo_enter: kva 0x%lx, ptel 0x%lx, kipt (idx %d)\n",
va, (u_long)ptel, idx));
Home |
Main Index |
Thread Index |
Old Index