Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/powerpc Re-enable PTE_EXEC. PTE_EXEC is now also c...
details: https://anonhg.NetBSD.org/src/rev/8803be60487b
branches: trunk
changeset: 535301:8803be60487b
user: matt <matt%NetBSD.org@localhost>
date: Wed Aug 14 14:25:15 2002 +0000
description:
Re-enable PTE_EXEC. PTE_EXEC is now also cleared in pmap_zero_page,
pmap_copy_page, and pmap_clear_modify (pmap_clear_bit). Remove #ifdef
MULTIPROCESSOR since the cache instructions operate on all caches on
all processors.
diffstat:
sys/arch/powerpc/mpc6xx/pmap.c | 108 +++++++++++++++++++++++-----------
sys/arch/powerpc/powerpc/pmap_subr.c | 45 +++++++++++---
2 files changed, 107 insertions(+), 46 deletions(-)
diffs (truncated from 321 to 300 lines):
diff -r e46802a363da -r 8803be60487b sys/arch/powerpc/mpc6xx/pmap.c
--- a/sys/arch/powerpc/mpc6xx/pmap.c Wed Aug 14 13:02:58 2002 +0000
+++ b/sys/arch/powerpc/mpc6xx/pmap.c Wed Aug 14 14:25:15 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.54 2002/08/07 19:04:05 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.55 2002/08/14 14:25:16 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -250,11 +250,10 @@
static int pmap_initialized;
-#if defined(DEBUG)
+#if defined(DEBUG) || defined(PMAPDEBUG)
#define PMAPDEBUG_BOOT 0x0001
#define PMAPDEBUG_PTE 0x0002
-#define PMAPDEBUG_PAMAP 0x0004
-#define PMAPDEBUG_SYNCICACHE 0x0008
+#define PMAPDEBUG_EXEC 0x0008
#define PMAPDEBUG_PVOENTER 0x0010
#define PMAPDEBUG_PVOREMOVE 0x0020
#define PMAPDEBUG_ACTIVATE 0x0100
@@ -939,9 +938,7 @@
void
pmap_update(struct pmap *pmap)
{
-#ifdef MULTIPROCESSOR
TLBSYNC();
-#endif
}
/*
@@ -1384,11 +1381,8 @@
* If this is a managed page, and it's the first reference to the
* page clear the execness of the page. Otherwise fetch the execness.
*/
-#if !defined(MULTIPROCESSOR) && 0 /* disable for now */
- /* XXX more is needed for MP */
if (pg != NULL)
was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
-#endif
DPRINTFN(ENTER, (" was_exec=%d", was_exec));
@@ -1412,6 +1406,15 @@
else
pte_lo |= PTE_BR;
+ /*
+ * If this was in response to a fault, "pre-fault" the PTE's
+ * changed/referenced bit appropriately.
+ */
+ if (flags & VM_PROT_WRITE)
+ pte_lo |= PTE_CHG;
+ if (flags & (VM_PROT_READ|VM_PROT_WRITE))
+ pte_lo |= PTE_REF;
+
#if 0
if (pm == pmap_kernel()) {
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ)
@@ -1447,11 +1450,20 @@
was_exec == 0) {
DPRINTFN(ENTER, (" syncicache"));
pmap_syncicache(pa, NBPG);
- if (pg != NULL)
+ if (pg != NULL) {
pmap_attr_save(pg, PTE_EXEC);
+#if defined(DEBUG) || defined(PMAPDEBUG)
+ if (pmapdebug & PMAPDEBUG_ENTER)
printf(" marked-as-exec");
+ else if (pmapdebug & PMAPDEBUG_EXEC)
+ printf("[pmap_enter: %#lx: marked-as-exec]\n",
+ pg->phys_addr);
+
+#endif
+ }
}
- DPRINTFN(ENTER, (" error=%d\n", error));
+ DPRINTFN(ENTER, (": error=%d\n", error));
return error;
}
@@ -1490,6 +1502,9 @@
else
pte_lo |= PTE_BR;
+ /*
+ * We don't care about REF/CHG on PVOs on the unmanaged list.
+ */
s = splvm();
msr = pmap_interrupts_off();
error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
@@ -1500,19 +1515,6 @@
if (error != 0)
panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
va, pa, error);
-
- /*
- * Flush the real memory from the instruction cache.
- * If it's writeable, clear the PTE_EXEC attribute.
- */
- if (prot & VM_PROT_EXECUTE) {
- if ((pte_lo & (PTE_IG)) == 0)
- pmap_syncicache(pa, NBPG);
- } else if (prot & VM_PROT_WRITE) {
- struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
- if (pg != NULL)
- pmap_attr_clear(pg, PTE_EXEC);
- }
}
void
@@ -1586,15 +1588,12 @@
int s;
int pteidx;
-#if 0
/*
- * Since this routine only downgrades protection, if the
- * maximal protection is desired, there isn't any change
- * to be made.
+ * Since this routine only downgrades protection, we should
+ * always be called without WRITE permission.
*/
- if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
- return;
-#endif
+ KASSERT((prot & VM_PROT_WRITE) == 0);
+
/*
* If there is no protection, this is equivalent to
* remove the pmap from the pmap.
@@ -1695,6 +1694,7 @@
* maximal protection is desired, there isn't any change
* to be made.
*/
+ KASSERT((prot & VM_PROT_WRITE) == 0);
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
return;
@@ -1706,8 +1706,11 @@
* VM_PROT_NONE. At that point, we can clear the exec flag
* since we know the page will have different contents.
*/
- if ((prot & VM_PROT_READ) == 0)
+ if ((prot & VM_PROT_READ) == 0) {
+ DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
+ pg->phys_addr));
pmap_attr_clear(pg, PTE_EXEC);
+ }
pvo_head = vm_page_to_pvoh(pg);
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
@@ -1853,6 +1856,7 @@
boolean_t
pmap_clear_bit(struct vm_page *pg, int ptebit)
{
+ struct pvo_head *pvoh = vm_page_to_pvoh(pg);
struct pvo_entry *pvo;
volatile pte_t *pt;
u_int32_t msr;
@@ -1861,10 +1865,15 @@
s = splvm();
msr = pmap_interrupts_off();
+
+ /*
+ * Fetch the cache value
+ */
+ rv |= pmap_attr_fetch(pg);
+
/*
* Clear the cached value.
*/
- rv |= pmap_attr_fetch(pg);
pmap_attr_clear(pg, ptebit);
/*
@@ -1880,20 +1889,47 @@
* For each pvo entry, clear pvo's ptebit. If this pvo has a
* valid PTE, clear the ptebit from the valid PTE.
*/
- LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
+ LIST_FOREACH(pvo, pvoh, pvo_vlink) {
PMAP_PVO_CHECK(pvo); /* sanity check */
pt = pmap_pvo_to_pte(pvo, -1);
if (pt != NULL) {
- pmap_pte_synch(pt, &pvo->pvo_pte);
+ /*
+ * Only sync the PTE if the bit we are looking
+ * for is not already set.
+ */
+ if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
+ pmap_pte_synch(pt, &pvo->pvo_pte);
+ /*
+ * If the bit we are looking for was already set,
+ * clear that bit in the pte.
+ */
if (pvo->pvo_pte.pte_lo & ptebit)
pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
}
- rv |= pvo->pvo_pte.pte_lo;
+ rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
pvo->pvo_pte.pte_lo &= ~ptebit;
PMAP_PVO_CHECK(pvo); /* sanity check */
}
pmap_interrupts_restore(msr);
splx(s);
+ /*
+ * If we are clearing the modify bit and this page was marked EXEC
+ * and the user of the page thinks the page was modified, then we
+ * need to clean it from the icache if it's mapped or clear the EXEC
+ * bit if it's not mapped. The page itself might not have the CHG
+ * bit set if the modification was done via DMA to the page.
+ */
+ if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
+ if (LIST_EMPTY(pvoh)) {
+ DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
+ pg->phys_addr));
+ pmap_attr_clear(pg, PTE_EXEC);
+ } else {
+ DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
+ pg->phys_addr));
+ pmap_syncicache(pg->phys_addr, NBPG);
+ }
+ }
return (rv & ptebit) != 0;
}
diff -r e46802a363da -r 8803be60487b sys/arch/powerpc/powerpc/pmap_subr.c
--- a/sys/arch/powerpc/powerpc/pmap_subr.c Wed Aug 14 13:02:58 2002 +0000
+++ b/sys/arch/powerpc/powerpc/pmap_subr.c Wed Aug 14 14:25:15 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_subr.c,v 1.3 2002/07/28 07:05:19 chs Exp $ */
+/* $NetBSD: pmap_subr.c,v 1.4 2002/08/14 14:25:15 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -83,8 +83,20 @@
pmap_zero_page(paddr_t pa)
{
size_t linewidth;
- u_int32_t msr;
+ register_t msr;
+#if defined(PPC_MPC6XX) && !defined(OLDPMAP)
+ {
+ /*
+ * If we are zeroing this page, we must clear the EXEC-ness
+ * of this page since the page contents will have changed.
+ */
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+ KDASSERT(pg != NULL);
+ KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
+ pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
+ }
+#endif
#ifdef ALTIVEC
if (pmap_use_altivec) {
vzeropage(pa);
@@ -139,11 +151,24 @@
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
- const long *sp;
- long *dp;
+ const register_t *sp;
+ register_t *dp;
size_t i;
- u_int32_t msr;
+ register_t msr;
+#if defined(PPC_MPC6XX) && !defined(OLDPMAP)
+ {
+ /*
+ * If we are copying to the destination page, we must clear
+ * the EXEC-ness of this page since the page contents have
+ * changed.
+ */
+ struct vm_page *pg = PHYS_TO_VM_PAGE(dst);
+ KDASSERT(pg != NULL);
+ KDASSERT(LIST_EMPTY(&pg->mdpage.mdpg_pvoh));
+ pg->mdpage.mdpg_attrs &= ~PTE_EXEC;
+ }
+#endif
#ifdef ALTIVEC
if (pmap_use_altivec) {
vcopypage(dst, src);
@@ -171,8 +196,8 @@
* Copy the page. Don't use memcpy as we can't refer to the
* kernel stack at this point.
*/
- sp = (long *) src;
- dp = (long *) dst;
+ sp = (const register_t *) src;
+ dp = (register_t *) dst;
for (i = 0; i < NBPG/sizeof(dp[0]); i += 8, dp += 8, sp += 8) {
dp[0] = sp[0]; dp[1] = sp[1]; dp[2] = sp[2]; dp[3] = sp[3];
Home |
Main Index |
Thread Index |
Old Index