Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys/arch/arm/arm32 Some armv7 fixes for speculative tlb loads.



details:   https://anonhg.NetBSD.org/src/rev/8ac18b681db3
branches:  trunk
changeset: 784907:8ac18b681db3
user:      matt <matt%NetBSD.org@localhost>
date:      Wed Feb 13 23:14:35 2013 +0000

description:
Some armv7 fixes for speculative tlb loads.

diffstat:

 sys/arch/arm/arm32/pmap.c |  40 ++++++++++++++++++++++++++++++++++++++--
 1 files changed, 38 insertions(+), 2 deletions(-)

diffs (85 lines):

diff -r 1eec4c1efd82 -r 8ac18b681db3 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed Feb 13 23:10:58 2013 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed Feb 13 23:14:35 2013 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.252 2013/02/04 13:37:30 macallan Exp $      */
+/*     $NetBSD: pmap.c,v 1.253 2013/02/13 23:14:35 matt Exp $  */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.252 2013/02/04 13:37:30 macallan Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.253 2013/02/13 23:14:35 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -605,14 +605,32 @@
  * Macro to determine if a mapping might be resident in the
  * instruction cache and/or TLB
  */
+#if ARM_MMU_V7 > 0
+/*
+ * Speculative loads by Cortex cores can cause TLB entries to be filled even if
+ * there are no explicit accesses, so there may be always be TLB entries to
+ * flush.  If we used ASIDs then this would not be a problem.
+ */
+#define        PV_BEEN_EXECD(f)  (((f) & PVF_EXEC) == PVF_EXEC)
+#else
 #define        PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
+#endif
 #define        PV_IS_EXEC_P(f)   (((f) & PVF_EXEC) != 0)
 
 /*
  * Macro to determine if a mapping might be resident in the
  * data cache and/or TLB
  */
+#if ARM_MMU_V7 > 0
+/*
+ * Speculative loads by Cortex cores can cause TLB entries to be filled even if
+ * there are no explicit accesses, so there may always be TLB entries to
+ * flush.  If we used ASIDs then this would not be a problem.
+ */
+#define        PV_BEEN_REFD(f)   (1)
+#else
 #define        PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
+#endif
 
 /*
  * Local prototypes
@@ -726,7 +744,16 @@
 
        if (pm->pm_cstate.cs_tlb_id) {
                cpu_tlb_flushID();
+#if ARM_MMU_V7 == 0
+               /*
+                * Speculative loads by Cortex cores can cause TLB entries to
+                * be filled even if there are no explicit accesses, so there
+                * may always be TLB entries to flush.  If we used ASIDs
+                * then it would not be a problem.
+                * This is not true for other CPUs.
+                */
                pm->pm_cstate.cs_tlb = 0;
+#endif
        }
 }
 
@@ -736,7 +763,16 @@
 
        if (pm->pm_cstate.cs_tlb_d) {
                cpu_tlb_flushD();
+#if ARM_MMU_V7 == 0
+               /*
+                * Speculative loads by Cortex cores can cause TLB entries to
+                * be filled even if there are no explicit accesses, so there
+                * may always be TLB entries to flush.  If we used ASIDs
+                * then it would not be a problem.
+                * This is not true for other CPUs.
+                */
                pm->pm_cstate.cs_tlb_d = 0;
+#endif
        }
 }
 



Home | Main Index | Thread Index | Old Index