Subject: Patch to add support for ARM9E
To: Richard Earnshaw <Richard.Earnshaw@buzzard.freeserve.co.uk>
From: Scott <scott_allan@picovex.com>
List: port-arm
Date: 07/31/2006 17:02:16
This is a multi-part message in MIME format.
--------------020507050501000103030708
Content-Type: text/plain; charset=ISO-8859-1; format=flowed
Content-Transfer-Encoding: 7bit
Scott wrote:
> I'll go ahead and work on a patch for consideration.
Ok, the patch is attached. It is written such that the ARM1026EJS will use
the enhanced cache functions, but alas, I don't have a board with that core.
It would be great if someone with one would give it a try. I look forward to
feedback.
Thanks,
Scott
--------------020507050501000103030708
Content-Type: text/x-patch;
name="arm926ejs.patch"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline;
filename="arm926ejs.patch"
Index: netbsd_quilt/src/sys/arch/arm/arm32/cpu.c
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/arm32/cpu.c
+++ netbsd_quilt/src/sys/arch/arm/arm32/cpu.c
@@ -169,6 +169,7 @@
CPU_CLASS_ARM8,
CPU_CLASS_ARM9TDMI,
CPU_CLASS_ARM9ES,
+ CPU_CLASS_ARM9EJS,
CPU_CLASS_ARM10E,
CPU_CLASS_ARM10EJ,
CPU_CLASS_SA1,
@@ -316,6 +317,8 @@
generic_steppings },
{ CPU_ID_ARM922T, CPU_CLASS_ARM9TDMI, "ARM922T",
generic_steppings },
+ { CPU_ID_ARM926EJS, CPU_CLASS_ARM9EJS, "ARM926EJ-S",
+ generic_steppings },
{ CPU_ID_ARM940T, CPU_CLASS_ARM9TDMI, "ARM940T",
generic_steppings },
{ CPU_ID_ARM946ES, CPU_CLASS_ARM9ES, "ARM946E-S",
@@ -406,7 +409,8 @@
{ "ARM7TDMI", "CPU_ARM7TDMI" }, /* CPU_CLASS_ARM7TDMI */
{ "ARM8", "CPU_ARM8" }, /* CPU_CLASS_ARM8 */
{ "ARM9TDMI", NULL }, /* CPU_CLASS_ARM9TDMI */
- { "ARM9E-S", NULL }, /* CPU_CLASS_ARM9ES */
+ { "ARM9E-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9ES */
+ { "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */
{ "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */
{ "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */
{ "SA-1", "CPU_SA110" }, /* CPU_CLASS_SA1 */
@@ -483,6 +487,8 @@
aprint_normal(" IDC enabled");
break;
case CPU_CLASS_ARM9TDMI:
+ case CPU_CLASS_ARM9ES:
+ case CPU_CLASS_ARM9EJS:
case CPU_CLASS_ARM10E:
case CPU_CLASS_ARM10EJ:
case CPU_CLASS_SA1:
@@ -561,6 +567,10 @@
#ifdef CPU_ARM9
case CPU_CLASS_ARM9TDMI:
#endif
+#ifdef CPU_ARM9E
+ case CPU_CLASS_ARM9ES:
+ case CPU_CLASS_ARM9EJS:
+#endif
#ifdef CPU_ARM10
case CPU_CLASS_ARM10E:
case CPU_CLASS_ARM10EJ:
Index: netbsd_quilt/src/sys/arch/arm/include/armreg.h
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/include/armreg.h
+++ netbsd_quilt/src/sys/arch/arm/include/armreg.h
@@ -186,6 +186,7 @@
#define CPU_ID_ARM810 0x41018100
#define CPU_ID_ARM920T 0x41129200
#define CPU_ID_ARM922T 0x41029220
+#define CPU_ID_ARM926EJS 0x41069260
#define CPU_ID_ARM940T 0x41029400 /* XXX no MMU */
#define CPU_ID_ARM946ES 0x41049460 /* XXX no MMU */
#define CPU_ID_ARM966ES 0x41049660 /* XXX no MMU */
Index: netbsd_quilt/src/sys/arch/arm/arm/cpufunc.c
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/arm/cpufunc.c
+++ netbsd_quilt/src/sys/arch/arm/arm/cpufunc.c
@@ -457,6 +457,64 @@
};
#endif /* CPU_ARM9 */
+#if defined(CPU_ARM9E) || defined(CPU_ARM10)
+struct cpu_functions armv5_ec_cpufuncs = {
+ /* CPU functions */
+
+ cpufunc_id, /* id */
+ cpufunc_nullop, /* cpwait */
+
+ /* MMU functions */
+
+ cpufunc_control, /* control */
+ cpufunc_domains, /* Domain */
+ armv5_ec_setttb, /* Setttb */
+ cpufunc_faultstatus, /* Faultstatus */
+ cpufunc_faultaddress, /* Faultaddress */
+
+ /* TLB functions */
+
+ armv4_tlb_flushID, /* tlb_flushID */
+ arm10_tlb_flushID_SE, /* tlb_flushID_SE */
+ armv4_tlb_flushI, /* tlb_flushI */
+ arm10_tlb_flushI_SE, /* tlb_flushI_SE */
+ armv4_tlb_flushD, /* tlb_flushD */
+ armv4_tlb_flushD_SE, /* tlb_flushD_SE */
+
+ /* Cache operations */
+
+ armv5_ec_icache_sync_all, /* icache_sync_all */
+ armv5_ec_icache_sync_range, /* icache_sync_range */
+
+ armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
+ armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
+/*XXX*/ armv5_ec_dcache_wbinv_range, /* dcache_inv_range */
+ armv5_ec_dcache_wb_range, /* dcache_wb_range */
+
+ armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
+ armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
+
+ /* Other functions */
+
+ cpufunc_nullop, /* flush_prefetchbuf */
+ armv4_drain_writebuf, /* drain_writebuf */
+ cpufunc_nullop, /* flush_brnchtgt_C */
+ (void *)cpufunc_nullop, /* flush_brnchtgt_E */
+
+ (void *)cpufunc_nullop, /* sleep */
+
+ /* Soft functions */
+
+ cpufunc_null_fixup, /* dataabt_fixup */
+ cpufunc_null_fixup, /* prefetchabt_fixup */
+
+ arm10_context_switch, /* context_switch */
+
+ arm10_setup /* cpu setup */
+
+};
+#endif /* CPU_ARM9E || CPU_ARM10 */
+
#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
/* CPU functions */
@@ -468,7 +526,7 @@
cpufunc_control, /* control */
cpufunc_domains, /* Domain */
- arm10_setttb, /* Setttb */
+ armv5_setttb, /* Setttb */
cpufunc_faultstatus, /* Faultstatus */
cpufunc_faultaddress, /* Faultaddress */
@@ -571,7 +629,7 @@
arm11_setup /* cpu setup */
};
-#endif /* CPU_ARM10 || CPU_ARM11 */
+#endif /* CPU_ARM11 */
#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
@@ -812,7 +870,7 @@
u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
- defined (CPU_ARM10) || defined (CPU_ARM11) || \
+ defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM11) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
static void get_cachetype_cp15 __P((void));
@@ -1043,10 +1101,19 @@
return 0;
}
#endif /* CPU_ARM9 */
+#if defined(CPU_ARM9E) || defined(CPU_ARM10)
+ if (cputype == CPU_ID_ARM926EJS ||
+ cputype == CPU_ID_ARM1026EJS) {
+ cpufuncs = armv5_ec_cpufuncs;
+ cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
+ get_cachetype_cp15();
+ pmap_pte_init_generic();
+ return 0;
+ }
+#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
if (/* cputype == CPU_ID_ARM1020T || */
- cputype == CPU_ID_ARM1020E ||
- cputype == CPU_ID_ARM1026EJS) {
+ cputype == CPU_ID_ARM1020E) {
/*
* Select write-through cacheing (this isn't really an
* option on ARM1020T).
@@ -1616,8 +1683,8 @@
*/
#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
- defined(CPU_ARM8) || defined (CPU_ARM9) || defined(CPU_SA110) || \
- defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
+ defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
defined(CPU_ARM10) || defined(CPU_ARM11)
@@ -1961,7 +2028,7 @@
}
#endif /* CPU_ARM9 */
-#ifdef CPU_ARM10
+#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_option arm10_options[] = {
{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
@@ -2013,7 +2080,7 @@
/* And again. */
cpu_idcache_wbinv_all();
}
-#endif /* CPU_ARM10 */
+#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM11
struct cpu_option arm11_options[] = {
Index: netbsd_quilt/src/sys/arch/arm/conf/files.arm
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/conf/files.arm
+++ netbsd_quilt/src/sys/arch/arm/conf/files.arm
@@ -6,8 +6,8 @@
# CPU types. Make sure to update <arm/cpuconf.h> if you change this list.
defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3
defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8
- CPU_ARM9 CPU_ARM10 CPU_ARM11 CPU_SA110
- CPU_SA1100 CPU_SA1110 CPU_IXP12X0
+ CPU_ARM9 CPU_ARM9E CPU_ARM10 CPU_ARM11
+ CPU_SA110 CPU_SA1100 CPU_SA1110 CPU_IXP12X0
CPU_XSCALE_80200 CPU_XSCALE_80321
CPU_XSCALE_PXA250 CPU_XSCALE_PXA270
CPU_XSCALE_IXP425
@@ -90,9 +90,10 @@
file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi
file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8
file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9
-file arch/arm/arm/cpufunc_asm_arm10.S cpu_arm10
+file arch/arm/arm/cpufunc_asm_arm10.S cpu_arm9e | cpu_arm10
file arch/arm/arm/cpufunc_asm_arm11.S cpu_arm11
-file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm10 |
+file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm9e |
+ cpu_arm10 |
cpu_sa110 |
cpu_sa1100 |
cpu_sa1110 |
@@ -103,6 +104,7 @@
cpu_xscale_pxa250 |
cpu_xscale_pxa270
file arch/arm/arm/cpufunc_asm_armv5.S cpu_arm10 | cpu_arm11
+file arch/arm/arm/cpufunc_asm_armv5_ec.S cpu_arm9e | cpu_arm10
file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110 | cpu_sa1100 |
cpu_sa1110 |
cpu_ixp12x0
Index: netbsd_quilt/src/sys/arch/arm/include/cpuconf.h
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/include/cpuconf.h
+++ netbsd_quilt/src/sys/arch/arm/include/cpuconf.h
@@ -65,6 +65,7 @@
defined(CPU_ARM6) + defined(CPU_ARM7) + \
defined(CPU_ARM7TDMI) + \
defined(CPU_ARM8) + defined(CPU_ARM9) + \
+ defined(CPU_ARM9E) + \
defined(CPU_ARM10) + \
defined(CPU_ARM11) + \
defined(CPU_SA110) + defined(CPU_SA1100) + \
@@ -105,8 +106,9 @@
#endif
#if !defined(_KERNEL_OPT) || \
- (defined(CPU_ARM10) || defined(CPU_XSCALE_80200) || \
- defined(CPU_XSCALE_80321) || defined(__CPU_XSCALE_PXA2XX))
+ (defined(CPU_ARM9E) || defined(CPU_ARM10) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(__CPU_XSCALE_PXA2XX))
#define ARM_ARCH_5 1
#else
#define ARM_ARCH_5 0
@@ -156,8 +158,8 @@
#if !defined(_KERNEL_OPT) || \
(defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
- defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM10) || \
- defined(CPU_ARM11))
+ defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) || \
+ defined(CPU_ARM10) || defined(CPU_ARM11))
#define ARM_MMU_GENERIC 1
#else
#define ARM_MMU_GENERIC 0
Index: netbsd_quilt/src/sys/arch/arm/include/cpufunc.h
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/include/cpufunc.h
+++ netbsd_quilt/src/sys/arch/arm/include/cpufunc.h
@@ -334,9 +334,7 @@
extern unsigned arm9_dcache_index_inc;
#endif
-#ifdef CPU_ARM10
-void arm10_setttb __P((u_int));
-
+#if defined(CPU_ARM9E) || defined(CPU_ARM10)
void arm10_tlb_flushID_SE __P((u_int));
void arm10_tlb_flushI_SE __P((u_int));
@@ -362,7 +360,24 @@
void arm11_drain_writebuf __P((void));
#endif
+#if defined(CPU_ARM9E) || defined (CPU_ARM10)
+void armv5_ec_setttb __P((u_int));
+
+void armv5_ec_icache_sync_all __P((void));
+void armv5_ec_icache_sync_range __P((vaddr_t, vsize_t));
+
+void armv5_ec_dcache_wbinv_all __P((void));
+void armv5_ec_dcache_wbinv_range __P((vaddr_t, vsize_t));
+void armv5_ec_dcache_inv_range __P((vaddr_t, vsize_t));
+void armv5_ec_dcache_wb_range __P((vaddr_t, vsize_t));
+
+void armv5_ec_idcache_wbinv_all __P((void));
+void armv5_ec_idcache_wbinv_range __P((vaddr_t, vsize_t));
+#endif
+
#if defined (CPU_ARM10) || defined (CPU_ARM11)
+void armv5_setttb __P((u_int));
+
void armv5_icache_sync_all __P((void));
void armv5_icache_sync_range __P((vaddr_t, vsize_t));
@@ -380,8 +395,8 @@
extern unsigned armv5_dcache_index_inc;
#endif
-#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
- defined(CPU_SA1100) || defined(CPU_SA1110) || \
+#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
+ defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
Index: netbsd_quilt/src/sys/arch/arm/arm/cpufunc_asm_armv5_ec.S
===================================================================
--- /dev/null
+++ netbsd_quilt/src/sys/arch/arm/arm/cpufunc_asm_armv5_ec.S
@@ -0,0 +1,206 @@
+/* $NetBSD: $ */
+
+/*
+ * Copyright (c) 2002, 2005 ARM Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ARMv5 assembly functions for manipulating caches.
+ * These routines can be used by any core that supports both the set/index
+ * operations and the test and clean operations for efficiently cleaning the
+ * entire DCache. If a core does not have the test and clean operations, but
+ * does have the set/index operations, use the routines in cpufunc_asm_armv5.S.
+ * This source was derived from that file.
+ */
+
+#include <machine/cpu.h>
+#include <machine/asm.h>
+
+/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(armv5_ec_setttb)
+ /*
+ * Some other ARM ports save registers on the stack, call the
+ * idcache_wbinv_all function and then restore the registers from the
+ * stack before setting the TTB. I observed that this caused a
+ * problem when the old and new translation table entries' buffering
+ * bits were different. If I saved the registers in other registers
+ * or invalidated the caches when I returned from idcache_wbinv_all,
+ * it worked fine. If not, I ended up executing at an invalid PC.
+ * For armv5_ec_setttb, the idcache_wbinv_all is simple enough, I just
+ * do it directly and entirely avoid the problem.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
+1: mrc p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
+ bne 1b /* More to do? */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+
+ mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
+
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
+ RET
+
+/*
+ * Cache operations. For the entire cache we use the enhanced cache
+ * operations.
+ */
+
+ENTRY_NP(armv5_ec_icache_sync_range)
+ ldr ip, .Larmv5_ec_line_size
+ cmp r1, #0x4000
+ bcs .Larmv5_ec_icache_sync_all
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
+
+ENTRY_NP(armv5_ec_icache_sync_all)
+.Larmv5_ec_icache_sync_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache cleaning code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Flush I cache */
+ /* Fall through to clean Dcache. */
+
+.Larmv5_ec_dcache_wb:
+1:
+ mrc p15, 0, r15, c7, c10, 3 /* Test and clean (don't invalidate) */
+ bne 1b /* More to do? */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
+
+.Larmv5_ec_line_size:
+ .word _C_LABEL(arm_pdcache_line_size)
+
+ENTRY(armv5_ec_dcache_wb_range)
+ ldr ip, .Larmv5_ec_line_size
+ cmp r1, #0x4000
+ bcs .Larmv5_ec_dcache_wb
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c10, 1 /* Clean D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
+
+ENTRY(armv5_ec_dcache_wbinv_range)
+ ldr ip, .Larmv5_ec_line_size
+ cmp r1, #0x4000
+ bcs .Larmv5_ec_dcache_wbinv_all
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
+
+/*
+ * Note, we must not invalidate everything. If the range is too big we
+ * must use wb-inv of the entire cache.
+ */
+ENTRY(armv5_ec_dcache_inv_range)
+ ldr ip, .Larmv5_ec_line_size
+ cmp r1, #0x4000
+ bcs .Larmv5_ec_dcache_wbinv_all
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c6, 1 /* Invalidate D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
+
+ENTRY(armv5_ec_idcache_wbinv_range)
+ ldr ip, .Larmv5_ec_line_size
+ cmp r1, #0x4000
+ bcs .Larmv5_ec_idcache_wbinv_all
+ ldr ip, [ip]
+ sub r1, r1, #1 /* Don't overrun */
+ sub r3, ip, #1
+ and r2, r0, r3
+ add r1, r1, r2
+ bic r0, r0, r3
+1:
+ mcr p15, 0, r0, c7, c5, 1 /* Invalidate I cache SE with VA */
+ mcr p15, 0, r0, c7, c14, 1 /* Purge D cache SE with VA */
+ add r0, r0, ip
+ subs r1, r1, ip
+ bpl 1b
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
+
+ENTRY_NP(armv5_ec_idcache_wbinv_all)
+.Larmv5_ec_idcache_wbinv_all:
+ /*
+ * We assume that the code here can never be out of sync with the
+ * dcache, so that we can safely flush the Icache and fall through
+ * into the Dcache purging code.
+ */
+ mcr p15, 0, r0, c7, c5, 0 /* Invalidate ICache */
+ /* Fall through to purge Dcache. */
+
+ENTRY(armv5_ec_dcache_wbinv_all)
+.Larmv5_ec_dcache_wbinv_all:
+1: mrc p15, 0, r15, c7, c14, 3 /* Test, clean and invalidate DCache */
+ bne 1b /* More to do? */
+ mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
+ RET
Index: netbsd_quilt/src/sys/arch/arm/arm/cpufunc_asm_arm10.S
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/arm/cpufunc_asm_arm10.S
+++ netbsd_quilt/src/sys/arch/arm/arm/cpufunc_asm_arm10.S
@@ -35,22 +35,6 @@
#include <machine/asm.h>
/*
- * Functions to set the MMU Translation Table Base register
- *
- * We need to clean and flush the cache as it uses virtual
- * addresses that are about to change.
- */
-ENTRY(arm10_setttb)
- stmfd sp!, {r0, lr}
- bl _C_LABEL(armv5_idcache_wbinv_all)
- ldmfd sp!, {r0, lr}
-
- mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
-
- mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
- RET
-
-/*
* TLB functions
*/
ENTRY(arm10_tlb_flushID_SE)
Index: netbsd_quilt/src/sys/arch/arm/arm/cpufunc_asm_armv5.S
===================================================================
--- netbsd_quilt.orig/src/sys/arch/arm/arm/cpufunc_asm_armv5.S
+++ netbsd_quilt/src/sys/arch/arm/arm/cpufunc_asm_armv5.S
@@ -37,6 +37,22 @@
#include <machine/asm.h>
/*
+ * Functions to set the MMU Translation Table Base register
+ *
+ * We need to clean and flush the cache as it uses virtual
+ * addresses that are about to change.
+ */
+ENTRY(armv5_setttb)
+ stmfd sp!, {r0, lr}
+ bl _C_LABEL(armv5_idcache_wbinv_all)
+ ldmfd sp!, {r0, lr}
+
+ mcr p15, 0, r0, c2, c0, 0 /* load new TTB */
+
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate I+D TLBs */
+ RET
+
+/*
* Cache operations. For the entire cache we use the set/index
* operations.
*/
--------------020507050501000103030708--