Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch Rework PIC method to be simpler. Change to be more c...
details: https://anonhg.NetBSD.org/src/rev/8001c4727890
branches: trunk
changeset: 326905:8001c4727890
user: matt <matt%NetBSD.org@localhost>
date: Fri Feb 21 22:22:48 2014 +0000
description:
Rework PIC method to be simpler. Change to be more Cortex neutral.
diffstat:
sys/arch/arm/cortex/a9_mpsubr.S | 291 ++++++++++++++++++++-----------
sys/arch/evbarm/bcm53xx/bcm53xx_start.S | 38 +--
sys/arch/evbarm/cubie/cubie_start.S | 48 +++-
3 files changed, 235 insertions(+), 142 deletions(-)
diffs (truncated from 679 to 300 lines):
diff -r db17ef3dc932 -r 8001c4727890 sys/arch/arm/cortex/a9_mpsubr.S
--- a/sys/arch/arm/cortex/a9_mpsubr.S Fri Feb 21 22:18:47 2014 +0000
+++ b/sys/arch/arm/cortex/a9_mpsubr.S Fri Feb 21 22:22:48 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: a9_mpsubr.S,v 1.12 2014/01/24 05:14:11 matt Exp $ */
+/* $NetBSD: a9_mpsubr.S,v 1.13 2014/02/21 22:22:48 matt Exp $ */
/*-
* Copyright (c) 2012 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -111,27 +111,34 @@
#if defined(CPU_CORTEXA8)
#undef CPU_CONTROL_SWP_ENABLE // not present on A8
-#define CPU_CONTROL_SWP_ENABLE 0
+#define CPU_CONTROL_SWP_ENABLE 0
#endif
#ifdef __ARMEL__
-#undef CPU_CONTROL_EX_BEND // needs to clear on LE systems
-#define CPU_CONTROL_EX_BEND 0
+#define CPU_CONTROL_EX_BEND_SET 0
+#else
+#define CPU_CONTROL_EX_BEND_SET CPU_CONTROL_EX_BEND
#endif
#ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
-#undef CPU_CONTROL_AFLT_ENABLE
-#define CPU_CONTROL_AFLT_ENABLE 0
+#define CPU_CONTROL_AFLT_ENABLE_CLR CPU_CONTROL_AFLT_ENABLE
+#define CPU_CONTROL_AFLT_ENABLE_SET 0
+#else
+#deifne CPU_CONTROL_AFLT_ENABLE_CLR 0
+#define CPU_CONTROL_AFLT_ENABLE_SET CPU_CONTROL_AFLT_ENABLE
#endif
-#define CPU_CONTROL_SET \
+#define CPU_CONTROL_SET \
(CPU_CONTROL_MMU_ENABLE | \
- CPU_CONTROL_AFLT_ENABLE | \
- CPU_CONTROL_EX_BEND | \
+ CPU_CONTROL_AFLT_ENABLE_SET | \
CPU_CONTROL_DC_ENABLE | \
CPU_CONTROL_SWP_ENABLE | \
CPU_CONTROL_BPRD_ENABLE | \
CPU_CONTROL_IC_ENABLE | \
+ CPU_CONTROL_EX_BEND_SET | \
CPU_CONTROL_UNAL_ENABLE)
+#define CPU_CONTROL_CLR \
+ (CPU_CONTROL_AFLT_ENABLE_CLR)
+
arm_cpuinit:
/*
* In theory, because the MMU is off, we shouldn't need all of this,
@@ -140,41 +147,57 @@
*/
mov ip, lr
mov r10, r0
+ mov r1, #0
- mcr p15, 0, r10, c7, c5, 0 /* invalidate I cache */
+ mcr p15, 0, r1, c7, c5, 0 // invalidate I cache
+
+ mrc p15, 0, r2, c1, c0, 0 // read SCTRL
+ movw r1, #(CPU_CONTROL_DC_ENABLE|CPU_CONTROL_IC_ENABLE)
+ bic r2, r2, r1 // clear I+D cache enable
- mrc p15, 0, r2, c1, c0, 0 /* " " " */
- bic r2, r2, #CPU_CONTROL_DC_ENABLE @ clear data cache enable
- bic r2, r2, #CPU_CONTROL_IC_ENABLE @ clear instruction cache enable
- mcr p15, 0, r2, c1, c0, 0 /* " " " */
+#ifdef __ARMEB__
+ /*
+ * SCTRL.EE determines the endianness of translation table lookups.
+ * So we need to make sure it's set before starting to use the new
+ * translation tables (which are big endian).
+ */
+ orr r2, r2, #CPU_CONTROL_EX_BEND
+ bic r2, r2, #CPU_CONTROL_MMU_ENABLE
+ pli [pc, #32] /* preload the next few cachelines */
+ pli [pc, #64]
+ pli [pc, #96]
+ pli [pc, #128]
+#endif
+
+ mcr p15, 0, r2, c1, c0, 0 /* write SCTRL */
XPUTC(#70)
- mov r1, #0
dsb /* Drain the write buffers. */
-
+1:
XPUTC(#71)
- mrc p15, 0, r2, c0, c0, 5 /* get MPIDR */
- cmp r2, #0
+ mrc p15, 0, r1, c0, c0, 5 /* get MPIDR */
+ cmp r1, #0
orrlt r10, r10, #0x5b /* MP, cachable (Normal WB) */
orrge r10, r10, #0x1b /* Non-MP, cacheable, normal WB */
mcr p15, 0, r10, c2, c0, 0 /* Set Translation Table Base */
- XPUTC(#49)
+ XPUTC(#72)
+ mov r1, #0
mcr p15, 0, r1, c2, c0, 2 /* Set Translation Table Control */
- XPUTC(#72)
+ XPUTC(#73)
mov r1, #0
mcr p15, 0, r1, c8, c7, 0 /* Invalidate TLBs */
/* Set the Domain Access register. Very important! */
- XPUTC(#73)
+ XPUTC(#74)
mov r1, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT)
mcr p15, 0, r1, c3, c0, 0
/*
* Enable the MMU, etc.
*/
- XPUTC(#74)
+ XPUTC(#75)
mrc p15, 0, r0, c1, c0, 0
movw r3, #:lower16:CPU_CONTROL_SET
@@ -182,19 +205,22 @@
movt r3, #:upper16:CPU_CONTROL_SET
#endif
orr r0, r0, r3
+#if defined(CPU_CONTROL_CLR) && (CPU_CONTROL_CLR != 0)
+ bic r0, r0, #CPU_CONTROL_CLR
+#endif
+ pli 1f
dsb
- .align 5
@ turn mmu on!
- mov r0, r0
- mcr p15, 0, r0, c1, c0, 0
+ mov r0, r0 /* fetch instruction cacheline */
+1: mcr p15, 0, r0, c1, c0, 0
/*
* Ensure that the coprocessor has finished turning on the MMU.
*/
mrc p15, 0, r0, c0, c0, 0 /* Read an arbitrary value. */
mov r0, r0 /* Stall until read completes. */
- XPUTC(#76)
+1: XPUTC(#76)
bx ip /* return */
@@ -207,14 +233,17 @@
/* bits to set in the Control Register */
#if defined(VERBOSE_INIT_ARM) && XPUTC_COM
-#define TIMO 0x25000
+#define TIMO 0x25000
#ifndef COM_MULT
-#define COM_MULT 1
+#define COM_MULT 1
#endif
xputc:
#ifdef MULTIPROCESSOR
+ adr r3, xputc
+ movw r2, #:lower16:comlock
+ movt r2, #:upper16:comlock
+ bfi r3, r2, #0, #28
mov r2, #1
- ldr r3, .Lcomlock
10:
ldrex r1, [r3]
cmp r1, #0
@@ -226,7 +255,13 @@
#endif
mov r2, #TIMO
- ldr r3, .Luart0
+#ifdef CONADDR
+ movw r3, #:lower16:CONADDR
+ movt r3, #:upper16:CONADDR
+#elif defined(CONSADDR)
+ movw r3, #:lower16:CONSADDR
+ movt r3, #:upper16:CONSADDR
+#endif
1:
#if COM_MULT == 1
ldrb r1, [r3, #(COM_LSR*COM_MULT)]
@@ -278,42 +313,40 @@
bne 3b
4:
#ifdef MULTIPROCESSOR
- ldr r3, .Lcomlock
+ adr r3, xputc
+ movw r2, #:lower16:comlock
+ movt r2, #:upper16:comlock
+ bfi r3, r2, #0, #28
mov r0, #0
str r0, [r3]
dsb
#endif
bx lr
-.Luart0:
-#ifdef CONADDR
- .word CONADDR
-#elif defined(CONSADDR)
- .word CONSADDR
-#endif
-
#ifdef MULTIPROCESSOR
-.Lcomlock:
- .word comlock
-
.pushsection .data
comlock:
- .p2align 2
+ .p2align 4
.word 0 @ not in bss
+ .p2align 4
.popsection
#endif /* MULTIPROCESSOR */
#endif /* VERBOSE_INIT_ARM */
-#ifdef CPU_CORTEXA9
-a9_start:
+cortex_init:
mov r10, lr @ save lr
cpsid if, #PSR_SVC32_MODE
XPUTC(#64)
- bl _C_LABEL(armv7_icache_inv_all) @ invalidate i-cache
+ adr ip, cortex_init
+ movw r0, #:lower16:_C_LABEL(armv7_icache_inv_all)
+ movt r0, #:upper16:_C_LABEL(armv7_icache_inv_all)
+ bfi ip, r0, #0, #28
+ blx ip @ toss i-cache
+#ifdef CPU_CORTEXA9
/*
* Step 1a, invalidate the all cache tags in all ways on the SCU.
*/
@@ -327,14 +360,20 @@
str r1, [r3, #SCU_INV_ALL_REG] @ write scu invalidate all
dsb
isb
+#endif
/*
* Step 1b, invalidate the data cache
*/
XPUTC(#66)
- bl _C_LABEL(armv7_dcache_wbinv_all) @ writeback/invalidate d-cache
+ adr ip, cortex_init
+ movw r0, #:lower16:_C_LABEL(armv7_dcache_wbinv_all)
+ movt r0, #:upper16:_C_LABEL(armv7_dcache_wbinv_all)
+ bfi ip, r0, #0, #28
+ blx ip @ writeback & toss d-cache
XPUTC(#67)
+#ifdef CPU_CORTEXA9
/*
* Step 2, disable the data cache
*/
@@ -362,35 +401,59 @@
mcr p15, 0, r2, c1, c0, 0 @ reenable caches
isb
XPUTC(#51)
+#endif
#ifdef MULTIPROCESSOR
/*
- * Step 4b, set ACTLR.SMP=1 (and ACTRL.FX=1)
+ * Step 4b, set ACTLR.SMP=1 (and on A9, ACTRL.FX=1)
*/
mrc p15, 0, r0, c1, c0, 1 @ read aux ctl
orr r0, r0, #CORTEXA9_AUXCTL_SMP @ enable SMP
mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
isb
+#ifdef CPU_CORTEXA9
orr r0, r0, #CORTEXA9_AUXCTL_FW @ enable cache/tlb/coherency
mcr p15, 0, r0, c1, c0, 1 @ write aux ctl
isb
+#endif
XPUTC(#52)
-#endif
+#endif /* MULTIPROCESSOR */
bx r10
-ASEND(a9_start)
+ASEND(cortex_init)
/*
* Secondary processors come here after exiting the SKU ROM.
+ * Running native endian until we have SMP enabled. Since no data
+ * is accessed, that shouldn't be a problem.
*/
-a9_mpstart:
-#ifdef MULTIPROCESSOR
+cortex_mpstart:
Home |
Main Index |
Thread Index |
Old Index