Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/aarch64 cleanup locore, and changed the way to map ...
details: https://anonhg.NetBSD.org/src/rev/ed38336a3ef9
branches: trunk
changeset: 433791:ed38336a3ef9
user: ryo <ryo%NetBSD.org@localhost>
date: Thu Oct 04 23:53:13 2018 +0000
description:
cleanup locore, and changed the way to map memories during boot.
- add functions bootpage_enter() and bootpage_alloc() to adapt to various
layouts of the physical memory map, especially 64-bit physical memory layouts.
pmapboot_alloc() allocates pagetable pages from _end[].
- changed the boot code to map only the required regions for the PA=VA identity
mapping (kernel image, UART device, and FDT blob), using L2_BLOCK (2 Mbyte) mappings.
- changing page permissions for the kernel image and creating the KSEG mapping
are now done in cpu_kernel_vm_init() instead of in locore.
- optimize PTEs with the PTE Contiguous bit; for now it is enabled only for devmap.
reviewed by skrll@, thanks.
diffstat:
sys/arch/aarch64/aarch64/aarch64_machdep.c | 62 +-
sys/arch/aarch64/aarch64/locore.S | 1637 +++++++++++++--------------
sys/arch/aarch64/aarch64/pmap.c | 204 +--
sys/arch/aarch64/aarch64/pmapboot.c | 420 +++++++
sys/arch/aarch64/conf/files.aarch64 | 3 +-
sys/arch/aarch64/include/pmap.h | 17 +-
6 files changed, 1349 insertions(+), 994 deletions(-)
diffs (truncated from 2748 to 300 lines):
diff -r 5a4c34f7fbf7 -r ed38336a3ef9 sys/arch/aarch64/aarch64/aarch64_machdep.c
--- a/sys/arch/aarch64/aarch64/aarch64_machdep.c Thu Oct 04 19:11:09 2018 +0000
+++ b/sys/arch/aarch64/aarch64/aarch64_machdep.c Thu Oct 04 23:53:13 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: aarch64_machdep.c,v 1.11 2018/08/26 18:15:49 ryo Exp $ */
+/* $NetBSD: aarch64_machdep.c,v 1.12 2018/10/04 23:53:13 ryo Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.11 2018/08/26 18:15:49 ryo Exp $");
+__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.12 2018/10/04 23:53:13 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -100,28 +100,57 @@
/* filled in before cleaning bss. keep in .data */
u_long kern_vtopdiff __attribute__((__section__(".data")));
+long kernend_extra; /* extra memory allocated from round_page(_end[]) */
+
void
cpu_kernel_vm_init(uint64_t memory_start, uint64_t memory_size)
{
-
extern char __kernel_text[];
extern char _end[];
+ extern char __data_start[];
+ extern char __rodata_start[];
vaddr_t kernstart = trunc_page((vaddr_t)__kernel_text);
vaddr_t kernend = round_page((vaddr_t)_end);
-
- paddr_t kernstart_phys = KERN_VTOPHYS(kernstart);
+ paddr_t kernstart_phys = KERN_VTOPHYS(kernstart);
paddr_t kernend_phys = KERN_VTOPHYS(kernend);
+ vaddr_t data_start = (vaddr_t)__data_start;
+ vaddr_t rodata_start = (vaddr_t)__rodata_start;
- VPRINTF("%s: kernel phys start %lx end %lx\n", __func__,
- kernstart_phys, kernend_phys);
-
- fdt_add_reserved_memory_range(kernstart_phys,
- kernend_phys - kernstart_phys);
+ /* add KSEG mappings of whole memory */
+ VPRINTF("Creating KSEG tables for 0x%016lx-0x%016lx\n",
+ memory_start, memory_start + memory_size);
+ const pt_entry_t ksegattr =
+ LX_BLKPAG_ATTR_NORMAL_WB |
+ LX_BLKPAG_AP_RW |
+ LX_BLKPAG_PXN |
+ LX_BLKPAG_UXN;
+ pmapboot_enter(AARCH64_PA_TO_KVA(memory_start), memory_start,
+ memory_size, L1_SIZE, ksegattr, PMAPBOOT_ENTER_NOOVERWRITE,
+ bootpage_alloc, NULL);
/*
- * XXX whole bunch of stuff to map kernel correctly
+ * at this point, whole kernel image is mapped as "rwx".
+ * permission should be changed to:
+ *
+ * text rwx => r-x
+ * rodata rwx => r--
+ * data rwx => rw-
+ *
+ * kernel image has mapped by L2 block. (2Mbyte)
*/
+ pmapboot_protect(L2_TRUNC_BLOCK(kernstart),
+ L2_TRUNC_BLOCK(data_start), VM_PROT_WRITE);
+ pmapboot_protect(L2_ROUND_BLOCK(rodata_start),
+ L2_ROUND_BLOCK(kernend + kernend_extra), VM_PROT_EXECUTE);
+
+ aarch64_tlbi_all();
+
+
+ VPRINTF("%s: kernel phys start %lx end %lx+%lx\n", __func__,
+ kernstart_phys, kernend_phys, kernend_extra);
+ fdt_add_reserved_memory_range(kernstart_phys,
+ kernend_phys - kernstart_phys + kernend_extra);
}
@@ -172,8 +201,8 @@
kernstart = trunc_page((vaddr_t)__kernel_text);
kernend = round_page((vaddr_t)_end);
- kernstart_l2 = kernstart & -L2_SIZE; /* trunk L2_SIZE(2M) */
- kernend_l2 = (kernend + L2_SIZE - 1) & -L2_SIZE;/* round L2_SIZE(2M) */
+ kernstart_l2 = L2_TRUNC_BLOCK(kernstart);
+ kernend_l2 = L2_ROUND_BLOCK(kernend + kernend_extra);
kernelvmstart = kernend_l2;
#ifdef MODULAR
@@ -212,13 +241,15 @@
"kernel_start_l2 = 0x%016lx\n"
"kernel_start = 0x%016lx\n"
"kernel_end = 0x%016lx\n"
+ "pagetables = 0x%016lx\n"
+ "pagetables_end = 0x%016lx\n"
"kernel_end_l2 = 0x%016lx\n"
#ifdef MODULAR
"module_start = 0x%016lx\n"
"module_end = 0x%016lx\n"
#endif
"(kernel va area)\n"
- "(devmap va area)\n"
+ "(devmap va area) = 0x%016lx\n"
"VM_MAX_KERNEL_ADDRESS = 0x%016lx\n"
"------------------------------------------\n",
kern_vtopdiff,
@@ -230,11 +261,14 @@
kernstart_l2,
kernstart,
kernend,
+ round_page(kernend),
+ round_page(kernend) + kernend_extra,
kernend_l2,
#ifdef MODULAR
module_start,
module_end,
#endif
+ VM_KERNEL_IO_ADDRESS,
VM_MAX_KERNEL_ADDRESS);
/*
diff -r 5a4c34f7fbf7 -r ed38336a3ef9 sys/arch/aarch64/aarch64/locore.S
--- a/sys/arch/aarch64/aarch64/locore.S Thu Oct 04 19:11:09 2018 +0000
+++ b/sys/arch/aarch64/aarch64/locore.S Thu Oct 04 23:53:13 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.27 2018/10/04 09:09:29 ryo Exp $ */
+/* $NetBSD: locore.S,v 1.28 2018/10/04 23:53:13 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -26,888 +26,189 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_arm_debug.h"
+#include "opt_console.h"
#include "opt_cpuoptions.h"
+#include "opt_ddb.h"
+#include "opt_fdt.h"
#include "opt_multiprocessor.h"
-#include "opt_ddb.h"
-#include "opt_arm_debug.h"
#include <aarch64/asm.h>
#include <aarch64/hypervisor.h>
#include "assym.h"
-RCSID("$NetBSD: locore.S,v 1.27 2018/10/04 09:09:29 ryo Exp $")
+RCSID("$NetBSD: locore.S,v 1.28 2018/10/04 23:53:13 ryo Exp $")
+
-/* #define DEBUG_LOCORE */
-/* #define DEBUG_MMU */
+/*#define DEBUG_LOCORE /* debug print */
+/*#define DEBUG_LOCORE_PRINT_LOCK /* avoid mixing AP's output */
+/*#define DEBUG_MMU /* dump MMU table */
+
+#define LOCORE_EL2
+
+#define BOOT_AP_STACKSIZE 256 /* size of temporally stack for APs */
+#define BOOTPAGE_ALLOC_MAX (1024 * 1024) /* reserved size from _end[] */
#if (defined(VERBOSE_INIT_ARM) || defined(DEBUG_LOCORE)) && defined(EARLYCONS)
#define VERBOSE_LOCORE
#endif
-#define LOCORE_EL2
-
-#define PRINT(string) bl xprint;.asciz string;.align 2
+#ifdef VERBOSE_LOCORE
+#define VPRINT(string) PRINT(string)
+#else
+#define VPRINT(string)
+#endif
-#ifdef VERBOSE_LOCORE
-#define VERBOSE(string) PRINT(string)
+/* DPRINTREG macro use x19 internally. x0-x15 may be broken */
+#if (defined(DEBUG_LOCORE) && defined(EARLYCONS))
+#define DPRINT(string) PRINT(string)
+#define DPRINTREG(str, reg) mov x19,reg; PRINT(str); mov x0,x19; bl print_x0
+#define DPRINTSREG(str, reg) mrs x19,reg; PRINT(str); mov x0,x19; bl print_x0
#else
-#define VERBOSE(string)
+#define DPRINT(string)
+#define DPRINTREG(str, reg)
+#define DPRINTSREG(str, reg)
#endif
+#define PRINT(string) bl xprint; .asciz string; .align 2
+
+
/* load far effective address (pc relative) */
.macro ADDR, reg, addr
adrp \reg, \addr
add \reg, \reg, #:lo12:\addr
.endm
-ENTRY_NP(aarch64_start)
- /* Zero the BSS. The size must be aligned 16, usually it should be. */
- ADDR x0, __bss_start__
- ADDR x1, __bss_end__
- b 2f
-1: stp xzr, xzr, [x0], #16
-2: cmp x0, x1
- b.lo 1b
+ .text
+ .align 3
+ASENTRY_NP(aarch64_start)
+ /* keep lr & sp for return to bootloader if possible */
+ mov x27, lr
+ mov x28, sp
/* set stack pointer for boot */
ADDR x0, bootstk
mov sp, x0
-#ifdef VERBOSE_INIT_ARM
- PRINT("boot NetBSD/evbarm (aarch64)\r\n")
-#endif
-
-#ifdef DEBUG_LOCORE
- PRINT("PC = ")
- bl 1f
-1: mov x0, lr
- bl print_x0
-
- PRINT("SP = ")
- bl 1f
-1: mov x0, sp
- bl print_x0
+ bl clear_bss
- PRINT("CurrentEL = ")
- mrs x0, CurrentEL
- lsr x0, x0, #2
- bl print_x0
-
- cmp x0, #2
- bne 1f
-
- /* EL2 registers can be accessed in EL2 or higher */
- PRINT("SCTLR_EL2 = ")
- mrs x0, sctlr_el2
- bl print_x0
-
- PRINT("HCR_EL2 = ")
- mrs x0, hcr_el2
- bl print_x0
-1:
-
- PRINT("CNTFREQ_EL0 = ")
- mrs x0, cntfrq_el0
- bl print_x0
+ PRINT("boot NetBSD/aarch64\n")
- PRINT("DAIF = ")
- mrs x0, daif
- bl print_x0
-
- PRINT("MPIDR_EL1 = ")
- mrs x0, mpidr_el1
- bl print_x0
-
-#if 0
- PRINT("L2CTLR_EL1 = ")
- mrs x0, s3_1_c11_c0_2
- bl print_x0
-#endif
-
- PRINT("ID_AA64MPFR0_EL1 = ")
- mrs x0, id_aa64pfr0_el1
- bl print_x0
-
- PRINT("ID_AA64MPFR1_EL1 = ")
- mrs x0, id_aa64pfr1_el1
- bl print_x0
-
- PRINT("ID_AA64ISAR0_EL1 = ")
- mrs x0, id_aa64isar0_el1
- bl print_x0
-
- PRINT("ID_AA64ISAR1_EL1 = ")
- mrs x0, id_aa64isar1_el1
- bl print_x0
-
-
- PRINT("ID_AA64MMFR0_EL1 = ")
- mrs x0, id_aa64mmfr0_el1
- bl print_x0
-
- PRINT("ID_AA64MMFR1_EL1 = ")
- mrs x0, id_aa64mmfr1_el1
Home |
Main Index |
Thread Index |
Old Index