Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64 Replace the two copies of the ADDR macro wi...
details: https://anonhg.NetBSD.org/src/rev/cc1860967126
branches: trunk
changeset: 968534:cc1860967126
user: skrll <skrll%NetBSD.org@localhost>
date: Sun Jan 19 16:12:56 2020 +0000
description:
Replace the two copies of the ADDR macro with a centralised adrl macro.
The adrl name matches the one used by armasm.
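For context, the macro being centralised is the standard AArch64 adrp/add pair for taking the PC-relative address of a symbol that may lie beyond the +/-1 MiB reach of a plain adr instruction. As a minimal sketch, using the bootstk symbol from the diff below, each adrl use expands to:

	adrp	x0, bootstk		/* x0 = base of the 4 KiB page holding bootstk (+/-4 GiB, PC-relative) */
	add	x0, x0, #:lo12:bootstk	/* add the low 12 bits of the symbol to form the full address */

Because the pair is resolved relative to the executing PC, it yields a usable address whether the code is running at its physical load address or at its linked virtual address, which is one reason the early boot path uses it rather than fetching a link-time address from a literal pool with ldr reg, =symbol.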
diffstat:
sys/arch/aarch64/aarch64/locore.S | 54 +++++++++++++++++---------------------
sys/arch/aarch64/aarch64/start.S | 16 +++-------
sys/arch/aarch64/include/asm.h | 10 ++++++-
3 files changed, 38 insertions(+), 42 deletions(-)
diffs (291 lines):
diff -r 2a798478ee9d -r cc1860967126 sys/arch/aarch64/aarch64/locore.S
--- a/sys/arch/aarch64/aarch64/locore.S Sun Jan 19 16:12:00 2020 +0000
+++ b/sys/arch/aarch64/aarch64/locore.S Sun Jan 19 16:12:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.52 2020/01/15 08:34:04 mrg Exp $ */
+/* $NetBSD: locore.S,v 1.53 2020/01/19 16:12:56 skrll Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -38,7 +38,7 @@
#include <aarch64/hypervisor.h>
#include "assym.h"
-RCSID("$NetBSD: locore.S,v 1.52 2020/01/15 08:34:04 mrg Exp $")
+RCSID("$NetBSD: locore.S,v 1.53 2020/01/19 16:12:56 skrll Exp $")
#ifdef AARCH64_DEVICE_MEM_STRONGLY_ORDERED
#define MAIR_DEVICE_MEM MAIR_DEVICE_nGnRnE
@@ -80,12 +80,6 @@
#define PRINT(string) bl xprint; .asciz string; .align 2
-/* load far effective address (pc relative) */
-.macro ADDR, reg, addr
- adrp \reg, \addr
- add \reg, \reg, #:lo12:\addr
-.endm
-
.text
.align 3
ASENTRY_NP(aarch64_start)
@@ -94,7 +88,7 @@
mov x28, sp
/* set stack pointer for boot */
- ADDR x0, bootstk
+ adrl x0, bootstk
mov sp, x0
bl clear_bss
@@ -165,11 +159,11 @@
DPRINTREG("PC = ", x20)
/* set exception vector */
- ADDR x0, _C_LABEL(el1_vectors)
+ adrl x0, _C_LABEL(el1_vectors)
msr vbar_el1, x0
/* set lwp0 stack */
- ADDR x0, lwp0uspace
+ adrl x0, lwp0uspace
add x0, x0, #(UPAGES * PAGE_SIZE)
sub x0, x0, #TF_SIZE /* lwp0space + USPACE - TF_SIZE */
mov sp, x0 /* define lwp0 ksp bottom */
@@ -180,7 +174,7 @@
msr tpidrro_el0, xzr
/* set curcpu() */
- ADDR x0, cpu_info_store /* cpu_info_store is cpu_info[0] */
+ adrl x0, cpu_info_store /* cpu_info_store is cpu_info[0] */
msr tpidr_el1, x0 /* curcpu is cpu_info[0] */
DPRINTREG("curcpu = ", x0);
@@ -193,7 +187,7 @@
bl aarch64_getcacheinfo
#ifdef KASAN
- ADDR x0, lwp0uspace
+ adrl x0, lwp0uspace
bl _C_LABEL(kasan_early_init)
#endif
@@ -216,8 +210,8 @@
ASENTRY_NP(clear_bss)
/* Zero the BSS. The size must be aligned 16, usually it should be. */
- ADDR x14, __bss_start__
- ADDR x15, __bss_end__
+ adrl x14, __bss_start__
+ adrl x15, __bss_end__
b 2f
1: stp xzr, xzr, [x14], #16
2: cmp x14, x15
@@ -267,7 +261,7 @@
mov x3, xzr /* x3 = level */
levelloop:
/* lock_level[] and lock_turn[] are always accessed via PA(devmap) */
- ADDR x0, kern_vtopdiff
+ adrl x0, kern_vtopdiff
ldr x0, [x0]
ldr x4, =lock_level
sub x4, x4, x0
@@ -308,7 +302,7 @@
locore_lock_exit:
#ifdef DEBUG_LOCORE_PRINT_LOCK
/* lock_level[] and lock_turn[] are always accessed via PA(devmap) */
- ADDR x0, kern_vtopdiff
+ adrl x0, kern_vtopdiff
ldr x0, [x0]
ldr x1, =lock_level
sub x1, x1, x0
@@ -369,7 +363,7 @@
* resolve own cpuindex. my mpidr is stored in
* extern uint64_t cpu_mpidr[MAXCPUS]
*/
- ADDR x0, _C_LABEL(cpu_mpidr)
+ adrl x0, _C_LABEL(cpu_mpidr)
mov x1, xzr
1:
add x1, x1, #1
@@ -388,7 +382,7 @@
/* set stack pointer for boot */
mov x1, #BOOT_AP_STACKSIZE
mul x1, x1, x27
- ADDR x0, bootstk
+ adrl x0, bootstk
add sp, x0, x1 /* sp = bootstk + (BOOT_AP_STACKSIZE * cpuindex) */
bl 1f
@@ -434,7 +428,7 @@
CPU_DPRINTREG("PC = ", x20)
/* set exception vector */
- ADDR x0, _C_LABEL(el1_vectors)
+ adrl x0, _C_LABEL(el1_vectors)
msr vbar_el1, x0
/* lwp-private = NULL */
@@ -444,7 +438,7 @@
/* set curcpu(), and fill curcpu()->ci_{midr,mpidr} */
mov x0, #CPU_INFO_SIZE
mul x0, x27, x0
- ADDR x1, _C_LABEL(cpu_info_store)
+ adrl x1, _C_LABEL(cpu_info_store)
add x0, x0, x1 /* x0 = &cpu_info_store[cpuindex] */
msr tpidr_el1, x0 /* tpidr_el1 = curcpu() = x0 */
@@ -455,7 +449,7 @@
mov x0, #32
udiv x1, x27, x0
- ADDR x0, _C_LABEL(aarch64_cpu_hatched)
+ adrl x0, _C_LABEL(aarch64_cpu_hatched)
add x28, x0, x1, lsl #2 /* x28 = &aarch64_cpu_hatched[cpuindex/32] */
mov x0, #1
mov x2, #32
@@ -474,7 +468,7 @@
mov x0, #32
udiv x1, x27, x0
- ADDR x0, _C_LABEL(aarch64_cpu_mbox)
+ adrl x0, _C_LABEL(aarch64_cpu_mbox)
add x28, x0, x1, lsl #2 /* x28 = &aarch64_cpu_mbox[cpuindex/32] */
/* wait for the mailbox start bit to become true */
@@ -732,13 +726,13 @@
/* save ttbr[01]_el1 for AP */
mrs x0, ttbr0_el1
mrs x1, ttbr1_el1
- ADDR x2, ttbr_save
+ adrl x2, ttbr_save
stp x0, x1, [x2]
ret
load_ttbrs:
/* load ttbr[01]_el1 */
- ADDR x2, ttbr_save
+ adrl x2, ttbr_save
ldp x0, x1, [x2]
msr ttbr0_el1, x0
msr ttbr1_el1, x1
@@ -814,7 +808,7 @@
mov x4, #LX_BLKPAG_ATTR_NORMAL_NC|LX_BLKPAG_AP_RW /* attr */
mov x3, #L2_SIZE /* blocksize */
adr x0, start /* va = start */
- ADDR x2, _end
+ adrl x2, _end
sub x2, x2, x0 /* size = _end - start */
add x2, x2, #BOOTPAGE_ALLOC_MAX /* for bootpage_alloc() */
mov x1, x0 /* pa */
@@ -822,7 +816,7 @@
cbnz x0, init_mmutable_error
#ifdef FDT
- ADDR x8, _C_LABEL(fdt_addr_r)
+ adrl x8, _C_LABEL(fdt_addr_r)
ldr x8, [x8]
VPRINT("Creating VA=PA tables for FDT\n")
@@ -847,7 +841,7 @@
orr x4, x4, #LX_BLKPAG_UXN
mov x3, #L2_SIZE /* blocksize */
adr x1, start /* pa = start */
- ADDR x2, _end
+ adrl x2, _end
sub x2, x2, x1 /* size = _end - start */
ldr x0, =start /* va */
bl pmapboot_enter
@@ -865,7 +859,7 @@
/* return PA of allocated page */
ENTRY_NP(bootpage_alloc)
/* x2 = kernend_extra */
- ADDR x3, kernend_extra
+ adrl x3, kernend_extra
ldr x2, [x3]
/* if (kernend_extra < 0) return NULL */
mov x0, xzr
@@ -873,7 +867,7 @@
bmi bootpage_alloc_done
/* x0 = PA of _end[] */
- ADDR x1, kern_vtopdiff
+ adrl x1, kern_vtopdiff
ldr x1, [x1]
ldr x0, =ARM_BOOTSTRAP_LxPT
sub x0, x0, x1
diff -r 2a798478ee9d -r cc1860967126 sys/arch/aarch64/aarch64/start.S
--- a/sys/arch/aarch64/aarch64/start.S Sun Jan 19 16:12:00 2020 +0000
+++ b/sys/arch/aarch64/aarch64/start.S Sun Jan 19 16:12:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: start.S,v 1.6 2020/01/19 15:08:43 skrll Exp $ */
+/* $NetBSD: start.S,v 1.7 2020/01/19 16:12:56 skrll Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -32,13 +32,7 @@
#include <aarch64/asm.h>
-RCSID("$NetBSD: start.S,v 1.6 2020/01/19 15:08:43 skrll Exp $")
-
-/* load far effective address (pc relative) */
-.macro ADDR, reg, addr
- adrp \reg, \addr
- add \reg, \reg, #:lo12:\addr
-.endm
+RCSID("$NetBSD: start.S,v 1.7 2020/01/19 16:12:56 skrll Exp $")
/*
* Padding at start of kernel image to make room for 64-byte header
@@ -57,7 +51,7 @@
sub x10, x10, x9
/* address of kern_vtopdiff (relative) */
- ADDR x8, kern_vtopdiff
+ adrl x8, kern_vtopdiff
str x10, [x8] /* kern_vtopdiff = start(virt) - start(phys) */
/*
@@ -65,7 +59,7 @@
*/
/* address of uboot_args (relative) */
- ADDR x8, uboot_args
+ adrl x8, uboot_args
str x0, [x8, #(8*0)]
str x1, [x8, #(8*1)]
str x2, [x8, #(8*2)]
@@ -74,7 +68,7 @@
/*
* ARM64 boot protocol has FDT address in x0 *
*/
- ADDR x8, fdt_addr_r
+ adrl x8, fdt_addr_r
str x0, [x8]
b aarch64_start /* aarch64_start() @ aarch64/locore.S */
diff -r 2a798478ee9d -r cc1860967126 sys/arch/aarch64/include/asm.h
--- a/sys/arch/aarch64/include/asm.h Sun Jan 19 16:12:00 2020 +0000
+++ b/sys/arch/aarch64/include/asm.h Sun Jan 19 16:12:56 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: asm.h,v 1.5 2019/12/20 07:16:43 ryo Exp $ */
+/* $NetBSD: asm.h,v 1.6 2020/01/19 16:12:56 skrll Exp $ */
#ifndef _AARCH64_ASM_H_
#define _AARCH64_ASM_H_
@@ -6,6 +6,14 @@
#include <arm/asm.h>
#ifdef __aarch64__
+
+#ifdef __ASSEMBLER__
+.macro adrl reg, addr
+ adrp \reg, \addr
+ add \reg, \reg, #:lo12:\addr
+.endm
+#endif
+
#define fp x29
#define lr x30
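For anyone who wants to try the expansion outside the kernel tree, here is a hypothetical standalone sketch (the file and symbol names adrl_demo.S, load_value and value are illustrative only and not part of this commit); it repeats the macro exactly as added to asm.h above and assembles with the usual aarch64 GNU toolchain:

	/* adrl_demo.S - illustrative only, not part of this commit */

	/* same adrl definition as the asm.h hunk above */
		.macro adrl reg, addr
		adrp	\reg, \addr
		add	\reg, \reg, #:lo12:\addr
		.endm

		.data
	value:	.quad	42

		.text
		.global	load_value
	load_value:
		adrl	x0, value		/* x0 = &value, PC-relative */
		ldr	x0, [x0]		/* return the contents of value */
		ret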