Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/aarch64 Fix pmap to work correctly with tagged addr...
details: https://anonhg.NetBSD.org/src/rev/03269f6f0056
branches: trunk
changeset: 745371:03269f6f0056
user: ryo <ryo%NetBSD.org@localhost>
date: Sat Feb 29 21:34:37 2020 +0000
description:
Fix pmap to work correctly with tagged addresses
- when fault, untag from address before passing to uvm/pmap functions
- pmap_extract() checks more strictly and considers the address tag
diffstat:
sys/arch/aarch64/aarch64/fault.c | 11 ++-
sys/arch/aarch64/aarch64/pmap.c | 97 +++++++++++++++++++++++++++---------
sys/arch/aarch64/aarch64/pmapboot.c | 56 +++++++++++---------
sys/arch/aarch64/include/pte.h | 4 +-
4 files changed, 111 insertions(+), 57 deletions(-)
diffs (truncated from 336 to 300 lines):
diff -r 3aade8ca6e62 -r 03269f6f0056 sys/arch/aarch64/aarch64/fault.c
--- a/sys/arch/aarch64/aarch64/fault.c Sat Feb 29 21:32:22 2020 +0000
+++ b/sys/arch/aarch64/aarch64/fault.c Sat Feb 29 21:34:37 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: fault.c,v 1.11 2020/01/09 01:38:34 ryo Exp $ */
+/* $NetBSD: fault.c,v 1.12 2020/02/29 21:34:37 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.11 2020/01/09 01:38:34 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.12 2020/02/29 21:34:37 ryo Exp $");
#include "opt_compat_netbsd32.h"
#include "opt_ddb.h"
@@ -157,6 +157,9 @@
p = l->l_proc;
va = trunc_page((vaddr_t)tf->tf_far);
+ /* eliminate address tag if TCR_EL1.TBI[01] is enabled */
+ va = aarch64_untag_address(va);
+
if ((VM_MIN_KERNEL_ADDRESS <= va) && (va < VM_MAX_KERNEL_ADDRESS)) {
map = kernel_map;
UVMHIST_LOG(pmaphist, "use kernel_map %p", map, 0, 0, 0);
@@ -200,8 +203,8 @@
if (user)
uvm_grow(p, va);
- UVMHIST_LOG(pmaphist, "uvm_fault success: va=%016llx",
- tf->tf_far, 0, 0, 0);
+ UVMHIST_LOG(pmaphist, "uvm_fault success: far=%016lx, va=%016llx",
+ tf->tf_far, va, 0, 0);
return;
}
diff -r 3aade8ca6e62 -r 03269f6f0056 sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c Sat Feb 29 21:32:22 2020 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c Sat Feb 29 21:34:37 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.65 2020/02/29 21:10:09 ryo Exp $ */
+/* $NetBSD: pmap.c,v 1.66 2020/02/29 21:34:37 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.65 2020/02/29 21:10:09 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.66 2020/02/29 21:34:37 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -49,6 +49,7 @@
#include <aarch64/pte.h>
#include <aarch64/armreg.h>
#include <aarch64/cpufunc.h>
+#include <aarch64/locore.h>
#include <aarch64/machdep.h>
#ifdef DDB
#include <aarch64/db_machdep.h>
@@ -281,15 +282,25 @@
#define IN_KSEG_ADDR(va) \
IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END)
-#define KASSERT_PM_ADDR(pm, va) \
+#ifdef DIAGNOSTIC
+#define KASSERT_PM_ADDR(pm,va) \
do { \
+ int space = aarch64_addressspace(va); \
if ((pm) == pmap_kernel()) { \
+ KASSERTMSG(space == AARCH64_ADDRSPACE_UPPER, \
+ "%s: kernel pm %p: va=%016lx" \
+ " is out of upper address space\n", \
+ __func__, (pm), (va)); \
KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \
VM_MAX_KERNEL_ADDRESS), \
"%s: kernel pm %p: va=%016lx" \
" is not kernel address\n", \
__func__, (pm), (va)); \
} else { \
+ KASSERTMSG(space == AARCH64_ADDRSPACE_LOWER, \
+ "%s: user pm %p: va=%016lx" \
+ " is out of lower address space\n", \
+ __func__, (pm), (va)); \
KASSERTMSG(IN_RANGE((va), \
VM_MIN_ADDRESS, VM_MAX_ADDRESS), \
"%s: user pm %p: va=%016lx" \
@@ -297,6 +308,9 @@
__func__, (pm), (va)); \
} \
} while (0 /* CONSTCOND */)
+#else /* DIAGNOSTIC */
+#define KASSERT_PM_ADDR(pm,va)
+#endif /* DIAGNOSTIC */
static const struct pmap_devmap *pmap_devmap_table;
@@ -739,25 +753,56 @@
pt_entry_t *ptep, pte;
paddr_t pa;
vsize_t blocksize = 0;
+ int space;
extern char __kernel_text[];
extern char _end[];
- if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) {
- /* fast loookup */
- pa = KERN_VTOPHYS(va);
- } else if (IN_KSEG_ADDR(va)) {
- /* fast loookup. should be used only if actually mapped? */
- pa = AARCH64_KVA_TO_PA(va);
+ space = aarch64_addressspace(va);
+ if (pm == pmap_kernel()) {
+ if (space != AARCH64_ADDRSPACE_UPPER)
+ return false;
+
+ if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) {
+ /* kernel text/data/bss are definitely linear mapped */
+ pa = KERN_VTOPHYS(va);
+ goto mapped;
+ } else if (IN_KSEG_ADDR(va)) {
+ /*
+ * also KSEG is linear mapped, but areas that have no
+ * physical memory haven't been mapped.
+ * fast lookup by using the S1E1R/PAR_EL1 registers.
+ */
+ register_t s = daif_disable(DAIF_I|DAIF_F);
+ reg_s1e1r_write(va);
+ __asm __volatile ("isb");
+ uint64_t par = reg_par_el1_read();
+ daif_enable(s);
+
+ if (par & PAR_F)
+ return false;
+ pa = (__SHIFTOUT(par, PAR_PA) << PAR_PA_SHIFT) +
+ (va & __BITS(PAR_PA_SHIFT - 1, 0));
+ goto mapped;
+ }
} else {
- ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
- if (ptep == NULL)
+ if (space != AARCH64_ADDRSPACE_LOWER)
return false;
- pte = *ptep;
- if (!lxpde_valid(pte))
- return false;
- pa = lxpde_pa(pte) + (va & (blocksize - 1));
}
+ /*
+ * other areas cannot be examined via the PAR_EL1 register,
+ * because the page may be in an access fault state due to
+ * reference bit emulation.
+ */
+ ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
+ if (ptep == NULL)
+ return false;
+ pte = *ptep;
+ if (!lxpde_valid(pte))
+ return false;
+ pa = lxpde_pa(pte) + (va & (blocksize - 1));
+
+ mapped:
if (pap != NULL)
*pap = pa;
return true;
@@ -769,7 +814,8 @@
struct pmap *pm;
paddr_t pa;
- if (va & TTBR_SEL_VA)
+ /* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */
+ if ((uint64_t)va & AARCH64_ADDRTOP_TAG)
pm = pmap_kernel();
else
pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
@@ -791,13 +837,6 @@
vsize_t blocksize;
unsigned int idx;
- if (((pm == pmap_kernel()) && ((va & TTBR_SEL_VA) == 0)) ||
- ((pm != pmap_kernel()) && ((va & TTBR_SEL_VA) != 0))) {
- blocksize = 0;
- ptep = NULL;
- goto done;
- }
-
/*
* traverse L0 -> L1 -> L2 -> L3
*/
@@ -856,6 +895,8 @@
vaddr_t va;
vsize_t blocksize = 0;
+ KASSERT_PM_ADDR(pm, sva);
+
pm_lock(pm);
for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
@@ -2553,12 +2594,18 @@
paddr_t pa;
unsigned int idx;
- if (va & TTBR_SEL_VA) {
+ switch (aarch64_addressspace(va)) {
+ case AARCH64_ADDRSPACE_UPPER:
user = false;
ttbr = reg_ttbr1_el1_read();
- } else {
+ break;
+ case AARCH64_ADDRSPACE_LOWER:
user = true;
ttbr = reg_ttbr0_el1_read();
+ break;
+ default:
+ pr("illegal address space\n");
+ return;
}
pa = ttbr & TTBR_BADDR;
l0 = (pd_entry_t *)AARCH64_PA_TO_KVA(pa);
diff -r 3aade8ca6e62 -r 03269f6f0056 sys/arch/aarch64/aarch64/pmapboot.c
--- a/sys/arch/aarch64/aarch64/pmapboot.c Sat Feb 29 21:32:22 2020 +0000
+++ b/sys/arch/aarch64/aarch64/pmapboot.c Sat Feb 29 21:34:37 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmapboot.c,v 1.5 2020/02/29 21:09:11 ryo Exp $ */
+/* $NetBSD: pmapboot.c,v 1.6 2020/02/29 21:34:37 ryo Exp $ */
/*
* Copyright (c) 2018 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmapboot.c,v 1.5 2020/02/29 21:09:11 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmapboot.c,v 1.6 2020/02/29 21:34:37 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -47,7 +47,6 @@
#define OPTIMIZE_TLB_CONTIG
-
static void
pmapboot_protect_entry(pt_entry_t *pte, vm_prot_t clrprot)
{
@@ -77,17 +76,21 @@
paddr_t pa;
pd_entry_t *l0, *l1, *l2, *l3;
+ switch (aarch64_addressspace(sva)) {
+ case AARCH64_ADDRSPACE_LOWER:
+ /* 0x0000xxxxxxxxxxxx */
+ pa = (reg_ttbr0_el1_read() & TTBR_BADDR);
+ break;
+ case AARCH64_ADDRSPACE_UPPER:
+ /* 0xFFFFxxxxxxxxxxxx */
+ pa = (reg_ttbr1_el1_read() & TTBR_BADDR);
+ break;
+ default:
+ return -1;
+ }
+ l0 = (pd_entry_t *)AARCH64_PA_TO_KVA(pa);
+
for (va = sva; va < eva;) {
- /*
- * 0x0000xxxxxxxxxxxx -> l0 = (ttbr0_el1 & TTBR_BADDR)
- * 0xffffxxxxxxxxxxxx -> l0 = (ttbr1_el1 & TTBR_BADDR)
- */
- if (va & TTBR_SEL_VA)
- pa = (reg_ttbr1_el1_read() & TTBR_BADDR);
- else
- pa = (reg_ttbr0_el1_read() & TTBR_BADDR);
- l0 = (pd_entry_t *)AARCH64_PA_TO_KVA(pa);
-
idx = l0pde_index(va);
if (!l0pde_valid(l0[idx]))
return -1;
@@ -233,19 +236,22 @@
attr |= LX_BLKPAG_OS_BOOT;
+ switch (aarch64_addressspace(va)) {
+ case AARCH64_ADDRSPACE_LOWER:
+ /* 0x0000xxxxxxxxxxxx */
+ l0 = (pd_entry_t *)(reg_ttbr0_el1_read() & TTBR_BADDR);
+ ttbr = 0;
+ break;
+ case AARCH64_ADDRSPACE_UPPER:
+ /* 0xFFFFxxxxxxxxxxxx */
+ l0 = (pd_entry_t *)(reg_ttbr1_el1_read() & TTBR_BADDR);
+ ttbr = 1;
+ break;
+ default:
+ return -1;
Home |
Main Index |
Thread Index |
Old Index