Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/arm Add a ARM_HAS_VBAR option which forces the use ...
details: https://anonhg.NetBSD.org/src/rev/c219a373d493
branches: trunk
changeset: 787364:c219a373d493
user: matt <matt%NetBSD.org@localhost>
date: Wed Jun 12 21:34:12 2013 +0000
description:
Add an ARM_HAS_VBAR option which forces the use of the VBAR register. This
allows much code to deal with vector_page mappings to be eliminated. On a
BEAGLEBONE kernel, this saves 8KB of text and instructions that never have
to be executed. (The PJ4B has VBAR but doesn't implement the security
extensions it is part of, so a method was needed to allow it to use VBAR without
relying on the default test for the security extensions.)
diffstat:
sys/arch/arm/arm/fiq.c | 6 ++++--
sys/arch/arm/arm/vectors.S | 24 +++++++++++++++++-------
sys/arch/arm/arm32/arm32_kvminit.c | 8 +++++---
sys/arch/arm/arm32/arm32_machdep.c | 14 +++++++++++---
sys/arch/arm/arm32/pmap.c | 28 ++++++++++++++++++++++------
sys/arch/arm/conf/files.arm | 3 ++-
sys/arch/arm/include/arm32/pmap.h | 4 +++-
7 files changed, 64 insertions(+), 23 deletions(-)
diffs (truncated from 381 to 300 lines):
diff -r 20a855800b2a -r c219a373d493 sys/arch/arm/arm/fiq.c
--- a/sys/arch/arm/arm/fiq.c Wed Jun 12 20:44:20 2013 +0000
+++ b/sys/arch/arm/arm/fiq.c Wed Jun 12 21:34:12 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $ */
+/* $NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $ */
/*
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -62,6 +62,7 @@
#define FIQ_BIT R15_FIQ_DISABLE
#endif /* __PROG32 */
+#ifndef ARM_HAS_VBAR
/*
* fiq_installhandler:
*
@@ -176,3 +177,4 @@
oldirqstate &= ~FIQ_BIT;
restore_interrupts(oldirqstate);
}
+#endif /* !ARM_HAS_VBAR */
diff -r 20a855800b2a -r c219a373d493 sys/arch/arm/arm/vectors.S
--- a/sys/arch/arm/arm/vectors.S Wed Jun 12 20:44:20 2013 +0000
+++ b/sys/arch/arm/arm/vectors.S Wed Jun 12 21:34:12 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vectors.S,v 1.6 2013/06/12 15:10:13 matt Exp $ */
+/* $NetBSD: vectors.S,v 1.7 2013/06/12 21:34:12 matt Exp $ */
/*
* Copyright (C) 1994-1997 Mark Brinicombe
@@ -33,6 +33,7 @@
#include "assym.h"
#include "opt_cputypes.h"
+#include "opt_cpuoptions.h"
#include <machine/asm.h>
/*
@@ -44,11 +45,9 @@
*/
.text
- .align 0
- .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
.global _C_LABEL(fiqvector)
-#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
/*
* ARMv[67] processors with the Security Extension have the VBAR
* which redirects the low vector to any 32-byte aligned address.
@@ -67,11 +66,14 @@
b irq_entry
#ifdef __ARM_FIQ_INDIRECT
b _C_LABEL(fiqvector)
-#else
+#elif !defined(ARM_HAS_VBAR)
b .Lfiqvector
#endif
-#endif
+#endif /* CPU_ARMV7 || CPU_ARM11 || ARM_HAS_VBAR */
+#ifndef ARM_HAS_VBAR
+ .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
+ .align 0
_C_LABEL(page0):
ldr pc, .Lreset_target
ldr pc, .Lundefined_target
@@ -82,13 +84,20 @@
ldr pc, .Lirq_target
#ifdef __ARM_FIQ_INDIRECT
ldr pc, .Lfiq_target
-#else
+#endif
+#endif /* !ARM_HAS_VBAR */
+#ifndef __ARM_FIQ_INDIRECT
.Lfiqvector:
+#ifdef ARM_HAS_VBAR
+ .set _C_LABEL(fiqvector), . - _C_LABEL(page0rel)
+#else
.set _C_LABEL(fiqvector), . - _C_LABEL(page0)
+#endif
subs pc, lr, #4
.org .Lfiqvector + 0x100
#endif
+#ifndef ARM_HAS_VBAR
_C_LABEL(page0_data):
.Lreset_target:
.word reset_entry
@@ -118,6 +127,7 @@
.word 0 /* pad it out */
#endif
_C_LABEL(page0_end):
+#endif /* ARM_HAS_VBAR */
#ifdef __ARM_FIQ_INDIRECT
.data
diff -r 20a855800b2a -r c219a373d493 sys/arch/arm/arm32/arm32_kvminit.c
--- a/sys/arch/arm/arm32/arm32_kvminit.c Wed Jun 12 20:44:20 2013 +0000
+++ b/sys/arch/arm/arm32/arm32_kvminit.c Wed Jun 12 21:34:12 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $ */
+/* $NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $ */
/*
* Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
@@ -122,7 +122,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $");
#include <sys/param.h>
#include <sys/device.h>
@@ -372,7 +372,9 @@
#else
const size_t cpu_num = 1;
#endif
-#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+#ifdef ARM_HAS_VBAR
+ const bool map_vectors_p = false;
+#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
const bool map_vectors_p = vectors == ARM_VECTORS_LOW
&& !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK);
#else
diff -r 20a855800b2a -r c219a373d493 sys/arch/arm/arm32/arm32_machdep.c
--- a/sys/arch/arm/arm32/arm32_machdep.c Wed Jun 12 20:44:20 2013 +0000
+++ b/sys/arch/arm/arm32/arm32_machdep.c Wed Jun 12 21:34:12 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $ */
+/* $NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $");
#include "opt_modular.h"
#include "opt_md.h"
@@ -130,14 +130,16 @@
void
arm32_vector_init(vaddr_t va, int which)
{
-#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
/*
* If this processor has the security extension, don't bother
* to move/map the vector page. Simply point VBAR to the copy
* that exists in the .text segment.
*/
+#ifndef ARM_HAS_VBAR
if (va == ARM_VECTORS_LOW
&& (armreg_pfr1_read() && ARM_PFR1_SEC_MASK) != 0) {
+#endif
extern const uint32_t page0rel[];
vector_page = (vaddr_t)page0rel;
KASSERT((vector_page & 0x1f) == 0);
@@ -147,8 +149,11 @@
#endif
cpu_control(CPU_CONTROL_VECRELOC, 0);
return;
+#ifndef ARM_HAS_VBAR
}
#endif
+#endif
+#ifndef ARM_HAS_VBAR
if (CPU_IS_PRIMARY(curcpu())) {
extern unsigned int page0[], page0_data[];
unsigned int *vectors = (int *) va;
@@ -193,6 +198,7 @@
*/
cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
}
+#endif
}
/*
@@ -257,8 +263,10 @@
/* Set the CPU control register */
cpu_setup(boot_args);
+#ifndef ARM_HAS_VBAR
/* Lock down zero page */
vector_page_setprot(VM_PROT_READ);
+#endif
/*
* Give pmap a chance to set up a few more things now the vm
diff -r 20a855800b2a -r c219a373d493 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed Jun 12 20:44:20 2013 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed Jun 12 21:34:12 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
#include <arm/cpuconf.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $");
#ifdef PMAP_DEBUG
@@ -1554,6 +1554,7 @@
static void
pmap_pinit(pmap_t pm)
{
+#ifndef ARM_HAS_VBAR
struct l2_bucket *l2b;
if (vector_page < KERNEL_BASE) {
@@ -1571,6 +1572,7 @@
L1_C_DOM(pm->pm_domain);
} else
pm->pm_pl1vec = NULL;
+#endif
}
#ifdef PMAP_CACHE_VIVT
@@ -2823,6 +2825,11 @@
pt_entry_t *ptep, npte, opte;
u_int nflags;
u_int oflags;
+#ifdef ARM_HAS_VBAR
+ const bool vector_page_p = false;
+#else
+ const bool vector_page_p = (va == vector_page);
+#endif
NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
@@ -3014,8 +3021,8 @@
/*
* Make sure the vector table is mapped cacheable
*/
- if ((pm != pmap_kernel() && va == vector_page) ||
- (flags & ARM32_MMAP_CACHEABLE)) {
+ if ((vector_page_p && pm != pmap_kernel())
+ || (flags & ARM32_MMAP_CACHEABLE)) {
npte |= pte_l2_s_cache_mode;
} else if (flags & ARM32_MMAP_WRITECOMBINE) {
npte |= pte_l2_s_wc_mode;
@@ -3053,8 +3060,9 @@
/*
* Make sure userland mappings get the right permissions
*/
- if (pm != pmap_kernel() && va != vector_page)
+ if (!vector_page_p && pm != pmap_kernel()) {
npte |= L2_S_PROT_U;
+ }
/*
* Keep the stats up to date
@@ -3081,7 +3089,7 @@
* We only need to frob the cache/tlb if this pmap
* is current
*/
- if (va != vector_page && l2pte_valid(npte)) {
+ if (!vector_page_p && l2pte_valid(npte)) {
/*
* This mapping is likely to be accessed as
* soon as we return to userland. Fix up the
@@ -4282,6 +4290,7 @@
/* No interrupts while we frob the TTB/DACR */
oldirqstate = disable_interrupts(IF32_bits);
+#ifndef ARM_HAS_VBAR
/*
* For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
* entry corresponding to 'vector_page' in the incoming L1 table
@@ -4294,6 +4303,7 @@
*npm->pm_pl1vec = npm->pm_l1vec;
PTE_SYNC(npm->pm_pl1vec);
}
+#endif
cpu_domains(ndacr);
@@ -4439,6 +4449,7 @@
* reference count is zero, free pmap resources and then free pmap.
*/
Home |
Main Index |
Thread Index |
Old Index