Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm Add support for mapping SuperSection on armv6 a...
details: https://anonhg.NetBSD.org/src/rev/9e8f18ee1379
branches: trunk
changeset: 781115:9e8f18ee1379
user: matt <matt%NetBSD.org@localhost>
date: Mon Aug 20 13:03:41 2012 +0000
description:
Add support for mapping SuperSections on armv6 and armv7. These always
use a domain of 0, so move the kernel from domain 15 to domain 0.
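For illustration only (not from this changeset): a supersection maps 16 MB and can
replace sixteen 1 MB section entries, but only when the virtual address, the physical
address and the remaining size are all 16 MB aligned. The sketch below shows that
alignment test with locally defined constants; the helper name and the exact form of
the real L1_SS_MAPPABLE_P macro in pte.h are assumptions, since the macro definition
falls outside the truncated diff.

/*
 * Hedged sketch, not the NetBSD implementation: a supersection covers
 * L1_SS_SIZE / L1_S_SIZE = 16 consecutive L1 slots, so it is usable only
 * when va, pa and the remaining size are all 16 MB aligned.
 */
#include <stdbool.h>
#include <stdint.h>

#define L1_S_SIZE	0x00100000u	/* 1 MB section */
#define L1_SS_SIZE	0x01000000u	/* 16 MB supersection */

static bool
supersection_mappable(uint32_t va, uint32_t pa, uint32_t resid)
{
	/* va and pa must share 16 MB alignment and at least 16 MB must remain */
	return ((va | pa) & (L1_SS_SIZE - 1)) == 0 && resid >= L1_SS_SIZE;
}

When the test passes, the loop added to pmap_map_chunk in the diff below writes the
same supersection descriptor into all 16 consecutive L1 slots.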
diffstat:
sys/arch/arm/arm32/pmap.c | 70 ++++++++++++++++++++++++++++----------
sys/arch/arm/include/arm32/pmap.h | 40 +++++++++++++++------
sys/arch/arm/include/arm32/pte.h | 6 ++-
3 files changed, 84 insertions(+), 32 deletions(-)
diffs (truncated from 369 to 300 lines):
diff -r f3e05bb1956a -r 9e8f18ee1379 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Mon Aug 20 12:48:47 2012 +0000
+++ b/sys/arch/arm/arm32/pmap.c Mon Aug 20 13:03:41 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.229 2012/07/13 06:02:58 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.230 2012/08/20 13:03:41 matt Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.229 2012/07/13 06:02:58 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.230 2012/08/20 13:03:41 matt Exp $");
#ifdef PMAP_DEBUG
@@ -1149,7 +1149,7 @@
* Fix up the relevant bits in the pmap structure
*/
pm->pm_l1 = l1;
- pm->pm_domain = domain;
+ pm->pm_domain = domain + 1;
}
/*
@@ -1172,8 +1172,8 @@
/*
* Free up the domain number which was allocated to the pmap
*/
- l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
- l1->l1_domain_first = pm->pm_domain;
+ l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
+ l1->l1_domain_first = pm->pm_domain - 1;
l1->l1_domain_use_count--;
/*
@@ -5692,7 +5692,7 @@
pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
int prot, int cache)
{
- pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pd_entry_t *pdep = (pd_entry_t *) l1pt;
pt_entry_t *pte, f1, f2s, f2l;
vsize_t resid;
int i;
@@ -5731,15 +5731,37 @@
size = resid;
while (resid > 0) {
+#ifdef _ARM_ARCH_6
+ /* See if we can use a supersection mapping. */
+ if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) {
+ /* Supersections are always domain 0 */
+ pd_entry_t pde = L1_SS_PROTO | pa |
+ L1_S_PROT(PTE_KERNEL, prot) | f1;
+#ifdef VERBOSE_INIT_ARM
+ printf("sS");
+#endif
+ for (size_t s = va >> L1_S_SHIFT,
+ e = s + L1_SS_SIZE / L1_S_SIZE;
+ s < e;
+ s++) {
+ pdep[s] = pde;
+ PTE_SYNC(&pdep[s]);
+ }
+ va += L1_SS_SIZE;
+ pa += L1_SS_SIZE;
+ resid -= L1_SS_SIZE;
+ continue;
+ }
+#endif
/* See if we can use a section mapping. */
if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
printf("S");
#endif
- pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
+ pdep[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
L1_S_PROT(PTE_KERNEL, prot) | f1 |
L1_S_DOM(PMAP_DOMAIN_KERNEL);
- PTE_SYNC(&pde[va >> L1_S_SHIFT]);
+ PTE_SYNC(&pdep[va >> L1_S_SHIFT]);
va += L1_S_SIZE;
pa += L1_S_SIZE;
resid -= L1_S_SIZE;
@@ -5751,15 +5773,15 @@
* one is actually in the corresponding L1 slot
* for the current VA.
*/
- if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
+ if ((pdep[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
#ifndef ARM32_NEW_VM_LAYOUT
pte = (pt_entry_t *)
- kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
+ kernel_pt_lookup(pdep[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
pte = (pt_entry_t *) kernel_pt_lookup(
- pde[L1_IDX(va)] & L1_C_ADDR_MASK);
+ pdep[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
if (pte == NULL)
panic("pmap_map_chunk: can't find L2 table for VA"
@@ -5935,6 +5957,7 @@
pt_entry_t pte_l2_l_prot_ro;
pt_entry_t pte_l2_l_prot_mask;
+pt_entry_t pte_l1_ss_proto;
pt_entry_t pte_l1_s_proto;
pt_entry_t pte_l1_c_proto;
pt_entry_t pte_l2_s_proto;
@@ -5962,22 +5985,28 @@
/*
* If we have a write-through cache, set B and C. If
* we have a write-back cache, then we assume setting
- * only C will make those pages write-through.
+ * only C will make those pages write-through (except for those
+ * Cortex CPUs which can read the L1 caches).
*/
- if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
+ if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop
+#if ARM_MMU_V7 > 1
+ || (CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)
+ && !CPU_ID_CORTEX_A8_P(curcpu()->ci_arm_cpuid))
+#endif
+ || false) {
pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
pte_l2_l_cache_mode_pt = L2_B|L2_C;
pte_l2_s_cache_mode_pt = L2_B|L2_C;
- } else {
#if ARM_MMU_V6 > 1
+ } else if (CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; /* arm116 errata 399234 */
pte_l2_l_cache_mode_pt = L2_B|L2_C; /* arm116 errata 399234 */
pte_l2_s_cache_mode_pt = L2_B|L2_C; /* arm116 errata 399234 */
-#else
- pte_l1_s_cache_mode_pt = L1_S_C;
- pte_l2_l_cache_mode_pt = L2_C;
- pte_l2_s_cache_mode_pt = L2_C;
#endif
+ } else {
+ pte_l1_s_cache_mode_pt = L1_S_C; /* write through */
+ pte_l2_l_cache_mode_pt = L2_C; /* write through */
+ pte_l2_s_cache_mode_pt = L2_C; /* write through */
}
pte_l1_s_prot_u = L1_S_PROT_U_generic;
@@ -5995,6 +6024,7 @@
pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
+ pte_l1_ss_proto = L1_SS_PROTO_generic;
pte_l1_s_proto = L1_S_PROTO_generic;
pte_l1_c_proto = L1_C_PROTO_generic;
pte_l2_s_proto = L2_S_PROTO_generic;
@@ -6219,6 +6249,7 @@
pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;
+ pte_l1_ss_proto = L1_SS_PROTO_xscale;
pte_l1_s_proto = L1_S_PROTO_xscale;
pte_l1_c_proto = L1_C_PROTO_xscale;
pte_l2_s_proto = L2_S_PROTO_xscale;
@@ -6403,6 +6434,7 @@
pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
+ pte_l1_ss_proto = L1_SS_PROTO_armv6;
pte_l1_s_proto = L1_S_PROTO_armv6;
pte_l1_c_proto = L1_C_PROTO_armv6;
pte_l2_s_proto = L2_S_PROTO_armv6c;
@@ -6412,6 +6444,7 @@
pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;
+ pte_l1_ss_proto = L1_SS_PROTO_armv6;
pte_l1_s_proto = L1_S_PROTO_armv6;
pte_l1_c_proto = L1_C_PROTO_armv6;
pte_l2_s_proto = L2_S_PROTO_armv6n;
@@ -6455,6 +6488,7 @@
pte_l2_l_prot_ro = L2_L_PROT_RO_armv7;
pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7;
+ pte_l1_ss_proto = L1_SS_PROTO_armv7;
pte_l1_s_proto = L1_S_PROTO_armv7;
pte_l1_c_proto = L1_C_PROTO_armv7;
pte_l2_s_proto = L2_S_PROTO_armv7;
diff -r f3e05bb1956a -r 9e8f18ee1379 sys/arch/arm/include/arm32/pmap.h
--- a/sys/arch/arm/include/arm32/pmap.h Mon Aug 20 12:48:47 2012 +0000
+++ b/sys/arch/arm/include/arm32/pmap.h Mon Aug 20 13:03:41 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.102 2012/07/29 00:07:10 matt Exp $ */
+/* $NetBSD: pmap.h,v 1.103 2012/08/20 13:03:41 matt Exp $ */
/*
* Copyright (c) 2002, 2003 Wasabi Systems, Inc.
@@ -207,16 +207,18 @@
paddr_t pv_pa;
vaddr_t pv_va;
vsize_t pv_size;
+ uint8_t pv_cache;
+ uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
extern pv_addrqh_t pmap_freeq;
-extern pv_addr_t kernelpages;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
+extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;
@@ -472,12 +474,15 @@
#if defined(CPU_ARM10)
void pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
-#if defined(CPU_ARM11)
+#if defined(CPU_ARM11) /* ARM_MMU_V6 */
void pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
-#if defined(CPU_ARM11MPCORE)
+#if defined(CPU_ARM11MPCORE) /* ARM_MMU_V6 */
void pmap_pte_init_arm11mpcore(void);
#endif
+#if ARM_MMU_V7 == 1
+void pmap_pte_init_armv7(void);
+#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
#if ARM_MMU_SA1 == 1
@@ -496,10 +501,6 @@
void pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */
-#if ARM_MMU_V7 == 1
-void pmap_pte_init_armv7(void);
-#endif /* ARM_MMU_V7 */
-
extern pt_entry_t pte_l1_s_cache_mode;
extern pt_entry_t pte_l1_s_cache_mask;
@@ -532,6 +533,7 @@
extern pt_entry_t pte_l2_l_prot_ro;
extern pt_entry_t pte_l2_l_prot_mask;
+extern pt_entry_t pte_l1_ss_proto;
extern pt_entry_t pte_l1_s_proto;
extern pt_entry_t pte_l1_c_proto;
extern pt_entry_t pte_l2_s_proto;
@@ -546,8 +548,8 @@
/*
* Definitions for MMU domains
*/
-#define PMAP_DOMAINS 15 /* 15 'user' domains (0-14) */
-#define PMAP_DOMAIN_KERNEL 15 /* The kernel uses domain #15 */
+#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */
+#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
/*
* These macros define the various bit masks in the PTE.
@@ -642,6 +644,11 @@
#define L1_S_PROTO_armv6 (L1_TYPE_S)
#define L1_S_PROTO_armv7 (L1_TYPE_S)
+#define L1_SS_PROTO_generic 0
+#define L1_SS_PROTO_xscale 0
+#define L1_SS_PROTO_armv6 (L1_TYPE_S | L1_S_V6_SS)
+#define L1_SS_PROTO_armv7 (L1_TYPE_S | L1_S_V6_SS)
+
#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
#define L1_C_PROTO_xscale (L1_TYPE_C)
#define L1_C_PROTO_armv6 (L1_TYPE_C)
@@ -684,6 +691,7 @@
#define L2_L_CACHE_MASK pte_l2_l_cache_mask
#define L2_S_CACHE_MASK pte_l2_s_cache_mask
+#define L1_SS_PROTO pte_l1_ss_proto
#define L1_S_PROTO pte_l1_s_proto
#define L1_C_PROTO pte_l1_c_proto
#define L2_S_PROTO pte_l2_s_proto
@@ -710,6 +718,7 @@
#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
+#define L1_SS_PROTO L1_SS_PROTO_generic
#define L1_S_PROTO L1_S_PROTO_generic
#define L1_C_PROTO L1_C_PROTO_generic
#define L2_S_PROTO L2_S_PROTO_generic
@@ -738,6 +747,7 @@
/* These prototypes make writeable mappings, while the other MMU types
* make read-only mappings. */
+#define L1_SS_PROTO L1_SS_PROTO_armv6
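For background (not part of the diff above): ARMv6/v7 supersection descriptors carry
no domain field and are always checked against domain 0, which is why the kernel moves
to domain 0 and user pmaps now take domains 1 through 15 (pm_domain stores the
allocator index plus one). A rough sketch of how a Domain Access Control Register
value could be built under that numbering follows; the two-bit access encodings are
architectural, but the helper name and its use here are assumptions, not the pmap's
actual code.

/*
 * Hedged sketch, not the pmap's actual DACR handling: grant "client"
 * access (permission checks use the page tables' AP bits) to kernel
 * domain 0 and to one user pmap domain in 1..15.  Each domain owns a
 * two-bit field in the DACR: 0 = fault, 1 = client, 3 = manager.
 */
#include <stdint.h>

#define DOMAIN_CLIENT		0x1u
#define PMAP_DOMAIN_KERNEL	0	/* matches the new value in pmap.h above */

static uint32_t
dacr_for_user_domain(unsigned int dom)	/* dom in 1..15 */
{
	return (DOMAIN_CLIENT << (2 * PMAP_DOMAIN_KERNEL)) |
	    (DOMAIN_CLIENT << (2 * dom));
}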