Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch Add a kernel for the CUBIETRUCK (CUBIEBOARD3). All...
details: https://anonhg.NetBSD.org/src/rev/3b751acf6a0a
branches: trunk
changeset: 328637:3b751acf6a0a
user: matt <matt%NetBSD.org@localhost>
date: Fri Apr 11 04:19:47 2014 +0000
description:
Add a kernel for the CUBIETRUCK (CUBIEBOARD3). Allow direct mapping of all
memory (but for now allow the memory mapped above KERNEL_BASE to be used for
poolpages).
diffstat:
sys/arch/arm/arm32/arm32_kvminit.c | 46 +++++++++-
sys/arch/arm/arm32/arm32_machdep.c | 14 +-
sys/arch/arm/arm32/pmap.c | 151 ++++++++++++++++++++++++++-----
sys/arch/arm/include/arm32/pmap.h | 26 ++++-
sys/arch/evbarm/conf/CUBIETRUCK | 19 ++++
sys/arch/evbarm/conf/CUBIETRUCK_INSTALL | 10 ++
sys/arch/evbarm/cubie/cubie_machdep.c | 33 ++++++-
sys/arch/evbarm/cubie/cubie_start.S | 100 +++++++++++++++-----
8 files changed, 326 insertions(+), 73 deletions(-)
diffs (truncated from 764 to 300 lines):
diff -r ae4aba0915e4 -r 3b751acf6a0a sys/arch/arm/arm32/arm32_kvminit.c
--- a/sys/arch/arm/arm32/arm32_kvminit.c Fri Apr 11 03:10:13 2014 +0000
+++ b/sys/arch/arm/arm32/arm32_kvminit.c Fri Apr 11 04:19:47 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_kvminit.c,v 1.27 2014/04/05 22:36:18 matt Exp $ */
+/* $NetBSD: arm32_kvminit.c,v 1.28 2014/04/11 04:19:47 matt Exp $ */
/*
* Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
@@ -122,7 +122,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.27 2014/04/05 22:36:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.28 2014/04/11 04:19:47 matt Exp $");
#include <sys/param.h>
#include <sys/device.h>
@@ -161,10 +161,22 @@
* Macros to translate between physical and virtual for a subset of the
* kernel address space. *Not* for general use.
*/
+#if defined(KERNEL_BASE_VOFFSET)
+#define KERN_VTOPHYS(bmi, va) \
+ ((paddr_t)((vaddr_t)(va) - KERNEL_BASE_VOFFSET))
+#define KERN_PHYSTOV(bmi, pa) \
+ ((vaddr_t)((paddr_t)(pa) + KERNEL_BASE_VOFFSET))
+#elif defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+#define KERN_VTOPHYS(bmi, va) \
+ ((paddr_t)((vaddr_t)(va) - pmap_directbase + (bmi)->bmi_start))
+#define KERN_PHYSTOV(bmi, pa) \
+ ((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + pmap_directbase))
+#else
#define KERN_VTOPHYS(bmi, va) \
((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
#define KERN_PHYSTOV(bmi, pa) \
((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))
+#endif
void
arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
@@ -210,7 +222,11 @@
*/
if (bmi->bmi_start < bmi->bmi_kernelstart) {
pv->pv_pa = bmi->bmi_start;
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+ pv->pv_va = pmap_directbase;
+#else
pv->pv_va = KERNEL_BASE;
+#endif
pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start;
bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
#ifdef VERBOSE_INIT_ARM
@@ -387,7 +403,27 @@
#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
KASSERT(mapallmem_p);
-#endif
+#ifdef ARM_MMU_EXTENDED
+ /*
+ * We can only use address beneath kernel_vm_base to map physical
+ * memory.
+ */
+ KASSERT(kernel_vm_base >= physical_end - physical_start);
+ /*
+ * If we don't have enough memory via TTBR1, we have use addresses
+ * from TTBR0 to map some of the physical memory. But try to use as
+ * much high memory space as possible.
+ */
+ if (kernel_vm_base - KERNEL_BASE < physical_end - physical_start) {
+ pmap_directbase = kernel_vm_base
+ - (physical_end - physical_start);
+ printf("%s: changing pmap_directbase to %#lx\n", __func__,
+ pmap_directbase);
+ }
+#else
+ KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
+#endif /* ARM_MMU_EXTENDED */
+#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */
/*
* Calculate the number of L2 pages needed for mapping the
@@ -700,7 +736,11 @@
cur_pv = *pv;
pv = SLIST_NEXT(pv, pv_list);
} else {
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+ cur_pv.pv_va = pmap_directbase;
+#else
cur_pv.pv_va = KERNEL_BASE;
+#endif
cur_pv.pv_pa = bmi->bmi_start;
cur_pv.pv_size = pv->pv_pa - bmi->bmi_start;
cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
diff -r ae4aba0915e4 -r 3b751acf6a0a sys/arch/arm/arm32/arm32_machdep.c
--- a/sys/arch/arm/arm32/arm32_machdep.c Fri Apr 11 03:10:13 2014 +0000
+++ b/sys/arch/arm/arm32/arm32_machdep.c Fri Apr 11 04:19:47 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_machdep.c,v 1.103 2014/04/05 22:36:18 matt Exp $ */
+/* $NetBSD: arm32_machdep.c,v 1.104 2014/04/11 04:19:47 matt Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.103 2014/04/05 22:36:18 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.104 2014/04/11 04:19:47 matt Exp $");
#include "opt_modular.h"
#include "opt_md.h"
@@ -712,11 +712,11 @@
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
- if (physical_start <= pa && pa < physical_end) {
- *vap = KERNEL_BASE + (pa - physical_start);
- return true;
+ bool rv;
+ vaddr_t va = pmap_direct_mapped_phys(pa, &rv, 0);
+ if (rv) {
+ *vap = va;
}
-
- return false;
+ return rv;
}
#endif
diff -r ae4aba0915e4 -r 3b751acf6a0a sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Fri Apr 11 03:10:13 2014 +0000
+++ b/sys/arch/arm/arm32/pmap.c Fri Apr 11 04:19:47 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.283 2014/04/10 02:45:55 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.284 2014/04/11 04:19:47 matt Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -216,7 +216,7 @@
#include <arm/locore.h>
//#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.283 2014/04/10 02:45:55 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.284 2014/04/11 04:19:47 matt Exp $");
//#define PMAP_DEBUG
#ifdef PMAP_DEBUG
@@ -512,6 +512,13 @@
*/
bool pmap_initialized;
+#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+/*
+ * Start of direct-mapped memory
+ */
+vaddr_t pmap_directbase = KERNEL_BASE;
+#endif
+
/*
* Misc. locking data structures
*/
@@ -1294,9 +1301,12 @@
#else
struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
#endif
+ bool ok __diagused;
KASSERT(pg != NULL);
pm->pm_l1_pa = VM_PAGE_TO_PHYS(pg);
- vaddr_t va = KERNEL_BASE + (pm->pm_l1_pa - physical_start);
+ vaddr_t va = pmap_direct_mapped_phys(pm->pm_l1_pa, &ok, 0xdeadbeef);
+ KASSERT(ok);
+ KASSERT(va >= KERNEL_BASE);
#else
KASSERTMSG(kernel_map != NULL, "pm %p", pm);
@@ -2632,11 +2642,16 @@
KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
#endif
+ pt_entry_t * const ptep = cpu_cdst_pte(0);
+ const vaddr_t dstp = cpu_cdstp(0);
#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
- if (way_size == PAGE_SIZE) {
- vaddr_t vdstp = KERNEL_BASE + (pa - physical_start);
- cpu_icache_sync_range(vdstp, way_size);
- return;
+ if (way_size <= PAGE_SIZE) {
+ bool ok = false;
+ vaddr_t vdstp = pmap_direct_mapped_phys(pa, &ok, dstp);
+ if (ok) {
+ cpu_icache_sync_range(vdstp, way_size);
+ return;
+ }
}
#endif
@@ -2645,8 +2660,6 @@
* same page to pages in the way and then do the icache_sync on
* the entire way making sure we are cleaned.
*/
- pt_entry_t * const ptep = cpu_cdst_pte(0);
- const vaddr_t dstp = cpu_cdstp(0);
const pt_entry_t npte = L2_S_PROTO | pa | pte_l2_s_cache_mode
| L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
@@ -5112,10 +5125,10 @@
* Is this page mapped at its natural color?
* If we have all of memory mapped, then just convert PA to VA.
*/
- const bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+ bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
|| va_offset == (pa & arm_cache_prefer_mask);
const vaddr_t vdstp = okcolor
- ? KERNEL_BASE + (pa - physical_start)
+ ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset))
: cpu_cdstp(va_offset);
#else
const bool okcolor = false;
@@ -5142,7 +5155,8 @@
PTE_SYNC(ptep);
cpu_tlb_flushD_SE(vdstp);
cpu_cpwait();
-#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \
+ && !defined(ARM_MMU_EXTENDED)
/*
* If we are direct-mapped and our color isn't ok, then before
* we bzero the page invalidate its contents from the cache and
@@ -5239,10 +5253,10 @@
const vsize_t va_offset = 0;
#endif
#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
- const bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+ bool okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
|| va_offset == (pa & arm_cache_prefer_mask);
const vaddr_t vdstp = okcolor
- ? KERNEL_BASE + (pa - physical_start)
+ ? pmap_direct_mapped_phys(pa, &okcolor, cpu_cdstp(va_offset))
: cpu_cdstp(va_offset);
#else
const bool okcolor = false;
@@ -5350,14 +5364,16 @@
* Is this page mapped at its natural color?
* If we have all of memory mapped, then just convert PA to VA.
*/
- const bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+ bool src_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
|| src_va_offset == (src & arm_cache_prefer_mask);
- const bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
+ bool dst_okcolor = arm_pcache.dcache_type == CACHE_TYPE_PIPT
|| dst_va_offset == (dst & arm_cache_prefer_mask);
const vaddr_t vsrcp = src_okcolor
- ? KERNEL_BASE + (src - physical_start)
+ ? pmap_direct_mapped_phys(src, &src_okcolor,
+ cpu_csrcp(src_va_offset))
: cpu_csrcp(src_va_offset);
- const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start);
+ const vaddr_t vdstp = pmap_direct_mapped_phys(dst, &dst_okcolor,
+ cpu_cdstp(dst_va_offset));
#else
const bool src_okcolor = false;
const bool dst_okcolor = false;
@@ -6602,6 +6618,9 @@
#ifdef ARM_MMU_EXTENDED_XXX
| ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN)
#endif
+#ifdef ARM_MMU_EXTENDED
+ | (va & 0x80000000 ? 0 : L1_S_V6_nG)
+#endif
| L1_S_PROT(PTE_KERNEL, prot) | f1;
#ifdef VERBOSE_INIT_ARM
printf("sS");
@@ -6620,6 +6639,9 @@
#ifdef ARM_MMU_EXTENDED_XXX
| ((prot & VM_PROT_EXECUTE) ? 0 : L1_S_V6_XN)
#endif
+#ifdef ARM_MMU_EXTENDED
+ | (va & 0x80000000 ? 0 : L1_S_V6_nG)
+#endif
| L1_S_PROT(PTE_KERNEL, prot) | f1
| L1_S_DOM(PMAP_DOMAIN_KERNEL);
#ifdef VERBOSE_INIT_ARM
@@ -6654,6 +6676,9 @@
#ifdef ARM_MMU_EXTENDED_XXX
| ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_L_XN)
#endif
+#ifdef ARM_MMU_EXTENDED
+ | (va & 0x80000000 ? 0 : L2_XS_nG)
+#endif
| L2_L_PROT(PTE_KERNEL, prot) | f2l;
#ifdef VERBOSE_INIT_ARM
printf("L");
@@ -6674,6 +6699,9 @@
#ifdef ARM_MMU_EXTENDED_XXX
| ((prot & VM_PROT_EXECUTE) ? 0 : L2_XS_XN)
#endif
+#ifdef ARM_MMU_EXTENDED
+ | (va & 0x80000000 ? 0 : L2_XS_nG)
+#endif
| L2_S_PROT(PTE_KERNEL, prot) | f2s;
Home |
Main Index |
Thread Index |
Old Index