[src/trunk]: src/sys/arch map_chunk() -> pmap_map_chunk(), and move it to pmap.c
details: https://anonhg.NetBSD.org/src/rev/3b1cfa10cb52
branches: trunk
changeset: 522543:3b1cfa10cb52
user: thorpej <thorpej%NetBSD.org@localhost>
date: Thu Feb 21 02:52:19 2002 +0000
description:
map_chunk() -> pmap_map_chunk(), and move it to pmap.c
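For callers, the change swaps the ARM-specific AP_* access-permission constants and PT_CACHEABLE flag for the machine-independent VM_PROT_* bits and a PTE_CACHE/PTE_NOCACHE hint. A minimal before/after sketch of one call site, lifted from the rpc_machdep.c hunk below (kernel bootstrap page-table context assumed, not standalone code):

	/* Before: AP_* access bits and page-table flag passed directly. */
	map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);

	/* After: VM_PROT_* protection plus a cache hint; pmap_map_chunk()
	   now lives in sys/arch/arm/arm32/pmap.c alongside the other
	   pmap_map_* bootstrap helpers. */
	pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);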
diffstat:
sys/arch/acorn32/acorn32/rpc_machdep.c | 61 +++++++--------
sys/arch/arm/arm32/arm32_machdep.c | 82 +--------------------
sys/arch/arm/arm32/pmap.c | 93 ++++++++++++++++++++++-
sys/arch/arm/include/arm32/machdep.h | 5 +-
sys/arch/arm/include/arm32/pmap.h | 4 +-
sys/arch/cats/cats/cats_machdep.c | 43 +++++-----
sys/arch/evbarm/integrator/integrator_machdep.c | 48 +++++------
sys/arch/evbarm/iq80310/iq80310_machdep.c | 54 +++++++------
sys/arch/hpcarm/hpcarm/hpc_machdep.c | 45 +++++-----
sys/arch/netwinder/netwinder/netwinder_machdep.c | 43 +++++-----
10 files changed, 238 insertions(+), 240 deletions(-)
diffs (truncated from 804 to 300 lines):
diff -r 284af350bec1 -r 3b1cfa10cb52 sys/arch/acorn32/acorn32/rpc_machdep.c
--- a/sys/arch/acorn32/acorn32/rpc_machdep.c Thu Feb 21 02:43:57 2002 +0000
+++ b/sys/arch/acorn32/acorn32/rpc_machdep.c Thu Feb 21 02:52:19 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: rpc_machdep.c,v 1.19 2002/02/20 20:41:15 thorpej Exp $ */
+/* $NetBSD: rpc_machdep.c,v 1.20 2002/02/21 02:52:19 thorpej Exp $ */
/*
* Copyright (c) 2000-2001 Reinoud Zandijk.
@@ -57,7 +57,7 @@
#include <sys/param.h>
-__RCSID("$NetBSD: rpc_machdep.c,v 1.19 2002/02/20 20:41:15 thorpej Exp $");
+__RCSID("$NetBSD: rpc_machdep.c,v 1.20 2002/02/21 02:52:19 thorpej Exp $");
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -186,10 +186,6 @@
void physcon_display_base __P((u_int addr));
extern void consinit __P((void));
-vm_size_t map_chunk __P((vm_offset_t pd, vm_offset_t pt, vm_offset_t va,
- vm_offset_t pa, vm_size_t size, u_int acc,
- u_int flg));
-
void data_abort_handler __P((trapframe_t *frame));
void prefetch_abort_handler __P((trapframe_t *frame));
void undefinedinstruction_bounce __P((trapframe_t *frame));
@@ -713,25 +709,26 @@
*/
if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
#if defined(CPU_ARM6) || defined(CPU_ARM7)
- logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
- physical_start, kernexec->a_text,
- AP_KRW, PT_CACHEABLE);
+ logical = pmap_map_chunk(l1pagetable, l2pagetable,
+ KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else /* CPU_ARM6 || CPU_ARM7 */
- logical = map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
- physical_start, kernexec->a_text,
- AP_KR, PT_CACHEABLE);
+ logical = pmap_map_chunk(l1pagetable, l2pagetable,
+ KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
+ VM_PROT_READ, PTE_CACHE);
#endif /* CPU_ARM6 || CPU_ARM7 */
- logical += map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE + logical,
- physical_start + logical, kerneldatasize - kernexec->a_text,
- AP_KRW, PT_CACHEABLE);
+ logical += pmap_map_chunk(l1pagetable, l2pagetable,
+ KERNEL_TEXT_BASE + logical, physical_start + logical,
+ kerneldatasize - kernexec->a_text,
+ VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
} else { /* !ZMAGIC */
/*
* Most likely an ELF kernel ...
* XXX no distinction yet between read only and read/write area's ...
*/
- map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
+ pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
physical_start, kerneldatasize,
- AP_KRW, PT_CACHEABLE);
+ VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
};
@@ -740,16 +737,17 @@
#endif
/* Map the stack pages */
- map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
- IRQ_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
- map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
- ABT_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
- map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
- UND_STACK_SIZE * NBPG, AP_KRW, PT_CACHEABLE);
- map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
- UPAGES * NBPG, AP_KRW, PT_CACHEABLE);
- map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
- PD_SIZE, AP_KRW, 0);
+ pmap_map_chunk(0, l2pagetable, irqstack.pv_va, irqstack.pv_pa,
+ IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(0, l2pagetable, abtstack.pv_va, abtstack.pv_pa,
+ ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(0, l2pagetable, undstack.pv_va, undstack.pv_pa,
+ UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(0, l2pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+ UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+
+ pmap_map_chunk(0, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+ PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
pmap_map_entry(l2pagetable, kernel_ptpt.pv_pa - physical_start,
@@ -767,11 +765,12 @@
*/
l2pagetable = kernel_pt_table[KERNEL_PT_VMEM];
- map_chunk(l1pagetable, l2pagetable, VMEM_VBASE, videomemory.vidm_pbase,
- videomemory.vidm_size, AP_KRW, PT_CACHEABLE);
- map_chunk(l1pagetable, l2pagetable, VMEM_VBASE + videomemory.vidm_size,
+ pmap_map_chunk(l1pagetable, l2pagetable, VMEM_VBASE,
videomemory.vidm_pbase, videomemory.vidm_size,
- AP_KRW, PT_CACHEABLE);
+ VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, l2pagetable,
+ VMEM_VBASE + videomemory.vidm_size, videomemory.vidm_pbase,
+ videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
diff -r 284af350bec1 -r 3b1cfa10cb52 sys/arch/arm/arm32/arm32_machdep.c
--- a/sys/arch/arm/arm32/arm32_machdep.c Thu Feb 21 02:43:57 2002 +0000
+++ b/sys/arch/arm/arm32/arm32_machdep.c Thu Feb 21 02:52:19 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_machdep.c,v 1.15 2002/02/20 20:41:15 thorpej Exp $ */
+/* $NetBSD: arm32_machdep.c,v 1.16 2002/02/21 02:52:20 thorpej Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@@ -151,86 +151,6 @@
}
/*
- * A few functions that are used to help construct the page tables
- * during the bootstrap process.
- */
-
-/* cats kernels have a 2nd l2 pt, so the range is bigger hence the 0x7ff etc */
-vsize_t
-map_chunk(pd, pt, va, pa, size, acc, flg)
- vaddr_t pd;
- vaddr_t pt;
- vaddr_t va;
- paddr_t pa;
- vsize_t size;
- u_int acc;
- u_int flg;
-{
- pd_entry_t *l1pt = (pd_entry_t *)pd;
- pt_entry_t *l2pt = (pt_entry_t *)pt;
- vsize_t remain;
- u_int loop;
-
- remain = (size + (NBPG - 1)) & ~(NBPG - 1);
-#ifdef VERBOSE_INIT_ARM
- printf("map_chunk: pa=%lx va=%lx sz=%lx rem=%lx acc=%x flg=%x\n",
- pa, va, size, remain, acc, flg);
- printf("map_chunk: ");
-#endif
- size = remain;
-
- while (remain > 0) {
- /* Can we do a section mapping ? */
- if (l1pt && !((pa | va) & (L1_SEC_SIZE - 1))
- && remain >= L1_SEC_SIZE) {
-#ifdef VERBOSE_INIT_ARM
- printf("S");
-#endif
- l1pt[(va >> PDSHIFT)] = L1_SECPTE(pa, acc, flg);
- va += L1_SEC_SIZE;
- pa += L1_SEC_SIZE;
- remain -= L1_SEC_SIZE;
- } else
- /* Can we do a large page mapping ? */
- if (!((pa | va) & (L2_LPAGE_SIZE - 1))
- && (remain >= L2_LPAGE_SIZE)) {
-#ifdef VERBOSE_INIT_ARM
- printf("L");
-#endif
- for (loop = 0; loop < 16; ++loop)
-#ifndef cats
- l2pt[((va >> PGSHIFT) & 0x3f0) + loop] =
- L2_LPTE(pa, acc, flg);
-#else
- l2pt[((va >> PGSHIFT) & 0x7f0) + loop] =
- L2_LPTE(pa, acc, flg);
-#endif
- va += L2_LPAGE_SIZE;
- pa += L2_LPAGE_SIZE;
- remain -= L2_LPAGE_SIZE;
- } else
- /* All we can do is a small page mapping */
- {
-#ifdef VERBOSE_INIT_ARM
- printf("P");
-#endif
-#ifndef cats
- l2pt[((va >> PGSHIFT) & 0x3ff)] = L2_SPTE(pa, acc, flg);
-#else
- l2pt[((va >> PGSHIFT) & 0x7ff)] = L2_SPTE(pa, acc, flg);
-#endif
- va += NBPG;
- pa += NBPG;
- remain -= NBPG;
- }
- }
-#ifdef VERBOSE_INIT_ARM
- printf("\n");
-#endif
- return(size);
-}
-
-/*
* void cpu_startup(void)
*
* Machine dependant startup code.
diff -r 284af350bec1 -r 3b1cfa10cb52 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Thu Feb 21 02:43:57 2002 +0000
+++ b/sys/arch/arm/arm32/pmap.c Thu Feb 21 02:52:19 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.42 2002/02/20 20:41:16 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.43 2002/02/21 02:52:20 thorpej Exp $ */
/*
* Copyright (c) 2001 Richard Earnshaw
@@ -142,7 +142,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.42 2002/02/20 20:41:16 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.43 2002/02/21 02:52:20 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@@ -3731,13 +3731,12 @@
pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pd_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
+ pd_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
KASSERT(((va | pa) & (L1_SEC_SIZE - 1)) == 0);
- /* XXXJRT Always creates r/w mappings for now */
-
- pde[va >> PDSHIFT] = L1_SEC(pa & PD_MASK,
- cache == PTE_CACHE ? pte_cache_mode : 0);
+ pde[va >> PDSHIFT] = L1_SECPTE(pa & PD_MASK, ap, fl);
}
/*
@@ -3780,3 +3779,85 @@
pde[slot + 2] = L1_PTE(l2pa + 0x800);
pde[slot + 3] = L1_PTE(l2pa + 0xc00);
}
+
+/*
+ * pmap_map_chunk:
+ *
+ * Map a chunk of memory using the most efficient mappings
+ * possible (section, large page, small page) into the
+ * provided L1 and L2 tables at the specified virtual address.
+ */
+vsize_t
+pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
+ vsize_t size, int prot, int cache)
+{
+ pd_entry_t *pde = (pd_entry_t *) l1pt;
+ pt_entry_t *pte = (pt_entry_t *) l2pt;
+ pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
+ pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
+ vsize_t resid;
+ int i;
+
+ resid = (size + (NBPG - 1)) & ~(NBPG - 1);
+
+#ifdef VERBOSE_INIT_ARM
+ printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
+ "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
+#endif
+
+ size = resid;
+
+ while (resid > 0) {
+ /* See if we can use a section mapping. */
+ if (l1pt &&
+ ((pa | va) & (L1_SEC_SIZE - 1)) == 0 &&
+ resid >= L1_SEC_SIZE) {
+#ifdef VERBOSE_INIT_ARM
+ printf("S");
+#endif
+ pde[va >> PDSHIFT] = L1_SECPTE(pa, ap, fl);
+ va += L1_SEC_SIZE;
+ pa += L1_SEC_SIZE;
+ resid -= L1_SEC_SIZE;
+ continue;
+ }
+
+ /* See if we can use a L2 large page mapping. */
+ if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
+ resid >= L2_LPAGE_SIZE) {
+#ifdef VERBOSE_INIT_ARM
+ printf("L");
+#endif
+ for (i = 0; i < 16; i++) {
+#ifdef cats /* XXXJRT */
+ pte[((va >> PGSHIFT) & 0x7f0) + i] =
+ L2_LPTE(pa, ap, fl);