Source-Changes-HG archive
[src/trunk]: src/sys/arch Keep track of which kernel PTs are available during...
details: https://anonhg.NetBSD.org/src/rev/89d4ad1859bb
branches: trunk
changeset: 522574:89d4ad1859bb
user: thorpej <thorpej@NetBSD.org>
date: Thu Feb 21 21:58:00 2002 +0000
description:
Keep track of which kernel PTs are available during bootstrap,
and let pmap_map_chunk() look up the correct one to use for the
current VA. Eliminate the "l2table" argument to pmap_map_chunk().
Add a second L2 table for mapping kernel text/data/bss on the
IQ80310 (fixes booting kernels with ramdisks).
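For context, a condensed sketch of the new mechanism (distilled from the
sys/arch/arm/arm32/pmap.c hunk below; an illustration, not the verbatim
committed code): pmap_link_l2pt() now records each kernel L2 table on a
bootstrap-only list, and pmap_map_chunk() consults that list to find the
L2 table covering the VA it has been asked to map.

    /* Bootstrap-only list of kernel L2 tables (PA/VA pairs). */
    SLIST_HEAD(, pv_addr) kernel_pt_list =
        SLIST_HEAD_INITIALIZER(kernel_pt_list);

    /* Translate an L2 table's physical address back to its VA. */
    static vaddr_t
    kernel_pt_lookup(paddr_t pa)
    {
            pv_addr_t *pv;

            SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
                    if (pv->pv_pa == pa)
                            return (pv->pv_va);
            }
            return (0);             /* not found */
    }

    /* In pmap_map_chunk(): recover the L2 table for "va" from its
     * L1 entry, instead of requiring the caller to pass it in. */
    pte = (pt_entry_t *)
        kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
    if (pte == NULL)
            panic("pmap_map_chunk: can't find L2 table for VA 0x%08lx",
                va);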
diffstat:
sys/arch/acorn32/acorn32/rpc_machdep.c | 76 +++++++--------
sys/arch/arm/arm32/pmap.c | 65 +++++++++-----
sys/arch/arm/include/arm32/pmap.h | 14 +-
sys/arch/cats/cats/cats_machdep.c | 105 +++++++++-------------
sys/arch/evbarm/integrator/integrator_machdep.c | 60 ++++++------
sys/arch/evbarm/iq80310/iq80310_machdep.c | 103 +++++++++++----------
sys/arch/hpcarm/hpcarm/hpc_machdep.c | 74 +++++++--------
sys/arch/netwinder/netwinder/netwinder_machdep.c | 60 ++++++------
sys/arch/shark/ofw/ofw.c | 14 +-
9 files changed, 279 insertions(+), 292 deletions(-)
diffs (truncated from 1256 to 300 lines):
diff -r 521ef9efb537 -r 89d4ad1859bb sys/arch/acorn32/acorn32/rpc_machdep.c
--- a/sys/arch/acorn32/acorn32/rpc_machdep.c Thu Feb 21 21:53:00 2002 +0000
+++ b/sys/arch/acorn32/acorn32/rpc_machdep.c Thu Feb 21 21:58:00 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: rpc_machdep.c,v 1.21 2002/02/21 05:25:23 thorpej Exp $ */
+/* $NetBSD: rpc_machdep.c,v 1.22 2002/02/21 21:58:00 thorpej Exp $ */
/*
* Copyright (c) 2000-2001 Reinoud Zandijk.
@@ -57,7 +57,7 @@
#include <sys/param.h>
-__RCSID("$NetBSD: rpc_machdep.c,v 1.21 2002/02/21 05:25:23 thorpej Exp $");
+__RCSID("$NetBSD: rpc_machdep.c,v 1.22 2002/02/21 21:58:00 thorpej Exp $");
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -173,7 +173,7 @@
#define KERNEL_PT_VMDATA_NUM (KERNEL_VM_SIZE >> (PDSHIFT + 2))
#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
-pt_entry_t kernel_pt_table[NUM_KERNEL_PTS];
+pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];
struct user *proc0paddr;
@@ -615,7 +615,10 @@
&& kernel_l1pt.pv_pa == 0) {
valloc_pages(kernel_l1pt, PD_SIZE / NBPG);
} else {
- alloc_pages(kernel_pt_table[loop1], PT_SIZE / NBPG);
+ alloc_pages(kernel_pt_table[loop1].pv_pa,
+ PT_SIZE / NBPG);
+ kernel_pt_table[loop1].pv_va =
+ kernel_pt_table[loop1].pv_pa;
++loop1;
}
}
@@ -683,16 +686,16 @@
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, 0x00000000,
- kernel_pt_table[KERNEL_PT_SYS]);
+ &kernel_pt_table[KERNEL_PT_SYS]);
pmap_link_l2pt(l1pagetable, KERNEL_BASE,
- kernel_pt_table[KERNEL_PT_KERNEL]);
+ &kernel_pt_table[KERNEL_PT_KERNEL]);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
- kernel_pt_table[KERNEL_PT_VMDATA + loop]);
+ &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
pmap_link_l2pt(l1pagetable, PROCESS_PAGE_TBLS_BASE,
- kernel_ptpt.pv_pa);
+ &kernel_ptpt);
pmap_link_l2pt(l1pagetable, VMEM_VBASE,
- kernel_pt_table[KERNEL_PT_VMEM]);
+ &kernel_pt_table[KERNEL_PT_VMEM]);
#ifdef VERBOSE_INIT_ARM
@@ -700,7 +703,7 @@
#endif
/* Now we fill in the L2 pagetable for the kernel code/data */
- l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL];
+ l2pagetable = kernel_pt_table[KERNEL_PT_KERNEL].pv_pa;
/*
* The defines are a workaround for a recent problem that occurred
@@ -709,15 +712,15 @@
*/
if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
#if defined(CPU_ARM6) || defined(CPU_ARM7)
- logical = pmap_map_chunk(l1pagetable, l2pagetable,
- KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
+ logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
+ physical_start, kernexec->a_text,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else /* CPU_ARM6 || CPU_ARM7 */
- logical = pmap_map_chunk(l1pagetable, l2pagetable,
- KERNEL_TEXT_BASE, physical_start, kernexec->a_text,
+ logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
+ physical_start, kernexec->a_text,
VM_PROT_READ, PTE_CACHE);
#endif /* CPU_ARM6 || CPU_ARM7 */
- logical += pmap_map_chunk(l1pagetable, l2pagetable,
+ logical += pmap_map_chunk(l1pagetable,
KERNEL_TEXT_BASE + logical, physical_start + logical,
kerneldatasize - kernexec->a_text,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@@ -726,7 +729,7 @@
* Most likely an ELF kernel ...
* XXX no distinction yet between read only and read/write area's ...
*/
- pmap_map_chunk(l1pagetable, l2pagetable, KERNEL_TEXT_BASE,
+ pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
physical_start, kerneldatasize,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
};
@@ -737,20 +740,16 @@
#endif
/* Map the stack pages */
- pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
- irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- pmap_map_chunk(l1pagetable, l2pagetable, abtstack.pv_va,
- abtstack.pv_pa, ABT_STACK_SIZE * NBPG,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- pmap_map_chunk(l1pagetable, l2pagetable, undstack.pv_va,
- undstack.pv_pa, UND_STACK_SIZE * NBPG,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- pmap_map_chunk(l1pagetable, l2pagetable, kernelstack.pv_va,
- kernelstack.pv_pa, UPAGES * NBPG,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
+ IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
+ ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
+ UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
+ UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- pmap_map_chunk(l1pagetable, l2pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
+ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
PD_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
/* Map the page table that maps the kernel pages */
@@ -767,14 +766,11 @@
* it but we would need the page tables if DRAM was in use.
* XXX please map two adjacent virtual areas to ONE physical area
*/
- l2pagetable = kernel_pt_table[KERNEL_PT_VMEM];
-
- pmap_map_chunk(l1pagetable, l2pagetable, VMEM_VBASE,
+ pmap_map_chunk(l1pagetable, VMEM_VBASE, videomemory.vidm_pbase,
+ videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ pmap_map_chunk(l1pagetable, VMEM_VBASE + videomemory.vidm_size,
videomemory.vidm_pbase, videomemory.vidm_size,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- pmap_map_chunk(l1pagetable, l2pagetable,
- VMEM_VBASE + videomemory.vidm_size, videomemory.vidm_pbase,
- videomemory.vidm_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
@@ -784,20 +780,20 @@
/* The -2 is slightly bogus, it should be -log2(sizeof(pt_entry_t)) */
l2pagetable = kernel_ptpt.pv_pa;
pmap_map_entry(l2pagetable, (KERNEL_BASE >> (PGSHIFT-2)),
- kernel_pt_table[KERNEL_PT_KERNEL], VM_PROT_READ|VM_PROT_WRITE,
+ kernel_pt_table[KERNEL_PT_KERNEL].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
pmap_map_entry(l2pagetable, (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT-2)),
kernel_ptpt.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
pmap_map_entry(l2pagetable, (VMEM_VBASE >> (PGSHIFT-2)),
- kernel_pt_table[KERNEL_PT_VMEM], VM_PROT_READ|VM_PROT_WRITE,
+ kernel_pt_table[KERNEL_PT_VMEM].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
pmap_map_entry(l2pagetable, (0x00000000 >> (PGSHIFT-2)),
- kernel_pt_table[KERNEL_PT_SYS], VM_PROT_READ|VM_PROT_WRITE,
+ kernel_pt_table[KERNEL_PT_SYS].pv_pa, VM_PROT_READ|VM_PROT_WRITE,
PTE_NOCACHE);
for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop) {
pmap_map_entry(l2pagetable, ((KERNEL_VM_BASE +
(loop * 0x00400000)) >> (PGSHIFT-2)),
- kernel_pt_table[KERNEL_PT_VMDATA + loop],
+ kernel_pt_table[KERNEL_PT_VMDATA + loop].pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
}
@@ -805,7 +801,7 @@
* Map the system page in the kernel page table for the bottom 1Meg
* of the virtual memory map.
*/
- l2pagetable = kernel_pt_table[KERNEL_PT_SYS];
+ l2pagetable = kernel_pt_table[KERNEL_PT_SYS].pv_pa;
pmap_map_entry(l2pagetable, 0x0000000, systempage.pv_pa,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
diff -r 521ef9efb537 -r 89d4ad1859bb sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Thu Feb 21 21:53:00 2002 +0000
+++ b/sys/arch/arm/arm32/pmap.c Thu Feb 21 21:58:00 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.45 2002/02/21 06:36:11 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.46 2002/02/21 21:58:01 thorpej Exp $ */
/*
* Copyright (c) 2001 Richard Earnshaw
@@ -142,7 +142,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.45 2002/02/21 06:36:11 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.46 2002/02/21 21:58:01 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@@ -3723,6 +3723,27 @@
/************************ Bootstrapping routines ****************************/
/*
+ * This list exists for the benefit of pmap_map_chunk(). It keeps track
+ * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
+ * find them as necessary.
+ *
+ * Note that the data on this list is not valid after initarm() returns.
+ */
+SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
+
+static vaddr_t
+kernel_pt_lookup(paddr_t pa)
+{
+ pv_addr_t *pv;
+
+ SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
+ if (pv->pv_pa == pa)
+ return (pv->pv_va);
+ }
+ return (0);
+}
+
+/*
* pmap_map_section:
*
* Create a single section mapping.
@@ -3767,17 +3788,19 @@
* page table at the slot for "va".
*/
void
-pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, paddr_t l2pa)
+pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
u_int slot = va >> PDSHIFT;
- KASSERT((l2pa & PGOFSET) == 0);
-
- pde[slot + 0] = L1_PTE(l2pa + 0x000);
- pde[slot + 1] = L1_PTE(l2pa + 0x400);
- pde[slot + 2] = L1_PTE(l2pa + 0x800);
- pde[slot + 3] = L1_PTE(l2pa + 0xc00);
+ KASSERT((l2pv->pv_pa & PGOFSET) == 0);
+
+ pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
+ pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
+ pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
+ pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
+
+ SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}
/*
@@ -3788,13 +3811,13 @@
* provided L1 and L2 tables at the specified virtual address.
*/
vsize_t
-pmap_map_chunk(vaddr_t l1pt, vaddr_t l2pt, vaddr_t va, paddr_t pa,
- vsize_t size, int prot, int cache)
+pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
+ int prot, int cache)
{
pd_entry_t *pde = (pd_entry_t *) l1pt;
- pt_entry_t *pte = (pt_entry_t *) l2pt;
pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
+ pt_entry_t *pte;
vsize_t resid;
int i;
@@ -3830,8 +3853,13 @@
* for the current VA.
*/
if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
- panic("pmap_map_chunk: no L2 table for VA 0x%08lx\n",
- va);
+ panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
+
+ pte = (pt_entry_t *)
+ kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
+ if (pte == NULL)
+ panic("pmap_map_chunk: can't find L2 table for VA"
+ "0x%08lx", va);
/* See if we can use a L2 large page mapping. */
if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
@@ -3840,13 +3868,8 @@
printf("L");
#endif
for (i = 0; i < 16; i++) {
-#ifdef cats /* XXXJRT */
- pte[((va >> PGSHIFT) & 0x7f0) + i] =
- L2_LPTE(pa, ap, fl);
-#else
pte[((va >> PGSHIFT) & 0x3f0) + i] =
L2_LPTE(pa, ap, fl);
-#endif
}
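For porters updating machine-dependent bootstrap code, the call-site
change is mechanical, as the rpc_machdep.c hunks above show: drop the
l2pagetable argument to pmap_map_chunk(), and pass pmap_link_l2pt() a
pv_addr_t * so the table gets registered on the lookup list. For
example (taken from the stack-mapping hunk above):

    /* Before: caller had to name the L2 table explicitly. */
    pmap_map_chunk(l1pagetable, l2pagetable, irqstack.pv_va,
        irqstack.pv_pa, IRQ_STACK_SIZE * NBPG,
        VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

    /* After: pmap_map_chunk() finds the L2 table from the VA. */
    pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
        IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);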