Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/arm32 Rework pmap_growkernel() to *not* use the...
details: https://anonhg.NetBSD.org/src/rev/3e55d7c69812
branches: trunk
changeset: 546614:3e55d7c69812
user: scw <scw%NetBSD.org@localhost>
date: Fri May 02 19:01:00 2003 +0000
description:
Rework pmap_growkernel() to *not* use the regular pmap_alloc_l2_bucket()
for L2 allocation. This avoids potential recursive calls into
uvm_km_kmemalloc() via the pool allocator.
Bug spotted by Allen Briggs while trying to boot on a machine with 512MB
of memory.
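
The hazard described above can be shown with a small self-contained sketch (hypothetical names and simplified logic; not the committed code). In the old scheme, pmap_growkernel() took L2 tables through pmap_alloc_l2_bucket() and the pool allocator; when the pool needed a fresh backing page it went through uvm_km_kmemalloc(), and mapping that new kernel VA could land back in pmap_growkernel(). The new pmap_grow_map()/pmap_grow_l2_bucket() path in the diff below sidesteps the pool entirely by taking a physical page straight from UVM (uvm_page_physget() before uvm.page_init_done, uvm_pagealloc() afterwards) and entering the mapping by hand:

/*
 * Illustrative sketch only: hypothetical names, simplified logic,
 * not the committed NetBSD code.  It models the recursion the change
 * removes -- growing the kernel map via the pool, whose backing pages
 * come from the kernel VA allocator, which may need the kernel map to
 * grow again.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

static int in_growkernel;	/* nonzero while "growing" the kernel map */

/*
 * Stand-in for uvm_km_kmemalloc(): handing out fresh kernel VA can
 * require the kernel map to grow, i.e. re-enter the grow routine.
 */
static void *
kmem_alloc_page(void (*grow)(void))
{

	grow();
	return malloc(4096);
}

/* Old scheme: L2 tables come from the pool; pool pages come from kmem. */
static void
grow_via_pool(void)
{

	assert(!in_growkernel && "recursive entry via the pool allocator");
	in_growkernel = 1;
	free(kmem_alloc_page(grow_via_pool));	/* recursion happens here */
	in_growkernel = 0;
}

/* New scheme: take a physical page directly and map it by hand. */
static void
grow_direct(void)
{
	void *l2;

	assert(!in_growkernel && "recursive entry");
	in_growkernel = 1;
	l2 = malloc(4096);	/* stands in for uvm_pagealloc()/uvm_page_physget() */
	/* ... write the PTE for the new page, PTE_SYNC it, zero the page ... */
	free(l2);
	in_growkernel = 0;
}

int
main(void)
{

	grow_direct();		/* completes without re-entering */
	printf("direct allocation path: ok\n");
	grow_via_pool();	/* trips the assertion: pool path re-enters */
	return 0;
}

Built with any C99 compiler, the direct path completes normally while the pool-backed path trips the re-entry assertion, which is the situation the reworked pmap_growkernel() avoids.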
diffstat:
sys/arch/arm/arm32/pmap_new.c | 374 +++++++++++++++++++++++------------------
1 files changed, 207 insertions(+), 167 deletions(-)
diffs (truncated from 550 to 300 lines):
diff -r d46efac9f246 -r 3e55d7c69812 sys/arch/arm/arm32/pmap_new.c
--- a/sys/arch/arm/arm32/pmap_new.c Fri May 02 18:05:46 2003 +0000
+++ b/sys/arch/arm/arm32/pmap_new.c Fri May 02 19:01:00 2003 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_new.c,v 1.6 2003/04/28 15:57:23 scw Exp $ */
+/* $NetBSD: pmap_new.c,v 1.7 2003/05/02 19:01:00 scw Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -210,7 +210,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_new.c,v 1.6 2003/04/28 15:57:23 scw Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_new.c,v 1.7 2003/05/02 19:01:00 scw Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
@@ -276,6 +276,11 @@
* Pool of PV structures
*/
static struct pool pmap_pv_pool;
+static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
+static void pmap_bootstrap_pv_page_free(struct pool *, void *);
+static struct pool_allocator pmap_bootstrap_pv_allocator = {
+ pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
+};
/*
* Pool and cache of l2_dtable structures.
@@ -284,11 +289,7 @@
*/
static struct pool pmap_l2dtable_pool;
static struct pool_cache pmap_l2dtable_cache;
-static void *pmap_bootstrap_page_alloc(struct pool *, int);
-static void pmap_bootstrap_page_free(struct pool *, void *);
-static struct pool_allocator pmap_bootstrap_allocator = {
- pmap_bootstrap_page_alloc, pmap_bootstrap_page_free
-};
+static vaddr_t pmap_kernel_l2dtable_kva;
/*
* Pool and cache of L2 page descriptors.
@@ -297,6 +298,8 @@
*/
static struct pool pmap_l2ptp_pool;
static struct pool_cache pmap_l2ptp_cache;
+static vaddr_t pmap_kernel_l2ptp_kva;
+static paddr_t pmap_kernel_l2ptp_phys;
/*
* pmap copy/zero page, and mem(5) hook point
@@ -310,7 +313,6 @@
* Flag to indicate if pmap_init() has done its thing
*/
boolean_t pmap_initialized;
-boolean_t pmap_postinit_done;
/*
* Misc. locking data structures
@@ -440,13 +442,9 @@
pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
#define pmap_free_l2_dtable(l2) \
pool_cache_put(&pmap_l2dtable_cache, (l2))
-
-static pt_entry_t *pmap_alloc_l2_ptp(paddr_t *);
-#ifndef PMAP_INCLUDE_PTE_SYNC
-static void pmap_free_l2_ptp(pt_entry_t *, paddr_t);
-#else
-static void pmap_free_l2_ptp(boolean_t, pt_entry_t *, paddr_t);
-#endif
+#define pmap_alloc_l2_ptp(pap) \
+ ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
+ PR_NOWAIT, (pap)))
/*
* We try to map the page tables write-through, if possible. However, not
@@ -484,10 +482,8 @@
* Local prototypes
*/
static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
-#ifdef ARM32_NEW_VM_LAYOUT
static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
pt_entry_t **);
-#endif
static boolean_t pmap_is_current(pmap_t);
static boolean_t pmap_is_cached(pmap_t);
static void pmap_enter_pv(struct vm_page *, struct pv_entry *,
@@ -637,111 +633,6 @@
}
}
-/*
- * During bootstrap, we steal pages to use as L2 descriptor tables
- * to avoid calling into the pool(9) module. This variable is a
- * free-list of those stolen pages (we get four L2s per 4KB page,
- * so we need somewhere to track the excess).
- *
- * Since these are allocated for the kernel pmap, they are never freed
- * so we don't risk polluting the pool with 'unmanaged' memory.
- */
-static volatile void *pmap_static_l2_ptps;
-
-/*
- * void *pmap_alloc_l2_ptp(paddr_t *)
- *
- * Allocate an L2 descriptor table.
- *
- * We return both the kernel virtual address *and* physical address of
- * the table.
- */
-static pt_entry_t *
-pmap_alloc_l2_ptp(paddr_t *pap)
-{
- pt_entry_t *ptep;
- vaddr_t va;
- int i;
-
- /*
- * In the normal case, use the pool cache to allocate L2s
- */
- if (__predict_true(pmap_postinit_done)) {
- return ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,
- PR_NOWAIT, pap));
- }
-
- /*
- * Otherwise, check the bootstrap free list
- */
- if (pmap_static_l2_ptps) {
- ptep = (pt_entry_t *)pmap_static_l2_ptps;
- pmap_static_l2_ptps = (volatile void *)ptep[0];
- *pap = ptep[1];
- ptep[0] = ptep[1] = 0;
- PTE_SYNC_RANGE(ptep, 2);
- return (ptep);
- }
-
- /*
- * Steal a page from UVM
- */
- va = uvm_km_kmemalloc(kernel_map, NULL, PAGE_SIZE, UVM_KMF_NOWAIT);
- pmap_extract(pmap_kernel(), va, pap);
- ptep = (pt_entry_t *)va;
- memset(ptep, 0, PAGE_SIZE);
-
- /*
- * What follows is gross. Look away now.
- */
- va += L2_TABLE_SIZE_REAL;
- for (i = 1; i < (PAGE_SIZE / L2_TABLE_SIZE_REAL); i++) {
- *((volatile void **)va) = pmap_static_l2_ptps;
- ((paddr_t *)va)[1] = *pap + (i * L2_TABLE_SIZE_REAL);
- pmap_static_l2_ptps = (void *)va;
- va += L2_TABLE_SIZE_REAL;
- }
-
- PTE_SYNC_RANGE(ptep, PAGE_SIZE/sizeof(pt_entry_t));
- return (ptep);
-}
-
-/*
- * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
- *
- * Free an L2 descriptor table.
- */
-static __inline void
-#ifndef PMAP_INCLUDE_PTE_SYNC
-pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
-#else
-pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2, paddr_t pa)
-#endif
-{
-
- if (__predict_true(uvm.page_init_done)) {
-#ifdef PMAP_INCLUDE_PTE_SYNC
- /*
- * Note: With a write-back cache, we may need to sync this
- * L2 table before re-using it.
- * This is because it may have belonged to a non-current
- * pmap, in which case the cache syncs would have been
- * skipped when the pages were being unmapped. If the
- * L2 table were then to be immediately re-allocated to
- * the *current* pmap, it may well contain stale mappings
- * which have not yet been cleared by a cache write-back
- * and so would still be visible to the mmu.
- */
- if (need_sync) {
- PTE_SYNC_RANGE(l2,
- L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
- }
-#endif
- pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
- } else
- panic("pmap_free_l2_ptp: called during initialisation!");
-}
-
static __inline boolean_t
pmap_is_current(pmap_t pm)
{
@@ -1098,6 +989,36 @@
}
/*
+ * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
+ *
+ * Free an L2 descriptor table.
+ */
+static __inline void
+#ifndef PMAP_INCLUDE_PTE_SYNC
+pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
+#else
+pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2, paddr_t pa)
+#endif
+{
+#ifdef PMAP_INCLUDE_PTE_SYNC
+ /*
+ * Note: With a write-back cache, we may need to sync this
+ * L2 table before re-using it.
+ * This is because it may have belonged to a non-current
+ * pmap, in which case the cache syncs would have been
+ * skipped when the pages were being unmapped. If the
+ * L2 table were then to be immediately re-allocated to
+ * the *current* pmap, it may well contain stale mappings
+ * which have not yet been cleared by a cache write-back
+ * and so would still be visible to the mmu.
+ */
+ if (need_sync)
+ PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
+#endif
+ pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
+}
+
+/*
* Returns a pointer to the L2 bucket associated with the specified pmap
* and VA, or NULL if no L2 bucket exists for the address.
*/
@@ -3537,6 +3458,126 @@
*end = virtual_end;
}
+/*
+ * Helper function for pmap_grow_l2_bucket()
+ */
+static __inline int
+pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
+{
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep;
+ paddr_t pa;
+
+ if (uvm.page_init_done == FALSE) {
+ if (uvm_page_physget(&pa) == FALSE)
+ return (1);
+ } else {
+ struct vm_page *pg;
+ pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
+ if (pg == NULL)
+ return (1);
+ pa = VM_PAGE_TO_PHYS(pg);
+ }
+
+ if (pap)
+ *pap = pa;
+
+ l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+ KDASSERT(l2b != NULL);
+
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ *ptep = L2_S_PROTO | pa | cache_mode |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
+ PTE_SYNC(ptep);
+ memset((void *)va, 0, PAGE_SIZE);
+ return (0);
+}
+
+/*
+ * This is the same as pmap_alloc_l2_bucket(), except that it is only
+ * used by pmap_growkernel().
+ */
+static __inline struct l2_bucket *
+pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
+{
+ struct l2_dtable *l2;
+ struct l2_bucket *l2b;
+ u_short l1idx;
+ vaddr_t nva;
+
+ l1idx = L1_IDX(va);
+
+ if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ /*
+ * No mapping at this address, as there is
+ * no entry in the L1 table.
+ * Need to allocate a new l2_dtable.
+ */
+ nva = pmap_kernel_l2dtable_kva;
+ if ((nva & PGOFSET) == 0) {
+ /*
+ * Need to allocate a backing page