Source-Changes-HG archive
[src/trunk]: src/sys/arch/powerpc/mpc6xx Rework pmap_bootstrap. Fix some com...
details: https://anonhg.NetBSD.org/src/rev/b692cb93cd94
branches: trunk
changeset: 511528:b692cb93cd94
user: matt <matt%NetBSD.org@localhost>
date: Thu Jun 21 03:26:12 2001 +0000
description:
Rework pmap_bootstrap. Fix some comments. Add old copyright until I finish
excising that code.
diffstat:
sys/arch/powerpc/mpc6xx/pmap.c | 472 ++++++++++++++++++++++++----------------
1 files changed, 281 insertions(+), 191 deletions(-)
diffs (truncated from 656 to 300 lines):
diff -r 31f8c7757801 -r b692cb93cd94 sys/arch/powerpc/mpc6xx/pmap.c
--- a/sys/arch/powerpc/mpc6xx/pmap.c Thu Jun 21 03:13:05 2001 +0000
+++ b/sys/arch/powerpc/mpc6xx/pmap.c Thu Jun 21 03:26:12 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.12 2001/06/16 03:32:48 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.13 2001/06/21 03:26:12 matt Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -35,6 +35,36 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * Copyright (C) 1995, 1996 Wolfgang Solfrank.
+ * Copyright (C) 1995, 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
#include <sys/param.h>
#include <sys/malloc.h>
@@ -83,7 +113,9 @@
u_long pmap_pte_overflow;
u_long pmap_pte_replacements;
u_long pmap_pvo_entries;
+u_long pmap_pvo_enter_depth;
u_long pmap_pvo_enter_calls;
+u_long pmap_pvo_remove_depth;
u_long pmap_pvo_remove_calls;
u_int64_t pmap_pte_spills = 0;
struct pvo_entry *pmap_pvo_syncicache;
@@ -100,6 +132,7 @@
#endif
static struct mem_region *mem, *avail;
+static u_int mem_cnt, avail_cnt;
#ifdef __HAVE_PMAP_PHYSSEG
/*
@@ -194,6 +227,7 @@
STATIC void pmap_syncicache(paddr_t);
STATIC void pmap_release (pmap_t);
+STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);
#define VSID_NBPW (sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
@@ -484,10 +518,10 @@
}
/*
- * Try to insert page table entry *pt into the pmap_pteg_table at idx.
+ * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
+ * (either primary or secondary location).
*
- * Note: *pt mustn't have PTE_VALID set.
- * This is done here as required by Book III, 4.12.
+ * Note: both the destination and source PTEs must not have PTE_VALID set.
*/
static int
pmap_pte_insert(int ptegidx, pte_t *pvo_pt)
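The rewritten comment above describes the MPC6xx hashed page table: a PTE can be placed in either its primary PTE group or, with the hash-identifier bit set, the secondary group obtained by inverting the hash. Below is a minimal stand-alone sketch of that probe order; the ex_* names are hypothetical and the bit values follow the 32-bit PowerPC PTE layout. It is an illustration of the technique, not the body of pmap_pte_insert() from this change.

/*
 * Sketch of a primary/secondary PTEG probe.  Hypothetical names;
 * PTE bit values as in the 32-bit PowerPC architecture.
 */
#include <sys/types.h>

typedef struct { uint32_t pte_hi, pte_lo; } ex_pte_t;
typedef struct { ex_pte_t pt[8]; } ex_pteg_t;

#define EX_PTE_VALID	0x80000000	/* V bit of pte_hi */
#define EX_PTE_HID	0x00000040	/* H bit: entry uses the secondary hash */

static ex_pteg_t *ex_pteg_table;	/* allocated and sized elsewhere */
static u_int ex_pteg_mask;		/* number of PTE groups - 1 */

static int
ex_pte_insert(int ptegidx, ex_pte_t *src)
{
	ex_pte_t *pt;
	int i;

	/* First probe the eight slots of the primary group... */
	for (pt = ex_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & EX_PTE_VALID) == 0) {
			*pt = *src;
			pt->pte_hi |= EX_PTE_VALID;
			return i;
		}
	}
	/* ...then the secondary group, marking the entry with the H bit. */
	for (pt = ex_pteg_table[ptegidx ^ ex_pteg_mask].pt, i = 0; i < 8;
	    i++, pt++) {
		if ((pt->pte_hi & EX_PTE_VALID) == 0) {
			*pt = *src;
			pt->pte_hi |= EX_PTE_VALID | EX_PTE_HID;
			return i;
		}
	}
	return -1;			/* both groups are full */
}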
@@ -616,9 +650,7 @@
* Restrict given range to physical memory
*/
void
-pmap_real_memory(start, size)
- paddr_t *start;
- psize_t *size;
+pmap_real_memory(paddr_t *start, psize_t *size)
{
struct mem_region *mp;
@@ -678,20 +710,21 @@
}
/*
- * How much virtual space is available to the kernel?
+ * How much virtual space does the kernel get?
*/
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
/*
- * Reserve one segment for kernel virtual memory
+ * For now, reserve one segment (minus some overhead) for kernel
+ * virtual memory
*/
*start = VM_MIN_KERNEL_ADDRESS + pmap_rkva_count * NBPG;
*end = VM_MAX_KERNEL_ADDRESS;
}
/*
- * Create and return a physical map.
+ * Allocate, initialize, and return a new physical map.
*/
pmap_t
pmap_create(void)
@@ -973,8 +1006,8 @@
#endif
panic("pmap_pvo_to_pte: pvo %p: has invalid pte %p in "
"pmap_pteg_table but valid in pvo", pvo, pt);
+ }
#endif
- }
return NULL;
}
@@ -1256,9 +1289,14 @@
int ptegidx;
int i;
+ if (pmap_pvo_remove_depth > 0)
+ panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
+ if (++pmap_pvo_enter_depth > 1)
+ panic("pmap_pvo_enter: called recursively!");
+
pmap_pvo_enter_calls++;
/*
- * Compute the HTAB index.
+ * Compute the PTE Group index.
*/
va &= ~ADDR_POFF;
sr = va_to_sr(pm->pm_sr, va);
@@ -1311,6 +1349,7 @@
#endif
if ((flags & PMAP_CANFAIL) == 0)
panic("pmap_pvo_enter: failed");
+ pmap_pvo_enter_depth--;
return ENOMEM;
#if 0
}
@@ -1355,6 +1394,7 @@
#endif
}
PMAP_PVO_CHECK(pvo); /* sanity check */
+ pmap_pvo_enter_depth--;
return first ? ENOENT : 0;
}
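The pmap_pvo_enter_depth and pmap_pvo_remove_depth counters added in these hunks form a plain reentrancy guard: bump the counter on entry, panic if it ever exceeds one, and undo the bump on every exit path, including the error returns. A small hypothetical sketch of the same pattern follows; it assumes the usual kernel headers for panic() and ENOMEM and is not the pmap code itself.

/* Hypothetical reentrancy-guard sketch, not code from this change. */
static u_long example_depth;

static int
example_guarded_op(int fail)
{
	if (++example_depth > 1)
		panic("example_guarded_op: called recursively!");

	if (fail) {
		/* every early return must undo the increment... */
		example_depth--;
		return ENOMEM;
	}

	/* ...as must the normal exit path */
	example_depth--;
	return 0;
}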
@@ -1363,6 +1403,9 @@
{
volatile pte_t *pt;
+ if (++pmap_pvo_remove_depth > 1)
+ panic("pmap_pvo_remove: called recursively!");
+
PMAP_PVO_CHECK(pvo); /* sanity check */
/*
* If there is an active pte entry, we need to deactivate it
@@ -1411,6 +1454,7 @@
pmap_pvo_entries--;
pmap_pvo_remove_calls++;
}
+ pmap_pvo_remove_depth--;
}
/*
@@ -2340,17 +2384,96 @@
}
/*
+ * Find a chunk of memory with the right size and alignment.
+ */
+void *
+pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
+{
+ struct mem_region *mp;
+ paddr_t s, e;
+ int i, j;
+
+ size = round_page(size);
+
+ DPRINTFN(6,("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
+ size, alignment, at_end));
+
+ if (alignment < NBPG || (alignment & (alignment-1)) != 0)
+ panic("pmap_boot_find_memory: invalid alignment %lx",
+ alignment);
+
+ if (at_end) {
+ if (alignment != NBPG)
+ panic("pmap_boot_find_memory: invalid ending "
+ "alignment %lx", alignment);
+
+ for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
+ s = mp->start + mp->size - size;
+ if (s >= mp->start) {
+ mp->size -= size;
+ printf(": %lx\n", s);
+ return (void *) s;
+ }
+ }
+ panic("pmap_boot_find_memory: no available memory");
+ }
+
+ for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
+ s = (mp->start + alignment - 1) & ~(alignment-1);
+ e = s + size;
+
+ /*
+ * Does the calculated block lie entirely within this region?
+ */
+ if (s < mp->start || e > mp->start + mp->size)
+ continue;
+
+ DPRINTFN(6,(": %lx\n", s));
+ if (s == mp->start) {
+ /*
+ * If the block starts at the beginning of the region,
+ * adjust the size & start. (the region may now be
+ * zero in length)
+ */
+ mp->start += size;
+ mp->size -= size;
+ } else if (e == mp->start + mp->size) {
+ /*
+ * If the block ends at the end of the region,
+ * adjust only the size.
+ */
+ mp->size -= size;
+ } else {
+ /*
+ * Block is in the middle of the region, so we
+ * have to split it in two.
+ */
+ for (j = avail_cnt-1; j > i + 1; j--) {
+ avail[j] = avail[j-1];
+ }
+ mp[1].start = e;
+ mp[1].size = mp[0].start + mp[0].size - e;
+ mp[0].size = s - mp[0].start;
+ avail_cnt++;
+ }
+ return (void *) s;
+ }
+ panic("pmap_boot_find_memory: not enough memory for "
+ "%lx/%lx allocation?", size, alignment);
+}
+
+/*
* This is not part of the defined PMAP interface and is specific to the
- * PowerPC architecture.
- * This is called during initppc, before the system is really initialized.
+ * PowerPC architecture. This is called during initppc, before the system
+ * is really initialized.
*/
void
-pmap_bootstrap(vaddr_t kernelstart, vaddr_t kernelend)
+pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
{
- struct mem_region *mp, *mp1;
- int cnt, i;
- u_int npgs = 0;
- u_int s, e, sz;
+ struct mem_region *mp, tmp;
+ paddr_t s, e;
+ psize_t size;
+ int i, j;
/*
* Get memory.
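The new pmap_boot_find_memory() above handles three cases when carving a block out of an avail[] entry: trim the front, trim the back, or split the entry in two. Here is a hypothetical userland harness for the middle-of-region split, using a simplified region type rather than struct mem_region; it mimics the splitting idea but is not kernel code.

/* Hypothetical harness: carve 1MB out of the middle of a 32MB region. */
#include <stdio.h>

struct region { unsigned long start, size; };

int
main(void)
{
	struct region avail[4] = { { 0x00000000, 0x02000000 } };	/* one 32MB chunk */
	int avail_cnt = 1;
	unsigned long s = 0x00800000, size = 0x00100000;	/* 1MB block at 8MB */
	unsigned long e = s + size;
	int i = 0, j;

	/*
	 * The block sits in the middle of avail[i], so split the entry in
	 * two.  (In general any entries after avail[i] would be shifted up
	 * first; with a single region there is nothing to shift.)
	 */
	avail[i + 1].start = e;
	avail[i + 1].size = avail[i].start + avail[i].size - e;
	avail[i].size = s - avail[i].start;
	avail_cnt++;

	for (j = 0; j < avail_cnt; j++)
		printf("avail[%d]: start=%#lx size=%#lx\n",
		    j, avail[j].start, avail[j].size);
	return 0;
}

With these numbers the harness prints avail[0] with size 0x800000 and avail[1] starting at 0x900000 with size 0x1700000, i.e. the original region minus the 1MB block that was carved out of its middle.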
@@ -2368,230 +2491,197 @@
}
#endif
- for (mp = mem; mp->size; mp++)
+ /*
+ * Find out how much physical memory we have and in how many chunks.
+ */
+ for (mem_cnt = 0, mp = mem; mp->size; mp++) {
+#ifdef PMAP_MEMLIMIT
+ if (mp->start >= PMAP_MEMLIMIT * 1024*1024)
+ continue;
+ if (mp->start + mp->size > PMAP_MEMLIMIT * 1024*1024) {
+ size = PMAP_MEMLIMIT * 1024*1024 - mp->start;
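The final, truncated hunk begins counting the physical memory chunks and, under the optional PMAP_MEMLIMIT define, skips or trims regions above a compile-time limit. A hypothetical stand-alone sketch of that clamping idea, with simplified types and run in userland rather than the kernel:

/* Hypothetical sketch of clamping memory regions to a compile-time limit. */
#include <stdio.h>

#define MEMLIMIT_MB	64UL

struct region { unsigned long start, size; };

int
main(void)
{
	struct region mem[] = {
		{ 0x00000000, 0x04000000 },	/* 64MB below the limit */
		{ 0x04000000, 0x04000000 },	/* 64MB entirely above it */
		{ 0, 0 }
	};
	unsigned long limit = MEMLIMIT_MB * 1024 * 1024;
	struct region *mp;
	int cnt;

	for (cnt = 0, mp = mem; mp->size; mp++) {
		if (mp->start >= limit)
			continue;		/* region lies entirely above the limit */
		if (mp->start + mp->size > limit)
			mp->size = limit - mp->start;	/* trim to the limit */
		printf("kept: start=%#lx size=%#lx\n", mp->start, mp->size);
		cnt++;
	}
	printf("%d region(s) kept\n", cnt);
	return 0;
}

Regions entirely above the limit are dropped from the count; a region straddling the limit is trimmed so that only the part below the limit remains.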