Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/arm Use vm_page_md rather than pmap_physseg. Saves...
details: https://anonhg.NetBSD.org/src/rev/9dd9c8b7eea2
branches: trunk
changeset: 521789:9dd9c8b7eea2
user: thorpej <thorpej%NetBSD.org@localhost>
date: Tue Feb 05 21:14:36 2002 +0000
description:
Use vm_page_md rather than pmap_physseg. Saves lots of cycles in
common operations.
diffstat:
sys/arch/arm/arm32/pmap.c | 448 +++++++++++++---------------------
sys/arch/arm/include/arm32/pmap.h | 21 +-
sys/arch/arm/include/arm32/vmparam.h | 29 +-
3 files changed, 194 insertions(+), 304 deletions(-)
diffs (truncated from 1140 to 300 lines):
diff -r 37e3b19ff861 -r 9dd9c8b7eea2 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Tue Feb 05 21:00:09 2002 +0000
+++ b/sys/arch/arm/arm32/pmap.c Tue Feb 05 21:14:36 2002 +0000
@@ -1,6 +1,7 @@
-/* $NetBSD: pmap.c,v 1.36 2002/01/25 19:19:25 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.37 2002/02/05 21:14:36 thorpej Exp $ */
/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
* Copyright (c) 2001 Richard Earnshaw
* Copyright (c) 2001 Christopher Gilbert
* All rights reserved.
@@ -142,7 +143,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.36 2002/01/25 19:19:25 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.37 2002/02/05 21:14:36 thorpej Exp $");
#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
if (pmap_debug_level >= (_lev_)) \
@@ -241,7 +242,7 @@
#define ALLOCPV_TRY 1 /* just try to allocate, don't steal */
#define ALLOCPV_NONEED 2 /* don't need PV, just growing cache */
static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
-static void pmap_enter_pv __P((struct pv_head *,
+static void pmap_enter_pv __P((struct vm_page *,
struct pv_entry *, struct pmap *,
vaddr_t, struct vm_page *, int));
static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
@@ -249,27 +250,26 @@
static void pmap_free_pv_doit __P((struct pv_entry *));
static void pmap_free_pvpage __P((void));
static boolean_t pmap_is_curpmap __P((struct pmap *));
-static struct pv_entry *pmap_remove_pv __P((struct pv_head *, struct pmap *,
+static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
vaddr_t));
#define PMAP_REMOVE_ALL 0 /* remove all mappings */
#define PMAP_REMOVE_SKIPWIRED 1 /* skip wired mappings */
-static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct pv_head *,
+static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
u_int, u_int));
static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
-static struct pv_head *pmap_find_pvh __P((paddr_t));
-static void pmap_remove_all __P((paddr_t));
+static void pmap_remove_all __P((struct vm_page *));
vsize_t npages;
static struct vm_page *pmap_alloc_ptp __P((struct pmap *, vaddr_t, boolean_t));
static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t, boolean_t));
-__inline static void pmap_clearbit __P((paddr_t, unsigned int));
-__inline static boolean_t pmap_testbit __P((paddr_t, unsigned int));
+__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
+__inline static boolean_t pmap_testbit __P((struct vm_page *, unsigned int));
extern paddr_t physical_start;
extern paddr_t physical_freestart;
@@ -303,7 +303,7 @@
/* Local function prototypes (not used outside this file) */
pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
-void pmap_copy_on_write __P((paddr_t pa));
+void pmap_copy_on_write __P((struct vm_page *));
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));
@@ -318,11 +318,11 @@
static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));
-__inline static void pmap_vac_me_harder __P((struct pmap *, struct pv_head *,
+__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
pt_entry_t *, boolean_t));
-static void pmap_vac_me_kpmap __P((struct pmap *, struct pv_head *,
+static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
pt_entry_t *, boolean_t));
-static void pmap_vac_me_user __P((struct pmap *, struct pv_head *,
+static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
pt_entry_t *, boolean_t));
/*
@@ -799,26 +799,26 @@
/*
* main pv_entry manipulation functions:
- * pmap_enter_pv: enter a mapping onto a pv_head list
- * pmap_remove_pv: remove a mappiing from a pv_head list
+ * pmap_enter_pv: enter a mapping onto a vm_page list
+ * pmap_remove_pv: remove a mapping from a vm_page list
*
* NOTE: pmap_enter_pv expects to lock the pvh itself
* pmap_remove_pv expects the caller to lock the pvh before calling
*/
/*
- * pmap_enter_pv: enter a mapping onto a pv_head lst
+ * pmap_enter_pv: enter a mapping onto a vm_page list
*
* => caller should hold the proper lock on pmap_main_lock
* => caller should have pmap locked
- * => we will gain the lock on the pv_head and allocate the new pv_entry
+ * => we will gain the lock on the vm_page and allocate the new pv_entry
* => caller should adjust ptp's wire_count before calling
* => caller should not adjust pmap's wire_count
*/
__inline static void
-pmap_enter_pv(pvh, pve, pmap, va, ptp, flags)
- struct pv_head *pvh;
+pmap_enter_pv(pg, pve, pmap, va, ptp, flags)
+ struct vm_page *pg;
struct pv_entry *pve; /* preallocated pve for us to use */
struct pmap *pmap;
vaddr_t va;
@@ -829,10 +829,10 @@
pve->pv_va = va;
pve->pv_ptp = ptp; /* NULL for kernel pmap */
pve->pv_flags = flags;
- simple_lock(&pvh->pvh_lock); /* lock pv_head */
- pve->pv_next = pvh->pvh_list; /* add to ... */
- pvh->pvh_list = pve; /* ... locked list */
- simple_unlock(&pvh->pvh_lock); /* unlock, done! */
+ simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */
+ pve->pv_next = pg->mdpage.pvh_list; /* add to ... */
+ pg->mdpage.pvh_list = pve; /* ... locked list */
+ simple_unlock(&pg->mdpage.pvh_slock); /* unlock, done! */
if (pve->pv_flags & PT_W)
++pmap->pm_stats.wired_count;
}
@@ -842,21 +842,21 @@
*
* => caller should hold proper lock on pmap_main_lock
* => pmap should be locked
- * => caller should hold lock on pv_head [so that attrs can be adjusted]
+ * => caller should hold lock on vm_page [so that attrs can be adjusted]
* => caller should adjust ptp's wire_count and free PTP if needed
* => caller should NOT adjust pmap's wire_count
* => we return the removed pve
*/
__inline static struct pv_entry *
-pmap_remove_pv(pvh, pmap, va)
- struct pv_head *pvh;
+pmap_remove_pv(pg, pmap, va)
+ struct vm_page *pg;
struct pmap *pmap;
vaddr_t va;
{
struct pv_entry *pve, **prevptr;
- prevptr = &pvh->pvh_list; /* previous pv_entry pointer */
+ prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */
pve = *prevptr;
while (pve) {
if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
@@ -875,7 +875,7 @@
*
* pmap_modify_pv: Update pv flags
*
- * => caller should hold lock on pv_head [so that attrs can be adjusted]
+ * => caller should hold lock on vm_page [so that attrs can be adjusted]
* => caller should NOT adjust pmap's wire_count
* => caller must call pmap_vac_me_harder() if writable status of a page
* may have changed.
@@ -886,10 +886,10 @@
/*__inline */
static u_int
-pmap_modify_pv(pmap, va, pvh, bic_mask, eor_mask)
+pmap_modify_pv(pmap, va, pg, bic_mask, eor_mask)
struct pmap *pmap;
vaddr_t va;
- struct pv_head *pvh;
+ struct vm_page *pg;
u_int bic_mask;
u_int eor_mask;
{
@@ -900,7 +900,7 @@
* There is at least one VA mapping this page.
*/
- for (npv = pvh->pvh_list; npv; npv = npv->pv_next) {
+ for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
if (pmap == npv->pv_pmap && va == npv->pv_va) {
oflags = npv->pv_flags;
npv->pv_flags = flags =
@@ -1146,26 +1146,6 @@
TAILQ_INIT(&pv_unusedpgs);
/*
- * compute the number of pages we have and then allocate RAM
- * for each pages' pv_head and saved attributes.
- */
- {
- int npages, lcv;
- vsize_t s;
-
- npages = 0;
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
- npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
- s = (vsize_t) (sizeof(struct pv_head) * npages +
- sizeof(char) * npages);
- s = round_page(s); /* round up */
- boot_head = (char *)uvm_pageboot_alloc(s);
- bzero((char *)boot_head, s);
- if (boot_head == 0)
- panic("pmap_init: unable to allocate pv_heads");
- }
-
- /*
* initialize the pmap pool.
*/
@@ -1188,11 +1168,6 @@
void
pmap_init()
{
- int lcv, i;
-
-#ifdef MYCROFT_HACK
- printf("physmem = %d\n", physmem);
-#endif
/*
* Set the available memory vars - These do not map to real memory
@@ -1204,25 +1179,6 @@
avail_start = 0;
avail_end = physmem * NBPG;
- /* allocate pv_head stuff first */
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
- vm_physmem[lcv].pmseg.pvhead = (struct pv_head *)boot_head;
- boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.pvhead +
- (vm_physmem[lcv].end - vm_physmem[lcv].start));
- for (i = 0;
- i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) {
- simple_lock_init(
- &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock);
- }
- }
-
- /* now allocate attrs */
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
- vm_physmem[lcv].pmseg.attrs = (char *) boot_head;
- boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.attrs +
- (vm_physmem[lcv].end - vm_physmem[lcv].start));
- }
-
/*
* now we need to free enough pv_entry structures to allow us to get
* the kmem_map/kmem_object allocated and inited (done after this
@@ -1238,14 +1194,6 @@
pv_nfpvents = 0;
(void) pmap_add_pvpage(pv_initpage, FALSE);
-#ifdef MYCROFT_HACK
- for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
- printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
- lcv,
- vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
- vm_physmem[lcv].start, vm_physmem[lcv].end);
- }
-#endif
pmap_initialized = TRUE;
/* Initialise our L1 page table queues and counters */
@@ -1836,25 +1784,6 @@
}
/*
- * pmap_find_pv()
- *
- * This is a local function that finds a PV head for a given physical page.
- * This is a common op, and this function removes loads of ifdefs in the code.
- */
-static __inline struct pv_head *
-pmap_find_pvh(phys)
- paddr_t phys;
-{
- int bank, off;
- struct pv_head *pvh;
-
- if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
- panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
- pvh = &vm_physmem[bank].pmseg.pvhead[off];
- return (pvh);
-}
-
-/*
* pmap_zero_page()
*
* Zero a given physical page by mapping it at a page hook point.
@@ -1866,13 +1795,13 @@
pmap_zero_page(phys)
Home |
Main Index |
Thread Index |
Old Index