[src/trunk]: src/sys Add a VM_MDPAGE_MEMBERS macro that defines pmap-specific...
details: https://anonhg.NetBSD.org/src/rev/5911d13440de
branches: trunk
changeset: 509223:5911d13440de
user: thorpej <thorpej%NetBSD.org@localhost>
date: Sun Apr 29 22:44:31 2001 +0000
description:
Add a VM_MDPAGE_MEMBERS macro that defines pmap-specific data for
each vm_page structure. Add a VM_MDPAGE_INIT() macro to initialize this
data when pages are initialized by UVM. These macros are mandatory,
but ports may #define them to nothing if they are not needed/used.
This deprecates struct pmap_physseg. As a transitional measure,
allow a port to #define PMAP_PHYSSEG so that it can continue to
use it until its pmap is converted to use VM_MDPAGE_MEMBERS.
Use all this stuff to eliminate a lot of extra work in the Alpha
pmap module (it's smaller and faster now). Changes to other pmap
modules will follow.
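
For illustration, a minimal sketch of what a port's <machine/vmparam.h>
could provide under this scheme. The pvh_slock/pvh_list/pvh_attrs field
names are taken from the Alpha diff below; the exact member list, the
do/while wrapper, and the zero initial attribute value are assumptions,
not a literal copy of the Alpha definition:

	#include <sys/queue.h>
	#include <sys/lock.h>

	struct pv_entry;		/* defined by the port's pmap */

	/* Members spliced into each struct vm_page by UVM. */
	#define	VM_MDPAGE_MEMBERS					\
		struct simplelock pvh_slock;	/* lock on pvh_list */	\
		LIST_HEAD(, pv_entry) pvh_list;	/* pv_entry list */	\
		int pvh_attrs;			/* page attrs (PGA_*) */

	/* Run by UVM as each vm_page is initialized. */
	#define	VM_MDPAGE_INIT(pg)					\
	do {								\
		simple_lock_init(&(pg)->pvh_slock);			\
		LIST_INIT(&(pg)->pvh_list);				\
		(pg)->pvh_attrs = 0;					\
	} while (/*CONSTCOND*/ 0)

A port whose pmap keeps no per-page data defines both macros to nothing,
and an unconverted port can instead #define PMAP_PHYSSEG to keep using
struct pmap_physseg until its pmap is converted.
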
diffstat:
sys/arch/alpha/alpha/pmap.c | 324 ++++++++++------------------------
sys/arch/alpha/include/pmap.h | 33 +--
sys/arch/alpha/include/vmparam.h | 19 +-
sys/arch/amiga/include/vmparam.h | 6 +-
sys/arch/amigappc/include/vmparam.h | 6 +-
sys/arch/arm26/include/vmparam.h | 12 +-
sys/arch/arm32/include/vmparam.h | 6 +-
sys/arch/atari/include/vmparam.h | 6 +-
sys/arch/bebox/include/vmparam.h | 6 +-
sys/arch/hp300/include/vmparam.h | 6 +-
sys/arch/hpcarm/include/vmparam.h | 6 +-
sys/arch/i386/include/vmparam.h | 6 +-
sys/arch/luna68k/include/vmparam.h | 6 +-
sys/arch/mac68k/include/vmparam.h | 6 +-
sys/arch/macppc/include/vmparam.h | 6 +-
sys/arch/mips/include/vmparam.h | 6 +-
sys/arch/mvme68k/include/vmparam.h | 6 +-
sys/arch/netwinder/include/vmparam.h | 6 +-
sys/arch/news68k/include/vmparam.h | 6 +-
sys/arch/next68k/include/vmparam.h | 7 +-
sys/arch/ofppc/include/vmparam.h | 6 +-
sys/arch/pc532/include/vmparam.h | 6 +-
sys/arch/prep/include/vmparam.h | 6 +-
sys/arch/sandpoint/include/vmparam.h | 6 +-
sys/arch/sh3/include/vmparam.h | 6 +-
sys/arch/sparc/include/vmparam.h | 6 +-
sys/arch/sparc64/include/vmparam.h | 6 +-
sys/arch/sun2/include/vmparam.h | 8 +-
sys/arch/sun3/include/vmparam.h | 8 +-
sys/arch/vax/include/vmparam.h | 7 +-
sys/arch/x68k/include/vmparam.h | 6 +-
sys/uvm/uvm_page.c | 3 +-
sys/uvm/uvm_page.h | 7 +-
33 files changed, 262 insertions(+), 304 deletions(-)
diffs (truncated from 1438 to 300 lines):
diff -r c7fc0b0ff4a4 -r 5911d13440de sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c Sun Apr 29 22:17:24 2001 +0000
+++ b/sys/arch/alpha/alpha/pmap.c Sun Apr 29 22:44:31 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.172 2001/04/29 06:54:04 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.173 2001/04/29 22:44:31 thorpej Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -154,7 +154,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.172 2001/04/29 06:54:04 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.173 2001/04/29 22:44:31 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -250,12 +250,6 @@
*/
u_long pmap_ncpuids;
-/*
- * Storage for physical->virtual entries and page attributes.
- */
-struct pv_head *pv_table;
-int pv_table_npages;
-
#ifndef PMAP_PV_LOWAT
#define PMAP_PV_LOWAT 16
#endif
@@ -277,11 +271,6 @@
struct pool pmap_pv_pool;
/*
- * Canonical names for PGU_* constants.
- */
-const char *pmap_pgu_strings[] = PGU_STRINGS;
-
-/*
* Address Space Numbers.
*
* On many implementations of the Alpha architecture, the TLB entries and
@@ -466,15 +455,6 @@
#define PAGE_IS_MANAGED(pa) (vm_physseg_find(atop(pa), NULL) != -1)
-static __inline struct pv_head *
-pa_to_pvh(paddr_t pa)
-{
- int bank, pg;
-
- bank = vm_physseg_find(atop(pa), &pg);
- return (&vm_physmem[bank].pmseg.pvhead[pg]);
-}
-
/*
* Internal routines
*/
@@ -482,7 +462,7 @@
void pmap_do_remove(pmap_t, vaddr_t, vaddr_t, boolean_t);
boolean_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *,
boolean_t, long);
-void pmap_changebit(paddr_t, pt_entry_t, pt_entry_t, long);
+void pmap_changebit(vm_page_t, pt_entry_t, pt_entry_t, long);
/*
* PT page management functions.
@@ -503,8 +483,8 @@
/*
* PV table management functions.
*/
-int pmap_pv_enter(pmap_t, paddr_t, vaddr_t, pt_entry_t *, boolean_t);
-void pmap_pv_remove(pmap_t, paddr_t, vaddr_t, boolean_t);
+int pmap_pv_enter(pmap_t, vm_page_t, vaddr_t, pt_entry_t *, boolean_t);
+void pmap_pv_remove(pmap_t, vm_page_t, vaddr_t, boolean_t);
void *pmap_pv_page_alloc(u_long, int, int);
void pmap_pv_page_free(void *, u_long, int);
#ifdef DEBUG
@@ -864,27 +844,6 @@
uvm_pageboot_alloc(sizeof(pt_entry_t) * lev3mapsize);
/*
- * Allocate memory for the pv_heads. (A few more of the latter
- * are allocated than are needed.)
- *
- * We could do this in pmap_init when we know the actual
- * managed page pool size, but its better to use kseg0
- * addresses rather than kernel virtual addresses mapped
- * through the TLB.
- */
- pv_table_npages = physmem;
- pv_table = (struct pv_head *)
- uvm_pageboot_alloc(sizeof(struct pv_head) * pv_table_npages);
-
- /*
- * ...and intialize the pv_entry list headers.
- */
- for (i = 0; i < pv_table_npages; i++) {
- LIST_INIT(&pv_table[i].pvh_list);
- simple_lock_init(&pv_table[i].pvh_slock);
- }
-
- /*
* Set up level 1 page table
*/
@@ -1156,9 +1115,6 @@
void
pmap_init(void)
{
- vsize_t s;
- int bank;
- struct pv_head *pvh;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
@@ -1169,17 +1125,6 @@
alpha_protection_init();
/*
- * Memory for the pv heads has already been allocated.
- * Initialize the physical memory segments.
- */
- pvh = pv_table;
- for (bank = 0; bank < vm_nphysseg; bank++) {
- s = vm_physmem[bank].end - vm_physmem[bank].start;
- vm_physmem[bank].pmseg.pvhead = pvh;
- pvh += s;
- }
-
- /*
* Set a low water mark on the pv_entry pool, so that we are
* more likely to have these around even in extreme memory
* starvation.
@@ -1504,11 +1449,9 @@
pmap_page_protect(vm_page_t pg, vm_prot_t prot)
{
pmap_t pmap;
- struct pv_head *pvh;
pv_entry_t pv, nextpv;
boolean_t needkisync = FALSE;
long cpu_id = cpu_number();
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
@@ -1523,11 +1466,10 @@
/* copy_on_write */
case VM_PROT_READ|VM_PROT_EXECUTE:
case VM_PROT_READ:
- pvh = pa_to_pvh(pa);
PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&pvh->pvh_slock);
-/* XXX */ pmap_changebit(pa, 0, ~(PG_KWE | PG_UWE), cpu_id);
- simple_unlock(&pvh->pvh_slock);
+ simple_lock(&pg->pvh_slock);
+/* XXX */ pmap_changebit(pg, 0, ~(PG_KWE | PG_UWE), cpu_id);
+ simple_unlock(&pg->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return;
/* remove_all */
@@ -1535,10 +1477,9 @@
break;
}
- pvh = pa_to_pvh(pa);
PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&pvh->pvh_slock);
- for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL; pv = nextpv) {
+ simple_lock(&pg->pvh_slock);
+ for (pv = LIST_FIRST(&pg->pvh_list); pv != NULL; pv = nextpv) {
nextpv = LIST_NEXT(pv, pv_list);
pmap = pv->pv_pmap;
@@ -1573,7 +1514,7 @@
if (needkisync)
PMAP_SYNC_ISTREAM_KERNEL();
- simple_unlock(&pvh->pvh_slock);
+ simple_unlock(&pg->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
}
@@ -1670,7 +1611,7 @@
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
- boolean_t managed;
+ vm_page_t pg; /* if != NULL, managed page */
pt_entry_t *pte, npte, opte;
paddr_t opa;
boolean_t tflush = TRUE;
@@ -1687,7 +1628,7 @@
printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
pmap, va, pa, prot, flags);
#endif
- managed = PAGE_IS_MANAGED(pa);
+ pg = PHYS_TO_VM_PAGE(pa);
isactive = PMAP_ISACTIVE(pmap, cpu_id);
wired = (flags & PMAP_WIRED) != 0;
@@ -1881,8 +1822,8 @@
/*
* Enter the mapping into the pv_table if appropriate.
*/
- if (managed) {
- error = pmap_pv_enter(pmap, pa, va, pte, TRUE);
+ if (pg != NULL) {
+ error = pmap_pv_enter(pmap, pg, va, pte, TRUE);
if (error) {
pmap_l3pt_delref(pmap, va, pte, cpu_id);
if (flags & PMAP_CANFAIL)
@@ -1904,21 +1845,20 @@
* Build the new PTE.
*/
npte = ((pa >> PGSHIFT) << PG_SHIFT) | pte_prot(pmap, prot) | PG_V;
- if (managed) {
- struct pv_head *pvh = pa_to_pvh(pa);
+ if (pg != NULL) {
int attrs;
#ifdef DIAGNOSTIC
if ((flags & VM_PROT_ALL) & ~prot)
panic("pmap_enter: access type exceeds prot");
#endif
- simple_lock(&pvh->pvh_slock);
+ simple_lock(&pg->pvh_slock);
if (flags & VM_PROT_WRITE)
- pvh->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
+ pg->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
else if (flags & VM_PROT_ALL)
- pvh->pvh_attrs |= PGA_REFERENCED;
- attrs = pvh->pvh_attrs;
- simple_unlock(&pvh->pvh_slock);
+ pg->pvh_attrs |= PGA_REFERENCED;
+ attrs = pg->pvh_attrs;
+ simple_unlock(&pg->pvh_slock);
/*
* Set up referenced/modified emulation for new mapping.
@@ -2464,8 +2404,6 @@
boolean_t
pmap_clear_modify(vm_page_t pg)
{
- struct pv_head *pvh;
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
boolean_t rv = FALSE;
long cpu_id = cpu_number();
@@ -2474,18 +2412,16 @@
printf("pmap_clear_modify(%p)\n", pg);
#endif
- pvh = pa_to_pvh(pa);
-
PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&pvh->pvh_slock);
-
- if (pvh->pvh_attrs & PGA_MODIFIED) {
+ simple_lock(&pg->pvh_slock);
+
+ if (pg->pvh_attrs & PGA_MODIFIED) {
rv = TRUE;
- pmap_changebit(pa, PG_FOW, ~0, cpu_id);
- pvh->pvh_attrs &= ~PGA_MODIFIED;
+ pmap_changebit(pg, PG_FOW, ~0, cpu_id);
+ pg->pvh_attrs &= ~PGA_MODIFIED;
}
- simple_unlock(&pvh->pvh_slock);
+ simple_unlock(&pg->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return (rv);
@@ -2499,8 +2435,6 @@
boolean_t
pmap_clear_reference(vm_page_t pg)
{
- struct pv_head *pvh;
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
boolean_t rv = FALSE;
long cpu_id = cpu_number();
@@ -2509,18 +2443,16 @@
printf("pmap_clear_reference(%p)\n", pg);
#endif
- pvh = pa_to_pvh(pa);
-
PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&pvh->pvh_slock);
-
- if (pvh->pvh_attrs & PGA_REFERENCED) {
+ simple_lock(&pg->pvh_slock);
+
+ if (pg->pvh_attrs & PGA_REFERENCED) {
rv = TRUE;
- pmap_changebit(pa, PG_FOR | PG_FOW | PG_FOE, ~0, cpu_id);
- pvh->pvh_attrs &= ~PGA_REFERENCED;
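
The change the truncated hunks repeat across the pmap module, shown as an
illustrative composite rather than a literal excerpt (the PGA_REFERENCED
update stands in for whatever each caller does under the lock):

	/* Before: look the pv_head up in the per-physseg table. */
	struct pv_head *pvh = pa_to_pvh(pa);	/* vm_physseg_find() + index */
	simple_lock(&pvh->pvh_slock);
	pvh->pvh_attrs |= PGA_REFERENCED;
	simple_unlock(&pvh->pvh_slock);

	/* After: the same data lives in the vm_page itself. */
	vm_page_t pg = PHYS_TO_VM_PAGE(pa);
	if (pg != NULL) {		/* NULL means an unmanaged page */
		simple_lock(&pg->pvh_slock);
		pg->pvh_attrs |= PGA_REFERENCED;
		simple_unlock(&pg->pvh_slock);
	}

This drops the vm_physseg_find() search and the separate pv_table
allocation, which is where the size and speed win in the Alpha pmap
comes from.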