[src/uebayasi-xip]: src/sys/arch/arm/arm32 Replace all remaining pg->mdpage references with VM_PAGE_TO_MD()
details: https://anonhg.NetBSD.org/src/rev/32265a58d353
branches: uebayasi-xip
changeset: 751562:32265a58d353
user: uebayasi <uebayasi@NetBSD.org>
date: Wed Feb 10 15:37:48 2010 +0000
description:
Replace all remaining pg->mdpage references with VM_PAGE_TO_MD(). Now struct
vm_page * is fully opaque.
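For readers following the branch, the change is mechanical: every direct pg->mdpage dereference now goes through the new accessor. A minimal sketch of the idea follows; the exact definition lives in the branch's uvm/pmap headers, and example_op() is a hypothetical caller invented here purely to show the hoisting pattern used throughout the diff below.

/*
 * Sketch only -- assumes VM_PAGE_TO_MD() is a thin macro over the
 * (now private) mdpage member, as the call sites in the diff suggest.
 * Kernel context assumed: struct vm_page, simple_lock(9), PVF_* flags.
 */
#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

/*
 * Hypothetical caller showing the pattern applied throughout pmap.c:
 * look the machine-dependent metadata up once at function entry, then
 * use the local "md" instead of repeating pg->mdpage everywhere.
 */
static void
example_op(struct vm_page *pg)
{
	struct vm_page_md *md = VM_PAGE_TO_MD(pg);

	simple_lock(&md->pvh_slock);
	md->pvh_attrs &= ~(PVF_MOD | PVF_REF);
	simple_unlock(&md->pvh_slock);
}

The diff below applies exactly this transformation to pmap_clearbit(), pmap_page_remove(), and the other pmap entry points shown.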
diffstat:
sys/arch/arm/arm32/pmap.c | 362 ++++++++++++++++++++++++---------------------
1 files changed, 194 insertions(+), 168 deletions(-)
diffs (truncated from 880 to 300 lines):
diff -r e4a47f69bd98 -r 32265a58d353 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed Feb 10 14:20:23 2010 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed Feb 10 15:37:48 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.211.2.5 2010/02/10 14:18:30 uebayasi Exp $ */
+/* $NetBSD: pmap.c,v 1.211.2.6 2010/02/10 15:37:48 uebayasi Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.5 2010/02/10 14:18:30 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.211.2.6 2010/02/10 15:37:48 uebayasi Exp $");
#ifdef PMAP_DEBUG
@@ -2122,6 +2122,7 @@
static void
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
+ struct vm_page_md *md = VM_PAGE_TO_MD(pg);
struct l2_bucket *l2b;
struct pv_entry *pv;
pt_entry_t *ptep, npte, opte;
@@ -2129,7 +2130,7 @@
vaddr_t va;
u_int oflags;
#ifdef PMAP_CACHE_VIPT
- const bool want_syncicache = PV_IS_EXEC_P(pg->mdpage.pvh_attrs);
+ const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
bool need_syncicache = false;
bool did_syncicache = false;
bool need_vac_me_harder = false;
@@ -2140,7 +2141,7 @@
pg, VM_PAGE_TO_PHYS(pg), maskbits));
PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&pg->mdpage.pvh_slock);
+ simple_lock(&md->pvh_slock);
#ifdef PMAP_CACHE_VIPT
/*
@@ -2148,25 +2149,25 @@
* then we know we definitely need to sync or discard it.
*/
if (want_syncicache)
- need_syncicache = pg->mdpage.pvh_attrs & PVF_MOD;
+ need_syncicache = md->pvh_attrs & PVF_MOD;
#endif
/*
* Clear saved attributes (modify, reference)
*/
- pg->mdpage.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
-
- if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
+ md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
+
+ if (SLIST_EMPTY(&md->pvh_list)) {
#ifdef PMAP_CACHE_VIPT
if (need_syncicache) {
/*
* No one has it mapped, so just discard it. The next
* exec remapping will cause it to be synced.
*/
- pg->mdpage.pvh_attrs &= ~PVF_EXEC;
+ md->pvh_attrs &= ~PVF_EXEC;
PMAPCOUNT(exec_discarded_clearbit);
}
#endif
- simple_unlock(&pg->mdpage.pvh_slock);
+ simple_unlock(&md->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return;
}
@@ -2174,7 +2175,7 @@
/*
* Loop over all current mappings setting/clearing as appropos
*/
- SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+ SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
va = pv->pv_va;
pm = pv->pv_pmap;
oflags = pv->pv_flags;
@@ -2244,15 +2245,15 @@
* Keep alias accounting up to date
*/
if (pv->pv_pmap == pmap_kernel()) {
- pg->mdpage.krw_mappings--;
- pg->mdpage.kro_mappings++;
+ md->krw_mappings--;
+ md->kro_mappings++;
} else {
- pg->mdpage.urw_mappings--;
- pg->mdpage.uro_mappings++;
+ md->urw_mappings--;
+ md->uro_mappings++;
}
#ifdef PMAP_CACHE_VIPT
- if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
- pg->mdpage.pvh_attrs &= ~PVF_WRITE;
+ if (md->urw_mappings + md->krw_mappings == 0)
+ md->pvh_attrs &= ~PVF_WRITE;
if (want_syncicache)
need_syncicache = true;
need_vac_me_harder = true;
@@ -2315,7 +2316,7 @@
* If we need to sync the I-cache and we haven't done it yet, do it.
*/
if (need_syncicache && !did_syncicache) {
- pmap_syncicache_page(&pg->mdpage, VM_PAGE_TO_PHYS(pg));
+ pmap_syncicache_page(md, VM_PAGE_TO_PHYS(pg));
PMAPCOUNT(exec_synced_clearbit);
}
/*
@@ -2324,12 +2325,12 @@
* this as a page deletion.
*/
if (need_vac_me_harder) {
- if (pg->mdpage.pvh_attrs & PVF_NC)
- pmap_vac_me_harder(&pg->mdpage, VM_PAGE_TO_PHYS(pg), NULL, 0);
+ if (md->pvh_attrs & PVF_NC)
+ pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), NULL, 0);
}
#endif
- simple_unlock(&pg->mdpage.pvh_slock);
+ simple_unlock(&md->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
}
@@ -2567,6 +2568,7 @@
static void
pmap_page_remove(struct vm_page *pg)
{
+ struct vm_page_md *md = VM_PAGE_TO_MD(pg);
struct l2_bucket *l2b;
struct pv_entry *pv, *npv, **pvp;
pmap_t pm;
@@ -2579,35 +2581,35 @@
VM_PAGE_TO_PHYS(pg)));
PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&pg->mdpage.pvh_slock);
-
- pv = SLIST_FIRST(&pg->mdpage.pvh_list);
+ simple_lock(&md->pvh_slock);
+
+ pv = SLIST_FIRST(&md->pvh_list);
if (pv == NULL) {
#ifdef PMAP_CACHE_VIPT
/*
* We *know* the page contents are about to be replaced.
* Discard the exec contents
*/
- if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
+ if (PV_IS_EXEC_P(md->pvh_attrs))
PMAPCOUNT(exec_discarded_page_protect);
- pg->mdpage.pvh_attrs &= ~PVF_EXEC;
- KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+ md->pvh_attrs &= ~PVF_EXEC;
+ KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
#endif
- simple_unlock(&pg->mdpage.pvh_slock);
+ simple_unlock(&md->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
return;
}
#ifdef PMAP_CACHE_VIPT
- KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(&pg->mdpage));
+ KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
#endif
/*
* Clear alias counts
*/
#ifdef PMAP_CACHE_VIVT
- pg->mdpage.k_mappings = 0;
+ md->k_mappings = 0;
#endif
- pg->mdpage.urw_mappings = pg->mdpage.uro_mappings = 0;
+ md->urw_mappings = md->uro_mappings = 0;
flush = false;
flags = 0;
@@ -2616,7 +2618,7 @@
pmap_clean_page(pv, false);
#endif
- pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
+ pvp = &SLIST_FIRST(&md->pvh_list);
while (pv) {
pm = pv->pv_pmap;
npv = SLIST_NEXT(pv, pv_link);
@@ -2637,9 +2639,9 @@
continue;
}
if (pv->pv_flags & PVF_WRITE)
- pg->mdpage.krw_mappings--;
+ md->krw_mappings--;
else
- pg->mdpage.kro_mappings--;
+ md->kro_mappings--;
#endif
PMAPCOUNT(kernel_unmappings);
}
@@ -2678,8 +2680,8 @@
*/
if (pv == NULL) {
*pvp = NULL;
- if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
- pmap_vac_me_harder(&pg->mdpage, VM_PAGE_TO_PHYS(pg), pm, 0);
+ if (!SLIST_EMPTY(&md->pvh_list))
+ pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, 0);
}
pmap_release_pmap_lock(pm);
}
@@ -2687,16 +2689,16 @@
/*
* Its EXEC cache is now gone.
*/
- if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
+ if (PV_IS_EXEC_P(md->pvh_attrs))
PMAPCOUNT(exec_discarded_page_protect);
- pg->mdpage.pvh_attrs &= ~PVF_EXEC;
- KASSERT(pg->mdpage.urw_mappings == 0);
- KASSERT(pg->mdpage.uro_mappings == 0);
- if (pg->mdpage.krw_mappings == 0)
- pg->mdpage.pvh_attrs &= ~PVF_WRITE;
- KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
+ md->pvh_attrs &= ~PVF_EXEC;
+ KASSERT(md->urw_mappings == 0);
+ KASSERT(md->uro_mappings == 0);
+ if (md->krw_mappings == 0)
+ md->pvh_attrs &= ~PVF_WRITE;
+ KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
#endif
- simple_unlock(&pg->mdpage.pvh_slock);
+ simple_unlock(&md->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
if (flush) {
@@ -2940,7 +2942,7 @@
return (ENOMEM);
}
- pmap_enter_pv(&pg->mdpage, VM_PAGE_TO_PHYS(pg), pv, pm, va, nflags);
+ pmap_enter_pv(md, VM_PAGE_TO_PHYS(pg), pv, pm, va, nflags);
}
} else {
/*
@@ -2963,10 +2965,11 @@
* Looks like there's an existing 'managed' mapping
* at this address.
*/
- simple_lock(&opg->mdpage.pvh_slock);
- pv = pmap_remove_pv(&opg->mdpage, VM_PAGE_TO_PHYS(opg), pm, va);
- pmap_vac_me_harder(&opg->mdpage, VM_PAGE_TO_PHYS(opg), pm, 0);
- simple_unlock(&opg->mdpage.pvh_slock);
+ struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
+ simple_lock(&omd->pvh_slock);
+ pv = pmap_remove_pv(omd, VM_PAGE_TO_PHYS(opg), pm, va);
+ pmap_vac_me_harder(omd, VM_PAGE_TO_PHYS(opg), pm, 0);
+ simple_unlock(&omd->pvh_slock);
oflags = pv->pv_flags;
#ifdef PMAP_CACHE_VIVT
@@ -3045,17 +3048,19 @@
is_cached, pm->pm_cstate.cs_all));
if (pg != NULL) {
- simple_lock(&pg->mdpage.pvh_slock);
- pmap_vac_me_harder(&pg->mdpage, VM_PAGE_TO_PHYS(pg), pm, va);
- simple_unlock(&pg->mdpage.pvh_slock);
+ struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+ simple_lock(&md->pvh_slock);
+ pmap_vac_me_harder(md, VM_PAGE_TO_PHYS(pg), pm, va);
+ simple_unlock(&md->pvh_slock);
}
}
#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
if (pg) {
- simple_lock(&pg->mdpage.pvh_slock);
- KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
- KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0));
- simple_unlock(&pg->mdpage.pvh_slock);
+ struct vm_page_md *md = VM_PAGE_TO_MD(pg);
+ simple_lock(&md->pvh_slock);
+ KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
+ KASSERT(((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0));
+ simple_unlock(&md->pvh_slock);
}
#endif
@@ -3156,11 +3161,12 @@
* number of sequential pages in one go.
*/
if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+ struct vm_page_md *md = VM_PAGE_TO_MD(pg);