Source-Changes-HG archive
[src/trunk]: src/sys/arch Use VM_PAGE_TO_MD() to locate struct vm_page_md. N...
details: https://anonhg.NetBSD.org/src/rev/af5fa428fef0
branches: trunk
changeset: 758274:af5fa428fef0
user: uebayasi <uebayasi@NetBSD.org>
date: Sat Oct 30 17:20:43 2010 +0000
description:
Use VM_PAGE_TO_MD() to locate struct vm_page_md. No functional
changes.
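In outline, the change replaces direct accesses to the machine-dependent pg->mdpage member with a local struct vm_page_md pointer obtained through the new VM_PAGE_TO_MD() macro (defined in the patch below as &(pg)->mdpage). A minimal sketch of the conversion pattern, assuming the usual NetBSD kernel definitions of struct vm_page, struct vm_page_md and the mutex API; the example_* functions are illustrative only and are not part of the patch:

/* Accessor introduced by this change (hppa pmap.c). */
#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

/* Before: each reference spells out pg->mdpage.<member>. */
static void
example_before(struct vm_page *pg)
{
	mutex_enter(&pg->mdpage.pvh_lock);
	pg->mdpage.pvh_attrs |= PVF_REF;	/* illustrative bit */
	mutex_exit(&pg->mdpage.pvh_lock);
}

/* After: take the vm_page_md pointer once, then use md-> throughout. */
static void
example_after(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	mutex_enter(&md->pvh_lock);
	md->pvh_attrs |= PVF_REF;		/* illustrative bit */
	mutex_exit(&md->pvh_lock);
}

Funneling the lookup through a single macro keeps knowledge of where vm_page_md lives in one place, so the relationship between struct vm_page and its machine-dependent data can later change without touching every pmap function again; the compiled result is the same, hence the "No functional changes" note.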
diffstat:
sys/arch/hppa/hppa/pmap.c | 123 ++++++++++++++++++++++++++-------------------
sys/arch/ia64/ia64/pmap.c | 46 ++++++++++------
2 files changed, 100 insertions(+), 69 deletions(-)
diffs (truncated from 543 to 300 lines):
diff -r b6bd4e6e35c3 -r af5fa428fef0 sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Sat Oct 30 17:00:54 2010 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Sat Oct 30 17:20:43 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.76 2010/06/21 14:43:34 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.77 2010/10/30 17:20:43 uebayasi Exp $ */
/*-
* Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.76 2010/06/21 14:43:34 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.77 2010/10/30 17:20:43 uebayasi Exp $");
#include "opt_cputype.h"
@@ -91,6 +91,8 @@
#include <ddb/db_output.h>
#endif
+#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage)
+
#ifdef PMAPDEBUG
#define static /**/
@@ -497,27 +499,30 @@
pmap_dump_pv(paddr_t pa)
{
struct vm_page *pg;
+ struct vm_page_md *md;
struct pv_entry *pve;
pg = PHYS_TO_VM_PAGE(pa);
- mutex_enter(&pg->mdpage.pvh_lock);
- db_printf("pg %p attr 0x%08x aliases %d\n", pg, pg->mdpage.pvh_attrs,
- pg->mdpage.pvh_aliases);
- for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
+ md = VM_PAGE_TO_MD(pg);
+ mutex_enter(&md->pvh_lock);
+ db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
+ md->pvh_aliases);
+ for (pve = md->pvh_list; pve; pve = pve->pv_next)
db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
pve->pv_va & PV_VAMASK);
- mutex_exit(&pg->mdpage.pvh_lock);
+ mutex_exit(&md->pvh_lock);
}
#endif
int
pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry *pve;
int ret = 0;
/* check for non-equ aliased mappings */
- for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+ for (pve = md->pvh_list; pve; pve = pve->pv_next) {
vaddr_t pva = pve->pv_va & PV_VAMASK;
pte |= pmap_vp_find(pve->pv_pmap, pva);
@@ -566,26 +571,29 @@
pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
vaddr_t va, struct vm_page *pdep, u_int flags)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
__func__, pg, pve, pm, va, pdep, flags));
- KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+ KASSERT(mutex_owned(&md->pvh_lock));
pve->pv_pmap = pm;
pve->pv_va = va | flags;
pve->pv_ptp = pdep;
- pve->pv_next = pg->mdpage.pvh_list;
- pg->mdpage.pvh_list = pve;
+ pve->pv_next = md->pvh_list;
+ md->pvh_list = pve;
}
static inline struct pv_entry *
pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry **pve, *pv;
- KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+ KASSERT(mutex_owned(&md->pvh_lock));
- for (pv = *(pve = &pg->mdpage.pvh_list);
+ for (pv = *(pve = &md->pvh_list);
pv; pv = *(pve = &(*pve)->pv_next))
if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
*pve = pv->pv_next;
@@ -1109,7 +1117,8 @@
continue;
sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
- for (haggis = sheep->mdpage.pvh_list; haggis != NULL; )
+ struct vm_page_md * const md = VM_PAGE_TO_MD(sheep);
+ for (haggis = md->pvh_list; haggis != NULL; )
if (haggis->pv_pmap == pmap) {
DPRINTF(PDB_FOLLOW, (" 0x%lx",
@@ -1123,7 +1132,7 @@
* exploit the sacred knowledge of
* lambeous ozzmosis
*/
- haggis = sheep->mdpage.pvh_list;
+ haggis = md->pvh_list;
} else
haggis = haggis->pv_next;
}
@@ -1206,10 +1215,11 @@
}
pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
- mutex_enter(&pg->mdpage.pvh_lock);
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+ mutex_enter(&md->pvh_lock);
pve = pmap_pv_remove(pg, pmap, va);
- pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
- mutex_exit(&pg->mdpage.pvh_lock);
+ md->pvh_attrs |= pmap_pvh_attrs(pte);
+ mutex_exit(&md->pvh_lock);
} else {
DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n",
__func__, va, pa));
@@ -1223,21 +1233,22 @@
}
if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
if (!pve && !(pve = pmap_pv_alloc())) {
if (flags & PMAP_CANFAIL) {
- mutex_exit(&pg->mdpage.pvh_lock);
+ mutex_exit(&md->pvh_lock);
PMAP_UNLOCK(pmap);
return (ENOMEM);
}
panic("%s: no pv entries available", __func__);
}
pte |= PTE_PROT(pmap_prot(pmap, prot));
- mutex_enter(&pg->mdpage.pvh_lock);
+ mutex_enter(&md->pvh_lock);
if (pmap_check_alias(pg, va, pte))
pmap_page_remove_locked(pg);
pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
- mutex_exit(&pg->mdpage.pvh_lock);
+ mutex_exit(&md->pvh_lock);
} else if (pve) {
pmap_pv_free(pve);
}
@@ -1304,13 +1315,14 @@
if (pmap_initialized &&
(pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
- mutex_enter(&pg->mdpage.pvh_lock);
+ mutex_enter(&md->pvh_lock);
pve = pmap_pv_remove(pg, pmap, sva);
- pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+ md->pvh_attrs |= pmap_pvh_attrs(pte);
- mutex_exit(&pg->mdpage.pvh_lock);
+ mutex_exit(&md->pvh_lock);
if (pve != NULL)
pmap_pv_free(pve);
@@ -1360,9 +1372,10 @@
continue;
pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
- mutex_enter(&pg->mdpage.pvh_lock);
- pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
- mutex_exit(&pg->mdpage.pvh_lock);
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+ mutex_enter(&md->pvh_lock);
+ md->pvh_attrs |= pmap_pvh_attrs(pte);
+ mutex_exit(&md->pvh_lock);
pmap_pte_flush(pmap, sva, pte);
pte &= ~PTE_PROT(TLB_AR_MASK);
@@ -1377,24 +1390,26 @@
void
pmap_page_remove(struct vm_page *pg)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
- mutex_enter(&pg->mdpage.pvh_lock);
+ mutex_enter(&md->pvh_lock);
pmap_page_remove_locked(pg);
- mutex_exit(&pg->mdpage.pvh_lock);
+ mutex_exit(&md->pvh_lock);
}
void
pmap_page_remove_locked(struct vm_page *pg)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry *pve, *npve, **pvp;
DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
- if (pg->mdpage.pvh_list == NULL)
+ if (md->pvh_list == NULL)
return;
- pvp = &pg->mdpage.pvh_list;
- for (pve = pg->mdpage.pvh_list; pve; pve = npve) {
+ pvp = &md->pvh_list;
+ for (pve = md->pvh_list; pve; pve = npve) {
pmap_t pmap = pve->pv_pmap;
vaddr_t va = pve->pv_va & PV_VAMASK;
volatile pt_entry_t *pde;
@@ -1414,7 +1429,7 @@
*pvp = pve;
pvp = &pve->pv_next;
} else
- pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+ md->pvh_attrs |= pmap_pvh_attrs(pte);
pmap_pte_flush(pmap, va, pte);
if (pte & PTE_PROT(TLB_WIRED))
@@ -1470,6 +1485,7 @@
bool
pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry *pve;
int res;
@@ -1479,13 +1495,13 @@
KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
- mutex_enter(&pg->mdpage.pvh_lock);
+ mutex_enter(&md->pvh_lock);
/* preserve other bits */
- res = pg->mdpage.pvh_attrs & (set | clear);
- pg->mdpage.pvh_attrs ^= res;
+ res = md->pvh_attrs & (set | clear);
+ md->pvh_attrs ^= res;
- for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+ for (pve = md->pvh_list; pve; pve = pve->pv_next) {
pmap_t pmap = pve->pv_pmap;
vaddr_t va = pve->pv_va & PV_VAMASK;
volatile pt_entry_t *pde;
@@ -1505,7 +1521,7 @@
pte |= set;
if (!(pve->pv_va & PV_KENTER)) {
- pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+ md->pvh_attrs |= pmap_pvh_attrs(pte);
res |= pmap_pvh_attrs(opte);
}
@@ -1515,7 +1531,7 @@
}
}
}
- mutex_exit(&pg->mdpage.pvh_lock);
+ mutex_exit(&md->pvh_lock);
return ((res & (clear | set)) != 0);
}
@@ -1523,15 +1539,16 @@
bool
pmap_testbit(struct vm_page *pg, u_int bit)
{
+ struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
struct pv_entry *pve;
pt_entry_t pte;
int ret;
DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit));
- mutex_enter(&pg->mdpage.pvh_lock);
+ mutex_enter(&md->pvh_lock);
- for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
+ for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
pve = pve->pv_next) {
pmap_t pm = pve->pv_pmap;
@@ -1539,10 +1556,10 @@
if (pve->pv_va & PV_KENTER)
continue;
- pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);