Source-Changes-HG archive
[src/trunk]: src/sys/arch/ia64/ia64 remove pmap_track_modified() since it ref...
details: https://anonhg.NetBSD.org/src/rev/00d93d60ff64
branches: trunk
changeset: 782532:00d93d60ff64
user: chs <chs%NetBSD.org@localhost>
date: Mon Nov 05 15:11:36 2012 +0000
description:
remove pmap_track_modified() since it references kmem globals
that no longer exist. this check was a hold-over from freebsd;
kmem pages on netbsd are not managed, so their mod/ref state is
not tracked anyway. also remove the commented-out assertions about
pageq locking, another freebsd hold-over, since pmaps do not use
the pageq lock on netbsd.
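
for context, the dropped pmap_track_modified() check (removed in the
last hunk of the diff below) skipped dirty-bit propagation for addresses
inside the old kmem submap. with it gone, the managed-page paths simply
propagate the dirty bit. a minimal sketch of the simplified logic, reusing
the pmap_* helper names and the PG_CLEAN flag that appear in the diff
(illustrative only, not the committed code):

	/*
	 * sketch: dirty-bit propagation for a managed mapping after this
	 * change; compare with pmap_remove_pte() in the diff below.
	 */
	if (pmap_managed(pte)) {
		struct vm_page *pg = PHYS_TO_VM_PAGE(pmap_ppn(pte));

		/*
		 * kmem pages are unmanaged on netbsd, so the old
		 * pmap_track_modified(va) range check is unnecessary:
		 * any dirty managed mapping just marks its page modified.
		 */
		if (pmap_dirty(pte))
			pg->flags &= ~PG_CLEAN;
	}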
diffstat:
sys/arch/ia64/ia64/pmap.c | 40 ++++++----------------------------------
1 files changed, 6 insertions(+), 34 deletions(-)
diffs (145 lines):
diff -r 888af02a5bf1 -r 00d93d60ff64 sys/arch/ia64/ia64/pmap.c
--- a/sys/arch/ia64/ia64/pmap.c Mon Nov 05 08:07:11 2012 +0000
+++ b/sys/arch/ia64/ia64/pmap.c Mon Nov 05 15:11:36 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.28 2012/08/31 14:31:46 chs Exp $ */
+/* $NetBSD: pmap.c,v 1.29 2012/11/05 15:11:36 chs Exp $ */
/*-
@@ -85,7 +85,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.28 2012/08/31 14:31:46 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.29 2012/11/05 15:11:36 chs Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -273,9 +273,6 @@
static void
pmap_insert_entry(pmap_t pmap, vaddr_t va, struct vm_page *pg);
-static __inline int
-pmap_track_modified(vaddr_t va);
-
static void
pmap_enter_vhpt(struct ia64_lpte *, vaddr_t);
static int pmap_remove_vhpt(vaddr_t);
@@ -1162,7 +1159,6 @@
if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
panic("pmap_protect: unaligned addresses");
- //uvm_lock_pageq();
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
while (sva < eva) {
@@ -1179,7 +1175,8 @@
if (pmap_managed(pte)) {
pa = pmap_ppn(pte);
pg = PHYS_TO_VM_PAGE(pa);
- if (pmap_dirty(pte)) pmap_clear_dirty(pte);
+ if (pmap_dirty(pte))
+ pmap_clear_dirty(pte);
if (pmap_accessed(pte)) {
pmap_clear_accessed(pte);
}
@@ -1190,7 +1187,6 @@
sva += PAGE_SIZE;
}
- //uvm_unlock_pageq();
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
}
@@ -1286,8 +1282,6 @@
PMAP_UNLOCK(pmap);
}
- //UVM_LOCK_ASSERT_PAGEQ();
-
pg->flags |= PG_RDONLY;
} else {
pmap_page_purge(pg);
@@ -1425,7 +1419,7 @@
* We might be turning off write access to the page,
* so we go ahead and sense modify status.
*/
- if (managed && pmap_dirty(&origpte) && pmap_track_modified(va))
+ if (managed && pmap_dirty(&origpte))
pg->flags &= ~PG_CLEAN;
pmap_invalidate_page(pmap, va);
@@ -1494,8 +1488,6 @@
pmap_t oldpmap;
pv_entry_t pv;
- //UVM_LOCK_ASSERT_PAGEQ();
-
while ((pv = TAILQ_FIRST(&md->pv_list)) != NULL) {
struct ia64_lpte *pte;
pmap_t pmap = pv->pv_pmap;
@@ -1512,7 +1504,6 @@
PMAP_UNLOCK(pmap);
}
- //UVM_LOCK_ASSERT_PAGEQ();
pg->flags |= PG_RDONLY;
}
@@ -1766,8 +1757,7 @@
if (pmap_managed(pte)) {
pg = PHYS_TO_VM_PAGE(pmap_ppn(pte));
if (pmap_dirty(pte))
- if (pmap_track_modified(va))
- pg->flags &= ~(PG_CLEAN);
+ pg->flags &= ~(PG_CLEAN);
if (pmap_accessed(pte))
pg->flags &= ~PG_CLEAN; /* XXX: Do we need this ? */
@@ -1801,21 +1791,6 @@
}
-/*
- * this routine defines the region(s) of memory that should
- * not be tested for the modified bit.
- */
-static __inline int
-pmap_track_modified(vaddr_t va)
-{
- extern char *kmembase, kmemlimit;
- if ((va < (vaddr_t) kmembase) || (va >= (vaddr_t) kmemlimit))
- return 1;
- else
- return 0;
-}
-
-
/***************************************************
* page management routines.
***************************************************/
@@ -1837,7 +1812,6 @@
pv_entry_t allocated_pv;
//LOCK_ASSERT(simple_lock_held(locked_pmap->slock));
- //UVM_LOCK_ASSERT_PAGEQ();
allocated_pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
return allocated_pv;
@@ -1985,7 +1959,6 @@
TAILQ_REMOVE(&md->pv_list, pv, pv_list);
md->pv_list_count--;
if (TAILQ_FIRST(&md->pv_list) == NULL) {
- //UVM_LOCK_ASSERT_PAGEQ();
pg->flags |= PG_RDONLY;
}
@@ -2013,7 +1986,6 @@
pv->pv_va = va;
//LOCK_ASSERT(simple_lock_held(pmap->slock));
- //UVM_LOCK_ASSERT_PAGEQ();
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_INSERT_TAIL(&md->pv_list, pv, pv_list);
md->pv_list_count++;