Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/arm/arm32 Revert attempt at tracking unmanaged mapp...
details: https://anonhg.NetBSD.org/src/rev/92b201d4104f
branches: trunk
changeset: 356695:92b201d4104f
user: flxd <flxd%NetBSD.org@localhost>
date: Sun Oct 08 12:08:30 2017 +0000
description:
Revert attempt at tracking unmanaged mappings for VIVT as it was incomplete and
buggy. PR port-shark/52102
From skrll@. Tested by martin@ and me.
diffstat:
sys/arch/arm/arm32/pmap.c | 45 +++++++++++++++++----------------------------
1 files changed, 17 insertions(+), 28 deletions(-)
diffs (141 lines):
diff -r a004e1bfc3b4 -r 92b201d4104f sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sun Oct 08 11:08:55 2017 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sun Oct 08 12:08:30 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.357 2017/09/06 11:51:33 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.358 2017/10/08 12:08:30 flxd Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -217,7 +217,7 @@
#include <arm/locore.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.357 2017/09/06 11:51:33 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.358 2017/10/08 12:08:30 flxd Exp $");
//#define PMAP_DEBUG
#ifdef PMAP_DEBUG
@@ -3586,7 +3586,7 @@
pmap_release_pmap_lock(pm);
}
-#if !defined(ARM_MMU_EXTENDED)
+#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
static struct pv_entry *
pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
{
@@ -3594,9 +3594,7 @@
paddr_t pa = VM_PAGE_TO_PHYS(pg);
struct pv_entry *pv;
-#ifdef PMAP_CACHE_VIPT
KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
-#endif
KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
KASSERT(pmap_page_locked_p(md));
@@ -3614,18 +3612,16 @@
if (SLIST_EMPTY(&md->pvh_list)) {
md->pvh_attrs &= ~PVF_EXEC;
PMAPCOUNT(exec_discarded_kremove);
-#ifdef PMAP_CACHE_VIPT
} else {
pmap_syncicache_page(md, pa);
PMAPCOUNT(exec_synced_kremove);
-#endif
}
}
pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
return pv;
}
-#endif /* !ARM_MMU_EXTENDED */
+#endif /* PMAP_CACHE_VIPT && !ARM_MMU_EXTENDED */
/*
* pmap_kenter_pa: enter an unmanaged, wired kernel mapping
@@ -3637,11 +3633,16 @@
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
+#ifdef PMAP_CACHE_VIVT
+ struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
+#endif
+#ifdef PMAP_CACHE_VIPT
struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
struct vm_page *opg;
#ifndef ARM_MMU_EXTENDED
struct pv_entry *pv = NULL;
#endif
+#endif
struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
UVMHIST_FUNC(__func__);
@@ -3675,13 +3676,12 @@
l2b->l2b_occupancy += PAGE_SIZE / L2_S_SIZE;
} else {
PMAPCOUNT(kenter_remappings);
+#ifdef PMAP_CACHE_VIPT
opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
+#if !defined(ARM_MMU_EXTENDED) || defined(DIAGNOSTIC)
struct vm_page_md *omd __diagused = VM_PAGE_TO_MD(opg);
- if (opg
-#ifdef PMAP_CACHE_VIPT
- && arm_cache_prefer_mask != 0
-#endif
- && true) {
+#endif
+ if (opg && arm_cache_prefer_mask != 0) {
KASSERT(opg != pg);
KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
KASSERT((flags & PMAP_KMPAGE) == 0);
@@ -3691,6 +3691,7 @@
pmap_release_page_lock(omd);
#endif
}
+#endif
if (l2pte_valid_p(opte)) {
l2pte_reset(ptep);
PTE_SYNC(ptep);
@@ -3749,14 +3750,8 @@
md->pvh_attrs |= PVF_KMPAGE;
#endif
atomic_inc_32(&pmap_kmpages);
- } else if (false
#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
- || arm_cache_prefer_mask != 0
-#elif defined(PMAP_CACHE_VIVT)
- || true
-#endif
- || false) {
-#if !defined(ARM_MMU_EXTENDED)
+ } else if (arm_cache_prefer_mask != 0) {
if (pv == NULL) {
pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
KASSERT(pv != NULL);
@@ -3773,7 +3768,7 @@
pmap_release_page_lock(md);
#endif
}
-#if !defined(ARM_MMU_EXTENDED)
+#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
} else {
if (pv != NULL)
pool_put(&pmap_pv_pool, pv);
@@ -3834,14 +3829,8 @@
}
#endif
atomic_dec_32(&pmap_kmpages);
- } else if (false
#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
- || arm_cache_prefer_mask != 0
-#elif defined(PMAP_CACHE_VIVT)
- || true
-#endif
- || false) {
-#if !defined(ARM_MMU_EXTENDED)
+ } else if (arm_cache_prefer_mask != 0) {
pmap_acquire_page_lock(omd);
pool_put(&pmap_pv_pool,
pmap_kremove_pg(opg, va));
Home |
Main Index |
Thread Index |
Old Index