Source-Changes-HG archive
[src/trunk]: src/sys/arch/mips Deal with incompatible cache aliases. Specific...
details: https://anonhg.NetBSD.org/src/rev/c451b7f3e282
branches: trunk
changeset: 329147:c451b7f3e282
user: skrll <skrll%NetBSD.org@localhost>
date: Sun May 11 07:53:28 2014 +0000
description:
Deal with incompatible cache aliases. Specifically,
- always flush an ephemeral page on unmap
- track unmanaged mappings (mappings entered via pmap_kenter_pa) for
aliases where required and handle appropriately (via pmap_enter_pv)
Hopefully this (finally) addresses the instability reported in the
following PRs:
PR/44900 - R5000/Rm5200 mips ports are broken
PR/46890 - upcoming NetBSD 6.0 release is very unstable / unusable on cobalt qube 2
PR/48628 - cobalt and hpcmips ports are dead
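Background for readers unfamiliar with the problem the description above refers to: MIPS R4000-class CPUs have virtually indexed, physically tagged data caches, so two virtual mappings of the same physical page can land on different cache lines whenever they differ in the index bits above the page offset. The sketch below models the kind of check mips_cache_badalias() performs; the mask value and helper names are illustrative stand-ins, not the verbatim NetBSD macros.

/*
 * Sketch of a virtual-alias check, assuming a virtually indexed
 * data cache whose index covers more address bits than the page
 * offset.  cache_alias_mask stands in for the per-CPU mask NetBSD
 * computes at boot; the real test is mips_cache_badalias().
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL

/*
 * Example: a 16KB direct-mapped cache with 4KB pages leaves two
 * index bits above the page offset that can disagree.
 */
static const uintptr_t cache_alias_mask =
    (16UL * 1024 - 1) & ~(PAGE_SIZE - 1);

static bool
cache_badalias(uintptr_t va1, uintptr_t va2)
{
	/* Aliasing is harmless iff both VAs pick the same cache index. */
	return ((va1 ^ va2) & cache_alias_mask) != 0;
}

When such an alias exists, the pmap must either flush one of the mappings or map the page uncached; that is what the hunks below do on unmap and in pmap_enter_pv().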
diffstat:
sys/arch/mips/include/pmap.h | 4 +-
sys/arch/mips/mips/pmap.c | 141 +++++++++++++++++++++-----------------
sys/arch/mips/mips/pmap_segtab.c | 17 +---
3 files changed, 83 insertions(+), 79 deletions(-)
diffs (truncated from 441 to 300 lines):
diff -r 78f595d7a5e2 -r c451b7f3e282 sys/arch/mips/include/pmap.h
--- a/sys/arch/mips/include/pmap.h Sun May 11 02:07:35 2014 +0000
+++ b/sys/arch/mips/include/pmap.h Sun May 11 07:53:28 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.62 2012/07/05 17:21:02 matt Exp $ */
+/* $NetBSD: pmap.h,v 1.63 2014/05/11 07:53:28 skrll Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -283,6 +283,7 @@
#endif /* MIPS3_PLUS */
#define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */
+#define PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */
/*
* Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
@@ -329,6 +330,7 @@
struct pv_entry *pv_next; /* next pv_entry */
struct pmap *pv_pmap; /* pmap where mapping lies */
vaddr_t pv_va; /* virtual address for mapping */
+#define PV_KENTER 0x001
} *pv_entry_t;
#define PG_MD_UNCACHED 0x0001 /* page is mapped uncached */
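The new PV_KENTER flag rides in the low bits of pv_va, which are always zero for a page-aligned address; this is why the pmap.c hunks below read the address back through trunc_page(pv->pv_va) and why pmap_enter_pv() now stores "va | flags". A minimal illustration of the packing scheme, with invented helper names:

#include <stdint.h>

#define PAGE_MASK	0xfffUL		/* assuming 4KB pages */
#define PV_KENTER	0x001UL		/* entered via pmap_kenter_pa */

/* Store a flag in the otherwise-zero low bits of a page-aligned VA. */
static inline uintptr_t
pv_pack(uintptr_t va, uintptr_t flags)
{
	return (va & ~PAGE_MASK) | flags;
}

/* Recover the mapping address: this is what trunc_page() yields below. */
static inline uintptr_t
pv_va_of(uintptr_t pv_va)
{
	return pv_va & ~PAGE_MASK;
}

/* Test whether the mapping was an unmanaged pmap_kenter_pa() one. */
static inline int
pv_is_kenter(uintptr_t pv_va)
{
	return (pv_va & PV_KENTER) != 0;
}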
diff -r 78f595d7a5e2 -r c451b7f3e282 sys/arch/mips/mips/pmap.c
--- a/sys/arch/mips/mips/pmap.c Sun May 11 02:07:35 2014 +0000
+++ b/sys/arch/mips/mips/pmap.c Sun May 11 07:53:28 2014 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.213 2014/05/03 12:50:01 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.214 2014/05/11 07:53:28 skrll Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.213 2014/05/03 12:50:01 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.214 2014/05/11 07:53:28 skrll Exp $");
/*
* Manages physical address maps.
@@ -317,7 +317,7 @@
/* Forward function declarations */
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
-void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
+void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *, int);
pt_entry_t *pmap_pte(pmap_t, vaddr_t);
/*
@@ -386,13 +386,13 @@
}
PG_MD_PVLIST_UNLOCK(md);
kpreempt_disable();
- pmap_tlb_syncicache(md->pvh_first.pv_va, onproc);
+ pmap_tlb_syncicache(trunc_page(md->pvh_first.pv_va), onproc);
kpreempt_enable();
#else
if (MIPS_HAS_R4K_MMU) {
if (PG_MD_CACHED_P(md)) {
mips_icache_sync_range_index(
- md->pvh_first.pv_va, PAGE_SIZE);
+ trunc_page(md->pvh_first.pv_va), PAGE_SIZE);
}
} else {
mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
@@ -436,10 +436,10 @@
*/
(void)PG_MD_PVLIST_LOCK(md, false);
if (PG_MD_CACHED_P(md)
- && mips_cache_badalias(pv->pv_va, va))
- mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
- if (pv->pv_pmap == NULL)
- pv->pv_va = va;
+ && mips_cache_badalias(pv->pv_va, va)) {
+ mips_dcache_wbinv_range_index(trunc_page(pv->pv_va),
+ PAGE_SIZE);
+ }
PG_MD_PVLIST_UNLOCK(md);
}
@@ -450,23 +450,13 @@
pmap_unmap_ephemeral_page(struct vm_page *pg, vaddr_t va,
pt_entry_t old_pt_entry)
{
- struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
- pv_entry_t pv = &md->pvh_first;
-
+
if (MIPS_CACHE_VIRTUAL_ALIAS) {
- (void)PG_MD_PVLIST_LOCK(md, false);
- if (PG_MD_CACHED_P(md)
- || (pv->pv_pmap != NULL
- && mips_cache_badalias(pv->pv_va, va))) {
-
- /*
- * If this page was previously cached or we had to use an
- * incompatible alias and it has a valid mapping, flush it
- * from the cache.
- */
- mips_dcache_wbinv_range(va, PAGE_SIZE);
- }
- PG_MD_PVLIST_UNLOCK(md);
+ /*
+ * Flush the page to avoid future incompatible aliases
+ */
+ KASSERT((va & PAGE_MASK) == 0);
+ mips_dcache_wbinv_range(va, PAGE_SIZE);
}
#ifndef _LP64
/*
@@ -1073,7 +1063,7 @@
while (pv != NULL) {
const pmap_t pmap = pv->pv_pmap;
const uint16_t gen = PG_MD_PVLIST_GEN(md);
- va = pv->pv_va;
+ va = trunc_page(pv->pv_va);
PG_MD_PVLIST_UNLOCK(md);
pmap_protect(pmap, va, va + PAGE_SIZE, prot);
KASSERT(pv->pv_pmap == pmap);
@@ -1101,7 +1091,7 @@
pv = &md->pvh_first;
while (pv->pv_pmap != NULL) {
const pmap_t pmap = pv->pv_pmap;
- va = pv->pv_va;
+ va = trunc_page(pv->pv_va);
PG_MD_PVLIST_UNLOCK(md);
pmap_remove(pmap, va, va + PAGE_SIZE);
pmap_update(pmap);
@@ -1118,6 +1108,9 @@
const uint32_t pg_mask = ~(mips_pg_m_bit() | mips_pg_ro_bit());
const uint32_t p = (flags & VM_PROT_WRITE) ? mips_pg_rw_bit() : mips_pg_ro_bit();
KASSERT(kpreempt_disabled());
+ KASSERT((sva & PAGE_MASK) == 0);
+ KASSERT((eva & PAGE_MASK) == 0);
+
/*
* Change protection on every valid mapping within this segment.
*/
@@ -1162,6 +1155,8 @@
pt_entry_t *pte;
u_int p;
+ KASSERT((sva & PAGE_MASK) == 0);
+ KASSERT((eva & PAGE_MASK) == 0);
PMAP_COUNT(protect);
#ifdef DEBUG
if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
@@ -1315,7 +1310,7 @@
pv != NULL;
pv = pv->pv_next) {
pmap_t pmap = pv->pv_pmap;
- vaddr_t va = pv->pv_va;
+ vaddr_t va = trunc_page(pv->pv_va);
pt_entry_t *pte;
uint32_t pt_entry;
@@ -1493,6 +1488,7 @@
struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
PMAP_COUNT(enter_exec_mapping);
if (!PG_MD_EXECPAGE_P(md)) {
+ KASSERT((pa & PAGE_MASK) == 0);
mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa),
PAGE_SIZE);
pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE);
@@ -1505,7 +1501,7 @@
kpreempt_disable();
if (pmap == pmap_kernel()) {
if (pg)
- pmap_enter_pv(pmap, va, pg, &npte);
+ pmap_enter_pv(pmap, va, pg, &npte, 0);
/* enter entries into kernel pmap */
pte = kvtopte(va);
@@ -1546,7 +1542,7 @@
/* Done after case that may sleep/return. */
if (pg)
- pmap_enter_pv(pmap, va, pg, &npte);
+ pmap_enter_pv(pmap, va, pg, &npte, 0);
/*
* Now validate mapping with desired protection/wiring.
@@ -1663,6 +1659,17 @@
kpreempt_disable();
pte = kvtopte(va);
KASSERT(!mips_pg_v(pte->pt_entry));
+
+ /*
+ * No need to track non-managed pages or PMAP_KMPAGE pages for aliases
+ */
+ if (managed && (flags & PMAP_KMPAGE) == 0) {
+ pmap_t pmap = pmap_kernel();
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+
+ pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER);
+ }
+
pte->pt_entry = npte;
pmap_tlb_update_addr(pmap_kernel(), va, npte, false);
kpreempt_enable();
@@ -1688,23 +1695,10 @@
}
PMAP_COUNT(kremove_pages);
- if (MIPS_HAS_R4K_MMU && MIPS_CACHE_VIRTUAL_ALIAS) {
- struct vm_page * const pg =
- PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
- if (pg != NULL) {
- struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
- (void)PG_MD_PVLIST_LOCK(md, false);
- pv_entry_t pv = &md->pvh_first;
- if (pv->pv_pmap == NULL) {
- pv->pv_va = va;
- } else if (PG_MD_CACHED_P(md)
- && mips_cache_badalias(pv->pv_va, va)) {
- mips_dcache_wbinv_range(va, PAGE_SIZE);
- }
- PG_MD_PVLIST_UNLOCK(md);
- }
- }
-
+ struct vm_page * const pg =
+ PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pt_entry));
+ if (pg)
+ pmap_remove_pv(pmap_kernel(), va, pg, false);
pte->pt_entry = new_pt_entry;
pmap_tlb_invalidate_addr(pmap_kernel(), va);
}
@@ -2001,10 +1995,13 @@
gen = PG_MD_PVLIST_LOCK(md, false);
for (; pv != NULL; pv = pv_next) {
pmap_t pmap = pv->pv_pmap;
- vaddr_t va = pv->pv_va;
+ vaddr_t va = trunc_page(pv->pv_va);
pt_entry_t *pte;
uint32_t pt_entry;
+
pv_next = pv->pv_next;
+ if (pv->pv_va & PV_KENTER)
+ continue;
if (pmap == pmap_kernel()) {
pte = kvtopte(va);
} else {
@@ -2083,8 +2080,13 @@
#ifdef _LP64
KASSERT(!MIPS_XKPHYS_P(pv->pv_va));
#endif
+ pv_entry_t opv = &md->pvh_first;
+ for (; opv != NULL; opv = opv->pv_next) {
+ if (mips_cache_badalias(pv->pv_va, opv->pv_va)) {
+ KASSERT(PG_MD_UNCACHED_P(md));
+ }
+ }
}
- pv = &md->pvh_first;
}
#endif /* PARANOIADIAG */
}
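The PARANOIADIAG loop above asserts the invariant the whole change maintains: if any two mappings of a page alias badly, the page must have been switched to uncached. Below is a self-contained model of that policy check, using stand-in types and the same example mask as the earlier sketch rather than the kernel's own structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_MASK	0xfffUL
#define PV_KENTER	0x001UL

struct pv_entry {
	struct pv_entry *pv_next;
	uintptr_t	 pv_va;		/* page-aligned VA | flag bits */
};

static const uintptr_t cache_alias_mask = 0x3000UL;	/* example value */

/* Same test as the earlier sketch of mips_cache_badalias(). */
static bool
badalias(uintptr_t va1, uintptr_t va2)
{
	return ((va1 ^ va2) & cache_alias_mask) != 0;
}

/*
 * Walk a page's PV list (managed and PV_KENTER mappings alike) and
 * decide whether adding a mapping at new_va would create an
 * incompatible alias, i.e. whether the page must be mapped uncached.
 */
static bool
must_uncache(const struct pv_entry *pv, uintptr_t new_va)
{
	for (; pv != NULL; pv = pv->pv_next) {
		uintptr_t va = pv->pv_va & ~PAGE_MASK;	/* trunc_page */
		if (badalias(va, new_va & ~PAGE_MASK))
			return true;
	}
	return false;
}

Tracking pmap_kenter_pa() mappings on the same PV list is what lets this check see all mappings of a page, which the pre-change code could not.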
@@ -2094,7 +2096,8 @@
* physical to virtual map table.
*/
void
-pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
+pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte,
+ int flags)
{
struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
pv_entry_t pv, npv, apv;
@@ -2134,7 +2137,7 @@
PMAP_COUNT(mappings);
pmap_clear_mdpage_attributes(md, PG_MD_UNCACHED);
pv->pv_pmap = pmap;
- pv->pv_va = va;
+ pv->pv_va = va | flags;
} else {
#if defined(MIPS3_PLUS) && !defined(MULTIPROCESSOR) /* XXX mmu XXX */
if (MIPS_CACHE_VIRTUAL_ALIAS) {
@@ -2155,8 +2158,9 @@
if (mips_cache_badalias(pv->pv_va, va)) {
for (npv = pv; npv; npv = npv->pv_next) {
- pmap_remove(npv->pv_pmap, npv->pv_va,
- npv->pv_va + PAGE_SIZE);
+ vaddr_t nva = trunc_page(npv->pv_va);
+ pmap_remove(npv->pv_pmap, nva,
+ nva + PAGE_SIZE);
pmap_update(npv->pv_pmap);
goto again;
}
@@ -2175,9 +2179,10 @@
* share the same cache index again.
*/
if (mips_cache_badalias(pv->pv_va, va)) {
+ vaddr_t nva = trunc_page(pv->pv_va);
pmap_page_cache(pg, false);
- mips_dcache_wbinv_range_index(
- pv->pv_va, PAGE_SIZE);