Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/arm32 Move closer to the common pmap by using t...
details: https://anonhg.NetBSD.org/src/rev/ebdb6849cd1d
branches: trunk
changeset: 353807:ebdb6849cd1d
user: skrll <skrll%NetBSD.org@localhost>
date: Wed May 24 06:27:33 2017 +0000
description:
Move closer to the common pmap by using the same pmap_remove_all
optimisation where TLB flushes are avoided by clever ASID assignment.
pmap_remove_all_complete can now be removed.
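For context, the idea behind the optimisation is that a pmap being torn down can simply give up its ASID: stale TLB entries remain tagged with the old ASID and can never be matched once a fresh ASID is handed out, so pmap_remove() need not flush each mapping individually. The sketch below is illustrative only and is not code from this changeset; the function name example_pmap_remove_all() is made up, and the helpers it calls are used in the spirit of the common pmap rather than verbatim.
/*
 * Illustrative sketch only -- not part of this commit.
 */
void
example_pmap_remove_all(pmap_t pm)
{

	/*
	 * Tell pmap_remove() that per-page TLB flushes may be skipped:
	 * every mapping in this pmap is about to go away.
	 */
	pm->pm_remove_all = true;

	pmap_remove(pm, VM_MIN_ADDRESS, VM_MAX_ADDRESS);

	/*
	 * Retire the pmap's ASID instead of flushing entries one by
	 * one.  Old TLB entries stay tagged with the retired ASID and
	 * cannot hit once a fresh ASID is assigned on the next
	 * activation.
	 */
	pmap_tlb_asid_release_all(pm);

	pm->pm_remove_all = false;
}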
diffstat:
sys/arch/arm/arm32/pmap.c | 311 ++++++++++++++++++++++-----------------------
1 files changed, 150 insertions(+), 161 deletions(-)
diffs (truncated from 493 to 300 lines):
diff -r 87714d0f757f -r ebdb6849cd1d sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Wed May 24 05:11:29 2017 +0000
+++ b/sys/arch/arm/arm32/pmap.c Wed May 24 06:27:33 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.347 2017/05/22 06:35:04 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.348 2017/05/24 06:27:33 skrll Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -217,7 +217,7 @@
#include <arm/locore.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.347 2017/05/22 06:35:04 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.348 2017/05/24 06:27:33 skrll Exp $");
//#define PMAP_DEBUG
#ifdef PMAP_DEBUG
@@ -485,6 +485,11 @@
#define PMAPCOUNT(x) ((void)0)
#endif
+#ifdef ARM_MMU_EXTENDED
+void pmap_md_pdetab_activate(pmap_t, struct lwp *);
+void pmap_md_pdetab_deactivate(pmap_t pm);
+#endif
+
/*
* pmap copy/zero page, and mem(5) hook point
*/
@@ -3411,14 +3416,6 @@
void
pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
- vaddr_t next_bucket;
- u_int cleanlist_idx, total, cnt;
- struct {
- vaddr_t va;
- pt_entry_t *ptep;
- } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
- u_int mappings;
-
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (pm=%p, sva=%#x, eva=%#x)", pm, sva, eva, 0);
@@ -3427,22 +3424,27 @@
*/
pmap_acquire_pmap_lock(pm);
+#ifndef ARM_MMU_EXTENDED
+ u_int cleanlist_idx, total, cnt;
+ struct {
+ vaddr_t va;
+ pt_entry_t *ptep;
+ } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
+
if (pm->pm_remove_all || !pmap_is_cached(pm)) {
cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
-#ifndef ARM_MMU_EXTENDED
if (pm->pm_cstate.cs_tlb == 0)
pm->pm_remove_all = true;
-#endif
} else
cleanlist_idx = 0;
-
total = 0;
+#endif
while (sva < eva) {
/*
* Do one L2 bucket's worth at a time.
*/
- next_bucket = L2_NEXT_BUCKET_VA(sva);
+ vaddr_t next_bucket = L2_NEXT_BUCKET_VA(sva);
if (next_bucket > eva)
next_bucket = eva;
@@ -3453,9 +3455,9 @@
}
pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)];
-
- for (mappings = 0;
- sva < next_bucket;
+ u_int mappings = 0;
+
+ for (;sva < next_bucket;
sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) {
pt_entry_t opte = *ptep;
@@ -3502,13 +3504,12 @@
}
#ifdef ARM_MMU_EXTENDED
- if (pm == pmap_kernel()) {
- l2pte_reset(ptep);
- PTE_SYNC(ptep);
- pmap_tlb_flush_SE(pm, sva, flags);
- continue;
+ l2pte_reset(ptep);
+ PTE_SYNC(ptep);
+ if (__predict_false(pm->pm_remove_all == false)) {
+ pmap_tlb_flush_SE(pm, sva, flags);
}
-#endif
+#else
if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
/* Add to the clean list. */
cleanlist[cleanlist_idx].ptep = ptep;
@@ -3540,8 +3541,10 @@
pmap_tlb_flush_SE(pm, sva, flags);
}
}
+#endif
}
+#ifndef ARM_MMU_EXTENDED
/*
* Deal with any left overs
*/
@@ -3550,10 +3553,6 @@
for (cnt = 0; cnt < cleanlist_idx; cnt++) {
l2pte_reset(cleanlist[cnt].ptep);
PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
-#ifdef ARM_MMU_EXTENDED
- vaddr_t clva = cleanlist[cnt].va;
- pmap_tlb_flush_SE(pm, clva, PVF_REF);
-#else
vaddr_t va = cleanlist[cnt].va;
if (pm->pm_cstate.cs_all != 0) {
vaddr_t clva = va & ~PAGE_MASK;
@@ -3565,7 +3564,6 @@
pmap_tlb_flush_SE(pm, clva,
PVF_REF | flags);
}
-#endif /* ARM_MMU_EXTENDED */
}
/*
@@ -3584,7 +3582,7 @@
pm->pm_remove_all = true;
}
}
-
+#endif /* ARM_MMU_EXTENDED */
pmap_free_l2_bucket(pm, l2b, mappings);
pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE);
@@ -4768,20 +4766,86 @@
pmap_release_pmap_lock(pm);
}
+#ifdef ARM_MMU_EXTENDED
+void
+pmap_md_pdetab_activate(pmap_t pm, struct lwp *l)
+{
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
+
+ /*
+ * Assume that TTBR1 has only global mappings and TTBR0 only
+ * has non-global mappings. To prevent speculation from doing
+ * evil things we disable translation table walks using TTBR0
+ * before setting the CONTEXTIDR (ASID) or new TTBR0 value.
+ * Once both are set, table walks are reenabled.
+ */
+ const uint32_t old_ttbcr = armreg_ttbcr_read();
+ armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
+ arm_isb();
+
+ pmap_tlb_asid_acquire(pm, l);
+
+ struct cpu_info * const ci = curcpu();
+ struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
+
+ cpu_setttb(pm->pm_l1_pa, pai->pai_asid);
+ /*
+ * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0
+ * have been updated.
+ */
+ arm_isb();
+
+ if (pm != pmap_kernel()) {
+ armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0);
+ }
+ cpu_cpwait();
+
+ UVMHIST_LOG(maphist, " pm %p pm->pm_l1_pa %08x asid %u... done", pm,
+ pm->pm_l1_pa, pai->pai_asid, 0);
+
+ KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u",
+ ci->ci_pmap_asid_cur, pai->pai_asid);
+ ci->ci_pmap_cur = pm;
+}
+
+void
+pmap_md_pdetab_deactivate(pmap_t pm)
+{
+
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
+
+ kpreempt_disable();
+ struct cpu_info * const ci = curcpu();
+ /*
+ * Disable translation table walks from TTBR0 while no pmap has been
+ * activated.
+ */
+ const uint32_t old_ttbcr = armreg_ttbcr_read();
+ armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
+ arm_isb();
+ pmap_tlb_asid_deactivate(pm);
+ cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID);
+ arm_isb();
+
+ ci->ci_pmap_cur = pmap_kernel();
+ KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u",
+ ci->ci_pmap_asid_cur);
+ kpreempt_enable();
+}
+#endif
+
void
pmap_activate(struct lwp *l)
{
- struct cpu_info * const ci = curcpu();
extern int block_userspace_access;
pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap;
-#ifdef ARM_MMU_EXTENDED
- struct pmap_asid_info * const pai = PMAP_PAI(npm, cpu_tlb_info(ci));
-#endif
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, "(l=%#x) pm=%#x", l, npm, 0, 0);
+ struct cpu_info * const ci = curcpu();
+
/*
* If activating a non-current lwp or the current lwp is
* already active, just return.
@@ -4831,9 +4895,7 @@
* entire cache.
*/
pmap_t rpm = ci->ci_pmap_lastuser;
-#endif
-
-#ifndef ARM_MMU_EXTENDED
+
/*
* XXXSCW: There's a corner case here which can leave turds in the
* cache as reported in kern/41058. They're probably left over during
@@ -4881,30 +4943,7 @@
#endif
#ifdef ARM_MMU_EXTENDED
- /*
- * Assume that TTBR1 has only global mappings and TTBR0 only has
- * non-global mappings. To prevent speculation from doing evil things
- * we disable translation table walks using TTBR0 before setting the
- * CONTEXTIDR (ASID) or new TTBR0 value. Once both are set, table
- * walks are reenabled.
- */
- UVMHIST_LOG(maphist, " acquiring asid", 0, 0, 0, 0);
- const uint32_t old_ttbcr = armreg_ttbcr_read();
- armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
- arm_isb();
- pmap_tlb_asid_acquire(npm, l);
- UVMHIST_LOG(maphist, " setting ttbr pa=%#x asid=%#x", npm->pm_l1_pa, pai->pai_asid, 0, 0);
- cpu_setttb(npm->pm_l1_pa, pai->pai_asid);
- /*
- * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0 have
- * been updated.
- */
- arm_isb();
- if (npm != pmap_kernel()) {
- armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0);
- }
- cpu_cpwait();
- ci->ci_pmap_asid_cur = pai->pai_asid;
+ pmap_md_pdetab_activate(npm, l);
#else
cpu_domains(ndacr);
if (npm == pmap_kernel() || npm == rpm) {
@@ -4947,8 +4986,8 @@
/* But the new one is */
npm->pm_activated = true;
}
-#endif
ci->ci_pmap_cur = npm;
+#endif
UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}
@@ -4962,20 +5001,7 @@
UVMHIST_LOG(maphist, "(l=%#x) pm=%#x", l, pm, 0, 0);
#ifdef ARM_MMU_EXTENDED
- kpreempt_disable();
- struct cpu_info * const ci = curcpu();
- /*
- * Disable translation table walks from TTBR0 while no pmap has been