Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/arm32 Optimize pmap_{copy,zero}_page_generic to...
details: https://anonhg.NetBSD.org/src/rev/57b7daab04ab
branches: trunk
changeset: 783205:57b7daab04ab
user: matt <matt%NetBSD.org@localhost>
date: Tue Dec 11 01:16:10 2012 +0000
description:
Optimize pmap_{copy,zero}_page_generic to understand that when all of memory
is always mapped, there is no need to create mappings dynamically.
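
The heart of the change: on kernels built with __HAVE_MM_MD_DIRECT_MAPPED_PHYS,
every physical page already has a permanent virtual address, so a PA can be
converted to a VA by simple arithmetic and the temporary csrcp/cdstp mappings
(with their PTE writes, PTE_SYNCs and TLB flushes) can be skipped whenever the
cache color permits. A minimal sketch of that fast path, not the committed
code; direct_map_va() and zero_page_fast() are made-up illustrative names,
while KERNEL_BASE, physical_start, arm_cache_prefer_mask and bzero_page() are
the real symbols used in the diff below:

    static inline vaddr_t
    direct_map_va(paddr_t pa)
    {
    	/* All of RAM is mapped linearly starting at KERNEL_BASE. */
    	return KERNEL_BASE + (pa - physical_start);
    }

    static void
    zero_page_fast(paddr_t pa, vsize_t va_offset)
    {
    	/* Does the page's last color match its natural color? */
    	const bool okcolor = va_offset == (pa & arm_cache_prefer_mask);

    	if (okcolor) {
    		/* No PTE write, PTE_SYNC or TLB flush needed. */
    		bzero_page(direct_map_va(pa));
    		return;
    	}
    	/* Otherwise fall back to a temporary mapping at cdstp. */
    }
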
diffstat:
sys/arch/arm/arm32/pmap.c | 158 +++++++++++++++++++++++++++++++--------------
1 files changed, 107 insertions(+), 51 deletions(-)
diffs (223 lines):
diff -r d09b379c2ce5 -r 57b7daab04ab sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Tue Dec 11 01:13:05 2012 +0000
+++ b/sys/arch/arm/arm32/pmap.c Tue Dec 11 01:16:10 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.243 2012/12/10 06:54:23 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.244 2012/12/11 01:16:10 matt Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
#include <arm/cpuconf.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.243 2012/12/10 06:54:23 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.244 2012/12/11 01:16:10 matt Exp $");
#ifdef PMAP_DEBUG
@@ -668,12 +668,6 @@
/*
- * External function prototypes
- */
-extern void bzero_page(vaddr_t);
-extern void bcopy_page(vaddr_t, vaddr_t);
-
-/*
* Misc variables
*/
vaddr_t virtual_avail;
@@ -4442,14 +4436,26 @@
struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
struct vm_page_md *md = VM_PAGE_TO_MD(pg);
#endif
-#ifdef PMAP_CACHE_VIPT
+#if defined(PMAP_CACHE_VIPT)
/* Choose the last page color it had, if any */
const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
#else
const vsize_t va_offset = 0;
#endif
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+ /*
+ * Is this page mapped at its natural color?
+ * If we have all of memory mapped, then just convert PA to VA.
+ */
+ const bool okcolor = va_offset == (phys & arm_cache_prefer_mask);
+ const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start);
+#else
+ const bool okcolor = false;
+ const vaddr_t vdstp = cdstp + va_offset;
+#endif
pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
+
#ifdef DEBUG
if (!SLIST_EMPTY(&md->pvh_list))
panic("pmap_zero_page: page has mappings");
@@ -4457,25 +4463,39 @@
KDASSERT((phys & PGOFSET) == 0);
- /*
- * Hook in the page, zero it, and purge the cache for that
- * zeroed page. Invalidate the TLB as needed.
- */
- *ptep = L2_S_PROTO | phys |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
- PTE_SYNC(ptep);
- cpu_tlb_flushD_SE(cdstp + va_offset);
- cpu_cpwait();
- bzero_page(cdstp + va_offset);
- /*
- * Unmap the page.
- */
- *ptep = 0;
- PTE_SYNC(ptep);
- cpu_tlb_flushD_SE(cdstp + va_offset);
+ if (!okcolor) {
+ /*
+ * Hook in the page, zero it, and purge the cache for that
+ * zeroed page. Invalidate the TLB as needed.
+ */
+ *ptep = L2_S_PROTO | phys |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(cdstp + va_offset);
+ cpu_cpwait();
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+ /*
+ * If we are direct-mapped and our color isn't ok, then before
+ * we bzero the page invalidate its contents from the cache and
+ * reset the color to its natural color.
+ */
+ cpu_dcache_inv_range(cdstp + va_offset, PAGE_SIZE);
+ md->pvh_attrs &= ~arm_cache_prefer_mask;
+ md->pvh_attrs |= (phys & arm_cache_prefer_mask);
+#endif
+ }
+ bzero_page(vdstp);
+ if (!okcolor) {
+ /*
+ * Unmap the page.
+ */
+ *ptep = 0;
+ PTE_SYNC(ptep);
+ cpu_tlb_flushD_SE(cdstp + va_offset);
#ifdef PMAP_CACHE_VIVT
- cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
-#endif
+ cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
+#endif
+ }
#ifdef PMAP_CACHE_VIPT
/*
* This page is now cache resident so it now has a page color.
@@ -4635,6 +4655,23 @@
const vsize_t src_va_offset = 0;
const vsize_t dst_va_offset = 0;
#endif
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+ /*
+ * Is this page mapped at its natural color?
+ * If we have all of memory mapped, then just convert PA to VA.
+ */
+ const bool src_okcolor = src_va_offset == (src & arm_cache_prefer_mask);
+ const bool dst_okcolor = dst_va_offset == (dst & arm_cache_prefer_mask);
+ const vaddr_t vsrcp = src_okcolor
+ ? KERNEL_BASE + (src - physical_start)
+ : csrcp + src_va_offset;
+ const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start);
+#else
+ const bool src_okcolor = false;
+ const bool dst_okcolor = false;
+ const vaddr_t vsrcp = csrcp + src_va_offset;
+ const vaddr_t vdstp = cdstp + dst_va_offset;
+#endif
pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT];
pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
@@ -4666,38 +4703,57 @@
* the cache for the appropriate page. Invalidate the TLB
* as required.
*/
- *src_ptep = L2_S_PROTO
- | src
+ if (!src_okcolor) {
+ *src_ptep = L2_S_PROTO
+ | src
#ifdef PMAP_CACHE_VIPT
- | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
+ | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
#endif
#ifdef PMAP_CACHE_VIVT
- | pte_l2_s_cache_mode
-#endif
- | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
- *dst_ptep = L2_S_PROTO | dst |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
- PTE_SYNC(src_ptep);
- PTE_SYNC(dst_ptep);
- cpu_tlb_flushD_SE(csrcp + src_va_offset);
- cpu_tlb_flushD_SE(cdstp + dst_va_offset);
- cpu_cpwait();
- bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset);
+ | pte_l2_s_cache_mode
+#endif
+ | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
+ PTE_SYNC(src_ptep);
+ cpu_tlb_flushD_SE(csrcp + src_va_offset);
+ cpu_cpwait();
+ }
+ if (!dst_okcolor) {
+ *dst_ptep = L2_S_PROTO | dst |
+ L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ PTE_SYNC(dst_ptep);
+ cpu_tlb_flushD_SE(cdstp + dst_va_offset);
+ cpu_cpwait();
+#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
+ /*
+ * If we are direct-mapped and our color isn't ok, then before
+ * we bcopy to the new page invalidate its contents from the
+ * cache and reset its color to its natural color.
+ */
+ cpu_dcache_inv_range(cdstp + dst_va_offset, PAGE_SIZE);
+ dst_md->pvh_attrs &= ~arm_cache_prefer_mask;
+ dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask);
+#endif
+ }
+ bcopy_page(vsrcp, vdstp);
#ifdef PMAP_CACHE_VIVT
- cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE);
-#endif
-#ifdef PMAP_CACHE_VIVT
- cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE);
+ cpu_dcache_inv_range(vsrcp, PAGE_SIZE);
+ cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
#endif
/*
* Unmap the pages.
*/
- *src_ptep = 0;
- *dst_ptep = 0;
- PTE_SYNC(src_ptep);
- PTE_SYNC(dst_ptep);
- cpu_tlb_flushD_SE(csrcp + src_va_offset);
- cpu_tlb_flushD_SE(cdstp + dst_va_offset);
+ if (!src_okcolor) {
+ *src_ptep = 0;
+ PTE_SYNC(src_ptep);
+ cpu_tlb_flushD_SE(csrcp + src_va_offset);
+ cpu_cpwait();
+ }
+ if (!dst_okcolor) {
+ *dst_ptep = 0;
+ PTE_SYNC(dst_ptep);
+ cpu_tlb_flushD_SE(cdstp + dst_va_offset);
+ cpu_cpwait();
+ }
#ifdef PMAP_CACHE_VIPT
/*
* Now that the destination page is in the cache, mark it as colored.
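
On a virtually-indexed (VIPT) cache, two mappings of the same physical page
alias safely only if the cache-index ("color") bits of their virtual addresses
agree, which is why the !okcolor paths above invalidate the stale lines and
rewrite the page's recorded color before touching it through the direct map.
A hedged sketch of that recoloring step, following the md->pvh_attrs
convention from the diff (recolor_page() is an illustrative name, not a
function in the tree):

    static void
    recolor_page(struct vm_page_md *md, paddr_t pa, vaddr_t old_va)
    {
    	/* Discard cache lines indexed under the page's old color. */
    	cpu_dcache_inv_range(old_va, PAGE_SIZE);

    	/* Record the natural (direct-map) color in the page attrs. */
    	md->pvh_attrs &= ~arm_cache_prefer_mask;
    	md->pvh_attrs |= (pa & arm_cache_prefer_mask);
    }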