Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/uvm/pmap MI PMAP hardware page table walker support.
details: https://anonhg.NetBSD.org/src/rev/6eae033f557f
branches: trunk
changeset: 372037:6eae033f557f
user: skrll <skrll%NetBSD.org@localhost>
date: Wed Oct 26 07:35:19 2022 +0000
description:
MI PMAP hardware page table walker support.
This is based on code given to me by Matt Thomas a long time ago with
many updates and bug fixes from me.
diffstat:
sys/arch/mips/include/pmap.h | 44 +-
sys/arch/mips/mips/pmap_machdep.c | 6 +-
sys/arch/powerpc/booke/booke_pmap.c | 26 +-
sys/arch/powerpc/booke/trap.c | 13 +-
sys/uvm/pmap/pmap.c | 174 +++++-
sys/uvm/pmap/pmap.h | 143 ++++-
sys/uvm/pmap/pmap_segtab.c | 1034 +++++++++++++++++++++++++++-------
sys/uvm/pmap/pmap_tlb.c | 36 +-
sys/uvm/pmap/pmap_tlb.h | 5 +-
9 files changed, 1192 insertions(+), 289 deletions(-)
diffs (truncated from 2149 to 300 lines):
diff -r 53c1fb46ac31 -r 6eae033f557f sys/arch/mips/include/pmap.h
--- a/sys/arch/mips/include/pmap.h Wed Oct 26 06:36:39 2022 +0000
+++ b/sys/arch/mips/include/pmap.h Wed Oct 26 07:35:19 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.76 2022/01/04 05:39:12 skrll Exp $ */
+/* $NetBSD: pmap.h,v 1.77 2022/10/26 07:35:19 skrll Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -158,6 +158,28 @@
#endif /* __PMAP_PRIVATE */
+// these use register_t so we can pass XKPHYS addresses to them on N32
+bool pmap_md_direct_mapped_vaddr_p(register_t);
+paddr_t pmap_md_direct_mapped_vaddr_to_paddr(register_t);
+bool pmap_md_io_vaddr_p(vaddr_t);
+
+/*
+ * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
+ */
+vaddr_t pmap_md_map_poolpage(paddr_t, size_t);
+paddr_t pmap_md_unmap_poolpage(vaddr_t, size_t);
+struct vm_page *pmap_md_alloc_poolpage(int);
+
+/*
+ * Other hooks for the pool allocator.
+ */
+paddr_t pmap_md_pool_vtophys(vaddr_t);
+vaddr_t pmap_md_pool_phystov(paddr_t);
+#define POOL_VTOPHYS(va) pmap_md_pool_vtophys((vaddr_t)va)
+#define POOL_PHYSTOV(pa) pmap_md_pool_phystov((paddr_t)pa)
+
+#define pmap_md_direct_map_paddr(pa) pmap_md_pool_phystov((paddr_t)pa)
+
struct tlbmask {
vaddr_t tlb_hi;
#ifdef __mips_o32
@@ -241,26 +263,6 @@
#define PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */
-// these use register_t so we can pass XKPHYS addresses to them on N32
-bool pmap_md_direct_mapped_vaddr_p(register_t);
-paddr_t pmap_md_direct_mapped_vaddr_to_paddr(register_t);
-bool pmap_md_io_vaddr_p(vaddr_t);
-
-/*
- * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
- */
-vaddr_t pmap_md_map_poolpage(paddr_t, size_t);
-paddr_t pmap_md_unmap_poolpage(vaddr_t, size_t);
-struct vm_page *pmap_md_alloc_poolpage(int);
-
-/*
- * Other hooks for the pool allocator.
- */
-paddr_t pmap_md_pool_vtophys(vaddr_t);
-vaddr_t pmap_md_pool_phystov(paddr_t);
-#define POOL_VTOPHYS(va) pmap_md_pool_vtophys((vaddr_t)va)
-#define POOL_PHYSTOV(pa) pmap_md_pool_phystov((paddr_t)pa)
-
#ifdef MIPS64_SB1
/* uncached accesses are bad; all accesses should be cached (and coherent) */
#undef PMAP_PAGEIDLEZERO
diff -r 53c1fb46ac31 -r 6eae033f557f sys/arch/mips/mips/pmap_machdep.c
--- a/sys/arch/mips/mips/pmap_machdep.c Wed Oct 26 06:36:39 2022 +0000
+++ b/sys/arch/mips/mips/pmap_machdep.c Wed Oct 26 07:35:19 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_machdep.c,v 1.37 2022/09/25 06:21:58 skrll Exp $ */
+/* $NetBSD: pmap_machdep.c,v 1.38 2022/10/26 07:35:20 skrll Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.37 2022/09/25 06:21:58 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.38 2022/10/26 07:35:20 skrll Exp $");
/*
* Manages physical address maps.
@@ -470,7 +470,7 @@
/*
* Now set the page table pointer...
*/
- stb->seg_tab[j] = &sysmap[i];
+ stb->seg_ppg[j] = (pmap_ptpage_t *)&sysmap[i];
#ifdef _LP64
/*
* If we are at end of this XSEG, terminate the loop
diff -r 53c1fb46ac31 -r 6eae033f557f sys/arch/powerpc/booke/booke_pmap.c
--- a/sys/arch/powerpc/booke/booke_pmap.c Wed Oct 26 06:36:39 2022 +0000
+++ b/sys/arch/powerpc/booke/booke_pmap.c Wed Oct 26 07:35:19 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: booke_pmap.c,v 1.35 2022/09/25 06:21:58 skrll Exp $ */
+/* $NetBSD: booke_pmap.c,v 1.36 2022/10/26 07:35:20 skrll Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -37,7 +37,7 @@
#define __PMAP_PRIVATE
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.35 2022/09/25 06:21:58 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.36 2022/10/26 07:35:20 skrll Exp $");
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
@@ -100,7 +100,7 @@
* the next time page is faulted, it will get icache
* synched. But this is easier. :)
*/
- paddr_t pa = VM_PAGE_TO_PHYS(pg);
+ const paddr_t pa = VM_PAGE_TO_PHYS(pg);
dcache_wb_page(pa);
icache_inv_page(pa);
}
@@ -227,11 +227,12 @@
* an extra page for the segment table and allows the user/kernel
* access to be common.
*/
- pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
- pt_entry_t *ptep = (void *)kv_segtabs;
- memset(ptep, 0, NBPG * kv_nsegtabs);
- for (size_t i = 0; i < kv_nsegtabs; i++, ptep += NPTEPG) {
- *ptp++ = ptep;
+
+ pmap_ptpage_t **ppg_p = &stp->seg_ppg[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
+ pmap_ptpage_t *ppg = (void *)kv_segtabs;
+ memset(ppg, 0, NBPG * kv_nsegtabs);
+ for (size_t i = 0; i < kv_nsegtabs; i++, ppg++) {
+ *ppg_p++ = ppg;
}
#if PMAP_MINIMALTLB
@@ -246,10 +247,10 @@
endkernel += NBPG * dm_nsegtabs;
ptp = stp->seg_tab;
- ptep = (void *)dm_segtabs;
- memset(ptep, 0, NBPG * dm_nsegtabs);
- for (size_t i = 0; i < dm_nsegtabs; i++, ptp++, ptep += NPTEPG) {
- *ptp = ptep;
+ ppg = (void *)dm_segtabs;
+ memset(ppg, 0, NBPG * dm_nsegtabs);
+ for (size_t i = 0; i < dm_nsegtabs; i++, ptp++, ppg ++) {
+ *ptp = ppg;
}
/*
@@ -308,6 +309,7 @@
struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
+
/*
* Any managed page works for us.
*/
diff -r 53c1fb46ac31 -r 6eae033f557f sys/arch/powerpc/booke/trap.c
--- a/sys/arch/powerpc/booke/trap.c Wed Oct 26 06:36:39 2022 +0000
+++ b/sys/arch/powerpc/booke/trap.c Wed Oct 26 07:35:19 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: trap.c,v 1.38 2022/09/25 06:21:58 skrll Exp $ */
+/* $NetBSD: trap.c,v 1.39 2022/10/26 07:35:20 skrll Exp $ */
/*-
* Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.38 2022/09/25 06:21:58 skrll Exp $");
+__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.39 2022/10/26 07:35:20 skrll Exp $");
#ifdef _KERNEL_OPT
#include "opt_altivec.h"
@@ -148,10 +148,13 @@
pmap_segtab_t * const stb = stbs[(tf->tf_srr1 / psl_mask) & 1];
if (__predict_false(stb == NULL))
return NULL;
- pt_entry_t * const ptep = stb->seg_tab[va >> SEGSHIFT];
- if (__predict_false(ptep == NULL))
+
+ pmap_ptpage_t * const ppg = stb->seg_ppg[va >> SEGSHIFT];
+ if (__predict_false(ppg == NULL))
return NULL;
- return ptep + ((va & SEGOFSET) >> PAGE_SHIFT);
+ const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);
+
+ return ppg->ppg_ptes + pte_idx;
}
static int
diff -r 53c1fb46ac31 -r 6eae033f557f sys/uvm/pmap/pmap.c
--- a/sys/uvm/pmap/pmap.c Wed Oct 26 06:36:39 2022 +0000
+++ b/sys/uvm/pmap/pmap.c Wed Oct 26 07:35:19 2022 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.68 2022/10/23 06:37:15 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.69 2022/10/26 07:35:20 skrll Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.68 2022/10/23 06:37:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.69 2022/10/26 07:35:20 skrll Exp $");
/*
* Manages physical address maps.
@@ -95,9 +95,11 @@
* and to when physical maps must be made correct.
*/
+#include "opt_ddb.h"
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_sysv.h"
+#include "opt_uvmhist.h"
#define __PMAP_PRIVATE
@@ -194,6 +196,18 @@
#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);
+#ifdef PMAP_HWPAGEWALKER
+#ifndef PMAP_PDETAB_ALIGN
+#define PMAP_PDETAB_ALIGN /* nothing */
+#endif
+
+#ifdef _LP64
+pmap_pdetab_t pmap_kstart_pdetab PMAP_PDETAB_ALIGN; /* first mid-level pdetab for kernel */
+#endif
+pmap_pdetab_t pmap_kern_pdetab PMAP_PDETAB_ALIGN;
+#endif
+
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
#ifndef PMAP_SEGTAB_ALIGN
#define PMAP_SEGTAB_ALIGN /* nothing */
#endif
@@ -205,11 +219,17 @@
.seg_seg[(VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NSEGPG - 1)] = &pmap_kstart_segtab,
#endif
};
+#endif
struct pmap_kernel kernel_pmap_store = {
.kernel_pmap = {
.pm_count = 1,
+#ifdef PMAP_HWPAGEWALKER
+ .pm_pdetab = PMAP_INVALID_PDETAB_ADDRESS,
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
.pm_segtab = &pmap_kern_segtab,
+#endif
.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
},
@@ -228,10 +248,10 @@
#ifdef UVMHIST
static struct kern_history_ent pmapexechistbuf[10000];
static struct kern_history_ent pmaphistbuf[10000];
-static struct kern_history_ent pmapsegtabhistbuf[1000];
+static struct kern_history_ent pmapxtabhistbuf[5000];
UVMHIST_DEFINE(pmapexechist) = UVMHIST_INITIALIZER(pmapexechist, pmapexechistbuf);
UVMHIST_DEFINE(pmaphist) = UVMHIST_INITIALIZER(pmaphist, pmaphistbuf);
-UVMHIST_DEFINE(pmapsegtabhist) = UVMHIST_INITIALIZER(pmapsegtabhist, pmapsegtabhistbuf);
+UVMHIST_DEFINE(pmapxtabhist) = UVMHIST_INITIALIZER(pmapxtabhist, pmapxtabhistbuf);
#endif
/*
@@ -370,6 +390,7 @@
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
{
volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
+
#ifdef MULTIPROCESSOR
for (;;) {
u_int old_attr = *attrp;
@@ -454,7 +475,6 @@
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{
-
*vstartp = pmap_limits.virtual_start;
*vendp = pmap_limits.virtual_end;
}
@@ -597,6 +617,29 @@
void
pmap_bootstrap_common(void)
{
+ UVMHIST_LINK_STATIC(pmapexechist);
+ UVMHIST_LINK_STATIC(pmaphist);
+ UVMHIST_LINK_STATIC(pmapxtabhist);
+
+ static const struct uvm_pagerops pmap_pager = {
Home |
Main Index |
Thread Index |
Old Index