Source-Changes-HG archive
[src/trunk]: src/sys/arch x86 pmap improvements, reducing system time during ...
details: https://anonhg.NetBSD.org/src/rev/bbf7c979ef50
branches: trunk
changeset: 968171:bbf7c979ef50
user: ad <ad@NetBSD.org>
date: Sat Jan 04 22:49:20 2020 +0000
description:
x86 pmap improvements, reducing system time during a build by about 15% on
my test machine:
- Replace the global pv_hash with a per-pmap record of dynamically allocated
pv entries. The data structure used for this can be changed easily and
has no special concurrency requirements. For now go with radixtree (see
the pv bookkeeping sketch after this list).
- Change pmap_pdp_cache back into a pool; cache the page directory with the
pmap, and avoid contention on pmaps_lock by adjusting the global list in
the pool_cache ctor & dtor (see the ctor/dtor sketch below). Align struct
pmap and its lock, and update some comments.
- Simplify pv_entry lists slightly. Allow both PP_EMBEDDED and dynamically
allocated entries to coexist on a single page (see the traversal sketch
below). This adds a pointer to struct vm_page on x86, but shrinks pv_entry
to 32 bytes (which also gets it nicely aligned).
- More elegantly solve the chicken-and-egg problem introduced into the pmap
by radixtree lookup for pages, where PTE mappings and page allocations
must happen under a single hold of the pmap's lock (see the
allocate-then-install sketch below). While here, remove some cut-and-paste
duplication.
- Don't adjust pmap_kernel's stats with atomics, because its mutex is now
held in the places where the stats are changed (see the final sketch
below).
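
Since the diff below is truncated, here are minimal sketches of the items
above. They are illustrative reconstructions, not the committed code.
First, the per-pmap pv bookkeeping: the helper names and the use of
va >> PAGE_SHIFT as the tree key are assumptions; the radix_tree_* calls
are the stock <sys/radixtree.h> API that the pmap.h diff now pulls in.

/*
 * Sketch: dynamically allocated pv entries tracked per-pmap in
 * pmap->pm_pvtree.  Helper names and the key choice are assumed.
 */
static int
pmap_pve_insert(struct pmap *pmap, vaddr_t va, struct pv_entry *pve)
{

    KASSERT(mutex_owned(&pmap->pm_lock));
    /* returns 0 on success, ENOMEM if tree nodes cannot be allocated */
    return radix_tree_insert_node(&pmap->pm_pvtree, va >> PAGE_SHIFT, pve);
}

static struct pv_entry *
pmap_pve_lookup(struct pmap *pmap, vaddr_t va)
{

    KASSERT(mutex_owned(&pmap->pm_lock));
    return radix_tree_lookup_node(&pmap->pm_pvtree, va >> PAGE_SHIFT);
}

static struct pv_entry *
pmap_pve_remove(struct pmap *pmap, vaddr_t va)
{

    KASSERT(mutex_owned(&pmap->pm_lock));
    /* returns the removed entry, or NULL if none was present */
    return radix_tree_remove_node(&pmap->pm_pvtree, va >> PAGE_SHIFT);
}

Because the tree hangs off the pmap and is only touched with pm_lock held,
it needs no locking of its own, which is what makes the data structure
easy to swap out later.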
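Next, the pool_cache ctor/dtor pattern from the second item. The
pmap_ctor()/pmap_dtor() declarations appear in the pmap.c diff; the
bodies below are assumed for illustration. The point is that the setup
work and the pmaps_lock acquisition happen only when an object enters or
leaves the cache, not on every pmap_create()/pmap_destroy().

static int
pmap_ctor(void *arg, void *obj, int flags)
{
    struct pmap *pmap = obj;

    /* cache the page directory with the pmap */
    pmap->pm_pdir = pool_get(&pmap_pdp_pool,
        (flags & PR_WAITOK) != 0 ? PR_WAITOK : PR_NOWAIT);
    if (pmap->pm_pdir == NULL)
        return ENOMEM;
    pmap_pdp_init(pmap->pm_pdir);

    mutex_init(&pmap->pm_lock, MUTEX_DEFAULT, IPL_NONE);
    radix_tree_init_tree(&pmap->pm_pvtree);

    /* adjust the global list here, off the create/destroy fast path */
    mutex_enter(&pmaps_lock);
    LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
    mutex_exit(&pmaps_lock);
    return 0;
}

static void
pmap_dtor(void *arg, void *obj)
{
    struct pmap *pmap = obj;

    mutex_enter(&pmaps_lock);
    LIST_REMOVE(pmap, pm_list);
    mutex_exit(&pmaps_lock);

    pool_put(&pmap_pdp_pool, pmap->pm_pdir);
    radix_tree_fini_tree(&pmap->pm_pvtree);
    mutex_destroy(&pmap->pm_lock);
}

A pmap recycled from the cache thus skips the PDP setup and the
global-list manipulation entirely.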
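The third item changes traversal on a pv-tracked page: the embedded entry
and the dynamic list are no longer mutually exclusive, so both get
visited. A sketch against the new pmap_pv.h layout shown below, with the
per-mapping work elided:

/*
 * Sketch: PP_EMBEDDED and dynamically allocated pv entries now
 * coexist on one page, so a full walk must visit both.
 */
static void
pp_visit_all_sketch(struct pmap_page *pp)
{
    struct pv_entry *pve;

    if ((pp->pp_flags & PP_EMBEDDED) != 0) {
        /* ... visit the embedded mapping, &pp->pp_pte ... */
    }
    LIST_FOREACH(pve, &pp->pp_pvlist, pve_list) {
        /* ... visit one dynamically allocated mapping ... */
    }
}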
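The chicken-and-egg fix from the fourth item can be read from the new
pmap_get_ptp()/pmap_unget_ptp() prototypes and struct pmap_ptparray in
the diff: allocate every page-table page up front, and only publish them
once nothing can fail. Rough control flow only, with an assumed stand-in
for the other fallible allocations:

/*
 * Hypothetical stand-in for the other allocations (pv entry, radix
 * tree nodes) that must succeed under the same lock hold.
 */
static bool pmap_alloc_extra_sketch(struct pmap *, vaddr_t);

static int
pmap_enter_sketch(struct pmap *pmap, vaddr_t va, int flags)
{
    struct pmap_ptparray pt;
    struct vm_page *ptp;
    int error;

    mutex_enter(&pmap->pm_lock);

    /* 1. get or allocate the PTPs; nothing is installed yet */
    error = pmap_get_ptp(pmap, &pt, va, flags, &ptp);
    if (error != 0) {
        mutex_exit(&pmap->pm_lock);
        return error;
    }

    /* 2. other fallible allocations, before the point of no return */
    if (!pmap_alloc_extra_sketch(pmap, va)) {
        pmap_unget_ptp(pmap, &pt);    /* roll the fresh PTPs back */
        mutex_exit(&pmap->pm_lock);
        return ENOMEM;
    }

    /*
     * 3. past the failure point: install the PTPs, write the PTE and
     *    update stats, all under the same hold of pm_lock.
     */

    mutex_exit(&pmap->pm_lock);
    return 0;
}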
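Finally, the stats change in miniature: with pm_lock held wherever
pmap_kernel's stats move, a plain read-modify-write is enough.

/*
 * Sketch: no atomics needed for pmap_kernel's stats now that
 * pm_lock covers every update site.
 */
static inline void
pmap_stats_adjust_sketch(struct pmap *pmap, long resid_delta)
{

    KASSERT(mutex_owned(&pmap->pm_lock));
    pmap->pm_stats.resident_count += resid_delta; /* was atomic_add_long() */
}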
diffstat:
sys/arch/x86/include/pmap.h | 23 +-
sys/arch/x86/include/pmap_pv.h | 14 +-
sys/arch/x86/x86/pmap.c | 1175 ++++++++++++++++++++-------------------
sys/arch/xen/x86/xen_pmap.c | 16 +-
4 files changed, 623 insertions(+), 605 deletions(-)
diffs (truncated from 2228 to 300 lines):
diff -r 0bffbf969f80 -r bbf7c979ef50 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h Sat Jan 04 22:46:01 2020 +0000
+++ b/sys/arch/x86/include/pmap.h Sat Jan 04 22:49:20 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.107 2019/12/15 19:24:11 ad Exp $ */
+/* $NetBSD: pmap.h,v 1.108 2020/01/04 22:49:20 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,6 +67,8 @@
#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_
+#include <sys/radixtree.h>
+
/*
* pl*_pi: index in the ptp page for a pde mapping a VA.
* (pl*_i below is the index in the virtual array of all pdes per level)
@@ -232,9 +234,9 @@
extern kmutex_t pmaps_lock; /* protects pmaps */
/*
- * pool_cache(9) that PDPs are allocated from
+ * pool_cache(9) that pmaps are allocated from
*/
-extern struct pool_cache pmap_pdp_cache;
+extern struct pool_cache pmap_cache;
/*
* the pmap structure
@@ -248,14 +250,14 @@
*/
struct pmap {
- struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1) */
- kmutex_t pm_lock; /* locks for pm_objs */
- LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
- pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
+ struct uvm_object pm_obj[PTP_LEVELS-1];/* objects for lvl >= 1) */
+ LIST_ENTRY(pmap) pm_list; /* list of all pmaps */
+ pd_entry_t *pm_pdir; /* VA of PD */
paddr_t pm_pdirpa[PDP_SIZE]; /* PA of PDs (read-only after create) */
struct vm_page *pm_ptphint[PTP_LEVELS-1];
/* pointer to a PTP in our pmap */
- struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
+ struct radix_tree pm_pvtree; /* tree of non-embedded pv entries */
+ struct pmap_statistics pm_stats; /* pmap stats */
#if !defined(__x86_64__)
vaddr_t pm_hiexec; /* highest executable mapping */
@@ -286,6 +288,9 @@
void (*pm_tlb_flush)(struct pmap *);
void *pm_data;
+
+ kmutex_t pm_lock /* locks for pm_objs */
+ __aligned(64); /* give lock own cache line */
};
/* macro to access pm_pdirpa slots */
@@ -374,7 +379,7 @@
void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
pd_entry_t * const **);
-void pmap_unmap_ptes(struct pmap *, struct pmap *, struct vm_page *);
+void pmap_unmap_ptes(struct pmap *, struct pmap *);
bool pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *,
int *lastlvl);
diff -r 0bffbf969f80 -r bbf7c979ef50 sys/arch/x86/include/pmap_pv.h
--- a/sys/arch/x86/include/pmap_pv.h Sat Jan 04 22:46:01 2020 +0000
+++ b/sys/arch/x86/include/pmap_pv.h Sat Jan 04 22:49:20 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_pv.h,v 1.8 2020/01/02 21:39:42 ad Exp $ */
+/* $NetBSD: pmap_pv.h,v 1.9 2020/01/04 22:49:20 ad Exp $ */
/*-
* Copyright (c)2008 YAMAMOTO Takashi,
@@ -55,8 +55,7 @@
struct pv_entry {
struct pv_pte pve_pte; /* should be the first member */
- LIST_ENTRY(pv_entry) pve_list; /* on pv_head::pvh_list */
- SLIST_ENTRY(pv_entry) pve_hash;
+ LIST_ENTRY(pv_entry) pve_list; /* on pmap_page::pp_pvlist */
};
#define pve_next pve_list.le_next
@@ -69,16 +68,11 @@
/* PP_EMBEDDED */
struct pv_pte u_pte;
- /* !PP_EMBEDDED */
- struct pv_head {
- LIST_HEAD(, pv_entry) pvh_list;
- } u_head;
-
/* PTPs */
struct vm_page *u_link;
} pp_u;
+ LIST_HEAD(, pv_entry) pp_pvlist;
#define pp_pte pp_u.u_pte
-#define pp_head pp_u.u_head
#define pp_link pp_u.u_link
uint8_t pp_flags;
uint8_t pp_attrs;
@@ -90,6 +84,6 @@
/* pp_flags */
#define PP_EMBEDDED 1
-#define PMAP_PAGE_INIT(pp) /* none */
+#define PMAP_PAGE_INIT(pp) LIST_INIT(&(pp)->pp_pvlist)
#endif /* !_X86_PMAP_PV_H_ */
diff -r 0bffbf969f80 -r bbf7c979ef50 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Sat Jan 04 22:46:01 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c Sat Jan 04 22:49:20 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.352 2020/01/02 21:39:42 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.353 2020/01/04 22:49:20 ad Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017, 2019 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.352 2020/01/02 21:39:42 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.353 2020/01/04 22:49:20 ad Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -213,8 +213,8 @@
* - struct pmap_page: describes one pv-tracked page, without
* necessarily a corresponding vm_page
* - struct pv_entry: describes one <PMAP,VA> mapping of a PA
- * - struct pv_head: there is one pv_head per pv-tracked page of
- * physical memory. the pv_head points to a list of pv_entry
+ * - pmap_page::pp_pvlist: there is one list per pv-tracked page of
+ * physical memory. the pp_pvlist points to a list of pv_entry
* structures which describe all the <PMAP,VA> pairs that this
* page is mapped in. this is critical for page based operations
* such as pmap_page_protect() [change protection on _all_ mappings
@@ -224,16 +224,19 @@
/*
* Locking
*
- * We have the following locks that we must contend with:
+ * We have the following locks that we must contend with, listed in the
+ * order that they must be acquired:
*
- * - pmap lock (per pmap, part of uvm_object)
+ * - pg->uobject->vmobjlock, pg->uanon->an_lock
+ * These per-object locks are taken by the VM system before calling into
+ * the pmap module. Holding them prevents concurrent operations on the
+ * given page or set of pages. Asserted with uvm_page_owner_locked_p().
+ *
+ * - pmap->pm_lock (per pmap)
* This lock protects the fields in the pmap structure including the
- * non-kernel PDEs in the PDP, and the PTEs.
- *
- * - pvh_lock (per pv_head)
- * This lock protects the pv_entry list which is chained off the pv_head
- * structure for a specific pv-tracked PA. It is locked when traversing
- * the list (e.g. adding/removing mappings, syncing R/M bits, etc).
+ * non-kernel PDEs in the PDP, the PTEs, and the PVE radix tree. For
+ * modifying kernel PTEs it is not required as kernel PDEs are never
+ * freed, and the kernel is expected to be self consistent.
*
* - pmaps_lock
* This lock protects the list of active pmaps (headed by "pmaps"). We
@@ -254,7 +257,7 @@
long nkptp[] = NKPTP_INITIALIZER;
struct pmap_head pmaps;
-kmutex_t pmaps_lock;
+kmutex_t pmaps_lock __cacheline_aligned;
struct pcpu_area *pcpuarea __read_mostly;
@@ -275,7 +278,7 @@
* Global data structures
*/
-static struct pmap kernel_pmap_store; /* the kernel's pmap (proc0) */
+static struct pmap kernel_pmap_store __cacheline_aligned; /* kernel's pmap */
struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
struct bootspace bootspace __read_mostly;
@@ -301,61 +304,6 @@
#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mp_pp)
-#define PV_HASH_SIZE 32768
-#define PV_HASH_LOCK_CNT 32
-
-struct pv_hash_lock {
- kmutex_t lock;
-} __aligned(CACHE_LINE_SIZE) pv_hash_locks[PV_HASH_LOCK_CNT]
- __aligned(CACHE_LINE_SIZE);
-
-struct pv_hash_head {
- SLIST_HEAD(, pv_entry) hh_list;
-} pv_hash_heads[PV_HASH_SIZE];
-
-static u_int
-pvhash_hash(struct vm_page *ptp, vaddr_t va)
-{
-
- return (uintptr_t)ptp / sizeof(*ptp) + (va >> PAGE_SHIFT);
-}
-
-static struct pv_hash_head *
-pvhash_head(u_int hash)
-{
-
- return &pv_hash_heads[hash % PV_HASH_SIZE];
-}
-
-static kmutex_t *
-pvhash_lock(u_int hash)
-{
-
- return &pv_hash_locks[hash % PV_HASH_LOCK_CNT].lock;
-}
-
-static struct pv_entry *
-pvhash_remove(struct pv_hash_head *hh, struct vm_page *ptp, vaddr_t va)
-{
- struct pv_entry *pve;
- struct pv_entry *prev;
-
- prev = NULL;
- SLIST_FOREACH(pve, &hh->hh_list, pve_hash) {
- if (pve->pve_pte.pte_ptp == ptp &&
- pve->pve_pte.pte_va == va) {
- if (prev != NULL) {
- SLIST_REMOVE_AFTER(prev, pve_hash);
- } else {
- SLIST_REMOVE_HEAD(&hh->hh_list, pve_hash);
- }
- break;
- }
- prev = pve;
- }
- return pve;
-}
-
/*
* Other data structures
*/
@@ -384,7 +332,9 @@
/*
* pool that pmap structures are allocated from
*/
-static struct pool_cache pmap_cache;
+struct pool_cache pmap_cache;
+static int pmap_ctor(void *, void *, int);
+static void pmap_dtor(void *, void *);
/*
* pv_entry cache
@@ -411,10 +361,10 @@
int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
-/* PDP pool_cache(9) and its callbacks */
-struct pool_cache pmap_pdp_cache;
-static int pmap_pdp_ctor(void *, void *, int);
-static void pmap_pdp_dtor(void *, void *);
+/* PDP pool and its callbacks */
+static struct pool pmap_pdp_pool;
+static void pmap_pdp_init(pd_entry_t *);
+static void pmap_pdp_fini(pd_entry_t *);
#ifdef PAE
/* need to allocate items of 4 pages */
@@ -439,6 +389,12 @@
extern vaddr_t pentium_idt_vaddr;
#endif
+/* Array of freshly allocated PTPs, for pmap_get_ptp(). */
+struct pmap_ptparray {
+ struct vm_page *pg[PTP_LEVELS + 1];
+ bool alloced[PTP_LEVELS + 1];
+};
+
/*
* Local prototypes
*/
@@ -457,8 +413,11 @@
static void pmap_remap_largepages(void);
#endif
-static int pmap_get_ptp(struct pmap *, vaddr_t,
- pd_entry_t * const *, int, struct vm_page **);
+static int pmap_get_ptp(struct pmap *, struct pmap_ptparray *, vaddr_t, int,
+ struct vm_page **);
+static void pmap_unget_ptp(struct pmap *, struct pmap_ptparray *);
+static void pmap_install_ptp(struct pmap *, struct pmap_ptparray *, vaddr_t,