Source-Changes-HG archive
[src/trunk]: src/sys/arch Expose pmap_pdp_cache publicly to x86/xen pmap. Provide suspend/resume callbacks for Xen pmap.
details: https://anonhg.NetBSD.org/src/rev/0567229128e9
branches: trunk
changeset: 771437:0567229128e9
user: jym <jym%NetBSD.org@localhost>
date: Sun Nov 20 19:41:27 2011 +0000
description:
Expose pmap_pdp_cache publicly to x86/xen pmap. Provide suspend/resume
callbacks for Xen pmap.
Turn the internal callbacks of pmap_pdp_cache static.
XXX the implementation of pool_cache_invalidate(9) is still wrong, and
IMHO this needs fixing before -6. See
http://mail-index.netbsd.org/tech-kern/2011/11/18/msg011924.html
diffstat:
sys/arch/x86/include/pmap.h | 25 +++++++++++++----------
sys/arch/x86/x86/pmap.c | 43 +++++++++++------------------------------
sys/arch/xen/x86/xen_pmap.c | 34 +++++++++++++++++++++-----------
sys/arch/xen/xen/xen_machdep.c | 19 +++--------------
4 files changed, 52 insertions(+), 69 deletions(-)
diffs (truncated from 311 to 300 lines):
diff -r 87ddb56a74d1 -r 0567229128e9 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h Sun Nov 20 18:42:56 2011 +0000
+++ b/sys/arch/x86/include/pmap.h Sun Nov 20 19:41:27 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.45 2011/11/08 17:16:52 cherry Exp $ */
+/* $NetBSD: pmap.h,v 1.46 2011/11/20 19:41:27 jym Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -125,6 +125,11 @@
extern kmutex_t pmaps_lock; /* protects pmaps */
/*
+ * pool_cache(9) that PDPs are allocated from
+ */
+extern struct pool_cache pmap_pdp_cache;
+
+/*
* the pmap structure
*
* note that the pm_obj contains the lock pointer, the reference count,
@@ -261,8 +266,6 @@
bool pmap_is_curpmap(struct pmap *);
-void pmap_invalidate_pool_caches(void);
-
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
typedef enum tlbwhy {
@@ -411,15 +414,7 @@
void pmap_cpu_init_late(struct cpu_info *);
bool sse2_idlezero_page(void *);
-
#ifdef XEN
-
-void pmap_unmap_all_apdp_pdes(void);
-#ifdef PAE
-void pmap_map_recursive_entries(void);
-void pmap_unmap_recursive_entries(void);
-#endif /* PAE */
-
#include <sys/bitops.h>
#define XPTE_MASK L1_FRAME
@@ -468,9 +463,17 @@
paddr_t vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
+void pmap_xen_resume(void);
+void pmap_xen_suspend(void);
+
void pmap_apte_flush(struct pmap *);
void pmap_unmap_apdp(void);
+#ifdef PAE
+void pmap_map_recursive_entries(void);
+void pmap_unmap_recursive_entries(void);
+#endif /* PAE */
+
#endif /* XEN */
/* pmap functions with machine addresses */
diff -r 87ddb56a74d1 -r 0567229128e9 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Sun Nov 20 18:42:56 2011 +0000
+++ b/sys/arch/x86/x86/pmap.c Sun Nov 20 19:41:27 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.141 2011/11/08 17:16:52 cherry Exp $ */
+/* $NetBSD: pmap.c,v 1.142 2011/11/20 19:41:27 jym Exp $ */
/*-
* Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -171,7 +171,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.141 2011/11/08 17:16:52 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.142 2011/11/20 19:41:27 jym Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -508,17 +508,14 @@
int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
-/*
- * pool and cache that PDPs are allocated from
- */
-
-static struct pool_cache pmap_pdp_cache;
-int pmap_pdp_ctor(void *, void *, int);
-void pmap_pdp_dtor(void *, void *);
+/* PDP pool_cache(9) and its callbacks */
+struct pool_cache pmap_pdp_cache;
+static int pmap_pdp_ctor(void *, void *, int);
+static void pmap_pdp_dtor(void *, void *);
#ifdef PAE
/* need to allocate items of 4 pages */
-void *pmap_pdp_alloc(struct pool *, int);
-void pmap_pdp_free(struct pool *, void *);
+static void *pmap_pdp_alloc(struct pool *, int);
+static void pmap_pdp_free(struct pool *, void *);
static struct pool_allocator pmap_pdp_allocator = {
.pa_alloc = pmap_pdp_alloc,
.pa_free = pmap_pdp_free,
@@ -2014,7 +2011,7 @@
/*
* pmap_pdp_ctor: constructor for the PDP cache.
*/
-int
+static int
pmap_pdp_ctor(void *arg, void *v, int flags)
{
pd_entry_t *pdir = v;
@@ -2121,7 +2118,7 @@
* pmap_pdp_dtor: destructor for the PDP cache.
*/
-void
+static void
pmap_pdp_dtor(void *arg, void *v)
{
#ifdef XEN
@@ -2152,7 +2149,7 @@
/* pmap_pdp_alloc: Allocate a page for the pdp memory pool. */
-void *
+static void *
pmap_pdp_alloc(struct pool *pp, int flags)
{
return (void *)uvm_km_alloc(kernel_map,
@@ -2165,7 +2162,7 @@
* pmap_pdp_free: free a PDP
*/
-void
+static void
pmap_pdp_free(struct pool *pp, void *v)
{
uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE * PDP_SIZE,
@@ -4466,19 +4463,3 @@
return pflag;
}
-
-/*
- * Invalidates pool_cache(9) used by pmap(9).
- */
-void
-pmap_invalidate_pool_caches(void)
-{
-#ifdef XEN
- /*
- * We must invalidate all shadow pages found inside the pmap_pdp_cache.
- * They are technically considered by Xen as L2 pages, although they
- * are not currently found inside pmaps list.
- */
- pool_cache_invalidate(&pmap_pdp_cache);
-#endif
-}
diff -r 87ddb56a74d1 -r 0567229128e9 sys/arch/xen/x86/xen_pmap.c
--- a/sys/arch/xen/x86/xen_pmap.c Sun Nov 20 18:42:56 2011 +0000
+++ b/sys/arch/xen/x86/xen_pmap.c Sun Nov 20 19:41:27 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xen_pmap.c,v 1.8 2011/11/08 17:16:52 cherry Exp $ */
+/* $NetBSD: xen_pmap.c,v 1.9 2011/11/20 19:41:27 jym Exp $ */
/*
* Copyright (c) 2007 Manuel Bouyer.
@@ -102,7 +102,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.8 2011/11/08 17:16:52 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.9 2011/11/20 19:41:27 jym Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -434,13 +434,12 @@
/*
* Flush all APDP entries found in pmaps
- * Required during Xen save/restore operations, as it does not
+ * Required during Xen save/restore operations, as Xen does not
* handle alternative recursive mappings properly
*/
void
-pmap_unmap_all_apdp_pdes(void)
+pmap_xen_suspend(void)
{
-
int i;
int s;
struct pmap *pm;
@@ -470,6 +469,17 @@
splx(s);
+#ifdef PAE
+ pmap_unmap_recursive_entries();
+#endif
+}
+
+void
+pmap_xen_resume(void)
+{
+#ifdef PAE
+ pmap_map_recursive_entries();
+#endif
}
#ifdef PAE
@@ -486,12 +496,10 @@
void
pmap_map_recursive_entries(void)
{
-
int i;
struct pmap *pm;
mutex_enter(&pmaps_lock);
-
LIST_FOREACH(pm, &pmaps, pm_list) {
for (i = 0; i < PDP_SIZE; i++) {
xpq_queue_pte_update(
@@ -499,7 +507,6 @@
xpmap_ptom((pm)->pm_pdirpa[i]) | PG_V);
}
}
-
mutex_exit(&pmaps_lock);
for (i = 0; i < PDP_SIZE; i++) {
@@ -514,21 +521,24 @@
void
pmap_unmap_recursive_entries(void)
{
-
int i;
struct pmap *pm;
- pmap_invalidate_pool_caches();
+ /*
+ * Invalidate pmap_pdp_cache as it contains L2-pinned objects with
+ * recursive entries.
+ * XXX jym@ : find a way to drain per-CPU caches too;
+ * pool_cache_invalidate(9) does not do that.
+ */
+ pool_cache_invalidate(&pmap_pdp_cache);
mutex_enter(&pmaps_lock);
-
LIST_FOREACH(pm, &pmaps, pm_list) {
for (i = 0; i < PDP_SIZE; i++) {
xpq_queue_pte_update(
xpmap_ptom(pmap_pdirpa(pm, PDIR_SLOT_PTE + i)), 0);
}
}
-
mutex_exit(&pmaps_lock);
/* do it for pmap_kernel() too! */
diff -r 87ddb56a74d1 -r 0567229128e9 sys/arch/xen/xen/xen_machdep.c
--- a/sys/arch/xen/xen/xen_machdep.c Sun Nov 20 18:42:56 2011 +0000
+++ b/sys/arch/xen/xen/xen_machdep.c Sun Nov 20 19:41:27 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xen_machdep.c,v 1.8 2011/09/20 00:12:24 jym Exp $ */
+/* $NetBSD: xen_machdep.c,v 1.9 2011/11/20 19:41:27 jym Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -53,7 +53,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.8 2011/09/20 00:12:24 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_machdep.c,v 1.9 2011/11/20 19:41:27 jym Exp $");
#include "opt_xen.h"
@@ -285,16 +285,7 @@
{
kpreempt_disable();
- /*
- * Xen lazy evaluation of recursive mappings requires
- * to flush the APDP entries
- */
- pmap_unmap_all_apdp_pdes();
-
-#ifdef PAE
- pmap_unmap_recursive_entries();
-#endif
-
+ pmap_xen_suspend();
xen_suspendclocks();
/*