Source-Changes-HG archive
[src/rmind-uvmplock]: src/sys/arch - Split off Xen versions of pmap_map_ptes/...
details: https://anonhg.NetBSD.org/src/rev/6f2dd73fb8c2
branches: rmind-uvmplock
changeset: 753062:6f2dd73fb8c2
user: rmind <rmind%NetBSD.org@localhost>
date: Mon May 31 01:12:13 2010 +0000
description:
- Split off the Xen versions of pmap_map_ptes/pmap_unmap_ptes into the Xen pmap,
  and also move pmap_apte_flush() and pmap_unmap_apdp() there.
- Make Xen buildable.
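For orientation, the xen_pmap.c hunk itself falls past the truncation point
below; the following is only a sketch of how the two smaller moved helpers
would look in sys/arch/xen/x86/xen_pmap.c, assuming their bodies are carried
over verbatim from pmap.c and merely lose the static qualifier so that they
match the new prototypes exported from x86/include/pmap.h:

/*
 * Sketch only (not the committed xen_pmap.c code, which is truncated below):
 * the helpers as moved out of pmap.c, now external so the native pmap and
 * the Xen pmap can share the declarations in pmap.h.
 */
void
pmap_apte_flush(struct pmap *pmap)
{

	KASSERT(kpreempt_disabled());

	/*
	 * Flush the APTE mapping from all other CPUs that
	 * are using the pmap we are using (who's APTE space
	 * is the one we've just modified).
	 *
	 * XXXthorpej -- find a way to defer the IPI.
	 */
	pmap_tlb_shootdown(pmap, (vaddr_t)-1LL, 0, TLBSHOOT_APTE);
	pmap_tlb_shootnow();
}

/*
 * Unmap the content of APDP PDEs
 */
void
pmap_unmap_apdp(void)
{
	int i;

	for (i = 0; i < PDP_SIZE; i++) {
		pmap_pte_set(APDP_PDE+i, 0);
#if defined (PAE)
		/* clear shadow entries too */
		pmap_pte_set(APDP_PDE_SHADOW+i, 0);
#endif
	}
}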
diffstat:
 sys/arch/x86/include/cpuvar.h |    4 +-
 sys/arch/x86/include/pmap.h   |    5 +-
 sys/arch/x86/x86/pmap.c       |  190 +---------------------------------------
 sys/arch/x86/x86/pmap_tlb.c   |   16 ++-
 sys/arch/xen/conf/files.xen   |    3 +-
 sys/arch/xen/x86/cpu.c        |    7 +-
 sys/arch/xen/x86/xen_pmap.c   |  196 +++++++++++++++++++++++++++++++++++++++++-
 7 files changed, 221 insertions(+), 200 deletions(-)
diffs (truncated from 640 to 300 lines):
diff -r c860ae39de0a -r 6f2dd73fb8c2 sys/arch/x86/include/cpuvar.h
--- a/sys/arch/x86/include/cpuvar.h Sun May 30 05:16:34 2010 +0000
+++ b/sys/arch/x86/include/cpuvar.h Mon May 31 01:12:13 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpuvar.h,v 1.31.4.1 2010/05/30 05:17:12 rmind Exp $ */
+/* $NetBSD: cpuvar.h,v 1.31.4.2 2010/05/31 01:12:13 rmind Exp $ */
/*-
* Copyright (c) 2000, 2007 The NetBSD Foundation, Inc.
@@ -96,9 +96,7 @@
#endif
#endif /* defined(_KERNEL_OPT) */
-#ifdef MULTIPROCESSOR
extern uint32_t cpus_running;
-#endif
int x86_ipi(int, int, int);
void x86_self_ipi(int);
diff -r c860ae39de0a -r 6f2dd73fb8c2 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h Sun May 30 05:16:34 2010 +0000
+++ b/sys/arch/x86/include/pmap.h Mon May 31 01:12:13 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.29.2.5 2010/05/30 05:17:12 rmind Exp $ */
+/* $NetBSD: pmap.h,v 1.29.2.6 2010/05/31 01:12:13 rmind Exp $ */
/*
*
@@ -436,6 +436,9 @@
paddr_t vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
+void pmap_apte_flush(struct pmap *);
+void pmap_unmap_apdp(void);
+
#endif /* XEN */
/* pmap functions with machine addresses */
diff -r c860ae39de0a -r 6f2dd73fb8c2 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Sun May 30 05:16:34 2010 +0000
+++ b/sys/arch/x86/x86/pmap.c Mon May 31 01:12:13 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.105.2.8 2010/05/30 05:17:13 rmind Exp $ */
+/* $NetBSD: pmap.c,v 1.105.2.9 2010/05/31 01:12:14 rmind Exp $ */
/*-
* Copyright (c) 2008, 2010 The NetBSD Foundation, Inc.
@@ -177,7 +177,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.105.2.8 2010/05/30 05:17:13 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.105.2.9 2010/05/31 01:12:14 rmind Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -328,7 +328,6 @@
const long nkptpmax[] = NKPTPMAX_INITIALIZER;
const long nbpd[] = NBPD_INITIALIZER;
pd_entry_t * const normal_pdes[] = PDES_INITIALIZER;
-pd_entry_t * const alternate_pdes[] = APDES_INITIALIZER;
long nkptp[] = NKPTP_INITIALIZER;
@@ -336,8 +335,6 @@
static vaddr_t pmap_maxkvaddr;
-#define COUNT(x) /* nothing */
-
/*
* XXX kludge: dummy locking to make KASSERTs in uvm_page.c comfortable.
* actual locking is done by pm_lock.
@@ -571,9 +568,6 @@
vaddr_t, vaddr_t, vaddr_t,
struct pv_entry **);
-#ifdef XEN
-static void pmap_unmap_apdp(void);
-#endif
static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int,
long *);
@@ -706,43 +700,6 @@
(kernel && (pmap->pm_kernel_cpus & ci->ci_cpumask) != 0));
}
-#ifdef XEN
-static void
-pmap_apte_flush(struct pmap *pmap)
-{
-
- KASSERT(kpreempt_disabled());
-
- /*
- * Flush the APTE mapping from all other CPUs that
- * are using the pmap we are using (who's APTE space
- * is the one we've just modified).
- *
- * XXXthorpej -- find a way to defer the IPI.
- */
- pmap_tlb_shootdown(pmap, (vaddr_t)-1LL, 0, TLBSHOOT_APTE);
- pmap_tlb_shootnow();
-}
-
-/*
- * Unmap the content of APDP PDEs
- */
-static void
-pmap_unmap_apdp(void)
-{
- int i;
-
- for (i = 0; i < PDP_SIZE; i++) {
- pmap_pte_set(APDP_PDE+i, 0);
-#if defined (PAE)
- /* clear shadow entries too */
- pmap_pte_set(APDP_PDE_SHADOW+i, 0);
-#endif
- }
-}
-
-#endif /* XEN */
-
/*
* Add a reference to the specified pmap.
*/
@@ -754,6 +711,8 @@
atomic_inc_uint(&pmap->pm_obj[0].uo_refs);
}
+#ifndef XEN
+
/*
* pmap_map_ptes: map a pmap's PTEs into KVM and lock them in
*
@@ -765,116 +724,6 @@
pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
{
-#ifdef XEN
- pd_entry_t opde, npde;
- struct pmap *ourpmap;
- struct cpu_info *ci;
- struct lwp *l;
- bool iscurrent;
- uint64_t ncsw;
- int s;
-
- /* the kernel's pmap is always accessible */
- if (pmap == pmap_kernel()) {
- *pmap2 = NULL;
- *ptepp = PTE_BASE;
- *pdeppp = normal_pdes;
- return;
- }
- KASSERT(kpreempt_disabled());
-
- retry:
- l = curlwp;
- ncsw = l->l_ncsw;
- ourpmap = NULL;
- ci = curcpu();
-#if defined(__x86_64__)
- /*
- * curmap can only be pmap_kernel so at this point
- * pmap_is_curpmap is always false
- */
- iscurrent = 0;
- ourpmap = pmap_kernel();
-#else /* __x86_64__*/
- if (ci->ci_want_pmapload &&
- vm_map_pmap(&l->l_proc->p_vmspace->vm_map) == pmap) {
- pmap_load();
- if (l->l_ncsw != ncsw)
- goto retry;
- }
- iscurrent = pmap_is_curpmap(pmap);
- /* if curpmap then we are always mapped */
- if (iscurrent) {
- mutex_enter(pmap->pm_lock);
- *pmap2 = NULL;
- *ptepp = PTE_BASE;
- *pdeppp = normal_pdes;
- goto out;
- }
- ourpmap = ci->ci_pmap;
-#endif /* __x86_64__ */
-
- /* need to lock both curpmap and pmap: use ordered locking */
- pmap_reference(ourpmap);
- if ((uintptr_t) pmap < (uintptr_t) ourpmap) {
- mutex_enter(pmap->pm_lock);
- mutex_enter(ourpmap->pm_lock);
- } else {
- mutex_enter(ourpmap->pm_lock);
- mutex_enter(pmap->pm_lock);
- }
-
- if (l->l_ncsw != ncsw)
- goto unlock_and_retry;
-
- /* need to load a new alternate pt space into curpmap? */
- COUNT(apdp_pde_map);
- opde = *APDP_PDE;
- if (!pmap_valid_entry(opde) ||
- pmap_pte2pa(opde) != pmap_pdirpa(pmap, 0)) {
- int i;
- s = splvm();
- /* Make recursive entry usable in user PGD */
- for (i = 0; i < PDP_SIZE; i++) {
- npde = pmap_pa2pte(
- pmap_pdirpa(pmap, i * NPDPG)) | PG_k | PG_V;
- xpq_queue_pte_update(
- xpmap_ptom(pmap_pdirpa(pmap, PDIR_SLOT_PTE + i)),
- npde);
- xpq_queue_pte_update(xpmap_ptetomach(&APDP_PDE[i]),
- npde);
-#ifdef PAE
- /* update shadow entry too */
- xpq_queue_pte_update(
- xpmap_ptetomach(&APDP_PDE_SHADOW[i]), npde);
-#endif /* PAE */
- xpq_queue_invlpg(
- (vaddr_t)&pmap->pm_pdir[PDIR_SLOT_PTE + i]);
- }
- if (pmap_valid_entry(opde))
- pmap_apte_flush(ourpmap);
- splx(s);
- }
- *pmap2 = ourpmap;
- *ptepp = APTE_BASE;
- *pdeppp = alternate_pdes;
- KASSERT(l->l_ncsw == ncsw);
-#if !defined(__x86_64__)
- out:
-#endif
- /*
- * might have blocked, need to retry?
- */
- if (l->l_ncsw != ncsw) {
- unlock_and_retry:
- if (ourpmap != NULL) {
- mutex_exit(ourpmap->pm_lock);
- pmap_destroy(ourpmap);
- }
- mutex_exit(pmap->pm_lock);
- goto retry;
- }
-#else /* XEN */
struct pmap *curpmap;
struct cpu_info *ci;
uint32_t cpumask;
@@ -931,7 +780,6 @@
*pmap2 = curpmap;
*ptepp = PTE_BASE;
*pdeppp = normal_pdes;
-#endif /* XEN */
}
/*
@@ -941,31 +789,6 @@
void
pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
{
-#ifdef XEN
-
- if (pmap == pmap_kernel()) {
- return;
- }
- KASSERT(kpreempt_disabled());
- if (pmap2 == NULL) {
- mutex_exit(pmap->pm_lock);
- } else {
-#if defined(__x86_64__)
- KASSERT(pmap2 == pmap_kernel());
-#else
- KASSERT(curcpu()->ci_pmap == pmap2);
-#endif
-#if defined(MULTIPROCESSOR)
- pmap_unmap_apdp();
- pmap_pte_flush();
- pmap_apte_flush(pmap2);
-#endif /* MULTIPROCESSOR */
- COUNT(apdp_pde_unmap);
- mutex_exit(pmap->pm_lock);
- mutex_exit(pmap2->pm_lock);
- pmap_destroy(pmap2);
- }
-#else /* XEN */
struct cpu_info *ci;
struct pmap *mypmap;
@@ -1005,9 +828,10 @@
*/
pmap_reference(pmap);
pmap_destroy(pmap2);
-#endif /* XEN */
}
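Both the native and Xen variants keep the same calling convention (see the
pmap_map_ptes() prototype in the pmap.c hunk above), so existing callers are
unaffected by the split. A minimal, illustrative caller pattern, not part of
this changeset and with hypothetical local names:

/*
 * Illustrative only: a typical pmap_map_ptes()/pmap_unmap_ptes() caller.
 * Not from this changeset; the function and local names are hypothetical,
 * but the calling convention matches the prototype shown above.
 */
static void
example_pte_walk(struct pmap *pmap, vaddr_t va)
{
	struct pmap *pmap2;
	pd_entry_t *ptes;
	pd_entry_t * const *pdes;

	kpreempt_disable();	/* the PTE mapping must not migrate CPUs */
	pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);

	/* ... look up or modify the PTE for va via ptes/pdes ... */

	pmap_unmap_ptes(pmap, pmap2);
	kpreempt_enable();
}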