Source-Changes-HG archive
[src/cherry-xenmp]: src/sys/arch/xen/x86 fix amd64 boot.
details: https://anonhg.NetBSD.org/src/rev/b67c077bd522
branches: cherry-xenmp
changeset: 765640:b67c077bd522
user: cherry <cherry%NetBSD.org@localhost>
date: Fri Sep 09 11:53:43 2011 +0000
description:
fix amd64 boot.
diffstat:
sys/arch/xen/x86/cpu.c | 53 ++++++++++++++++++++++++++++++++++---------
sys/arch/xen/x86/x86_xpmap.c | 8 +++---
2 files changed, 45 insertions(+), 16 deletions(-)
diffs (154 lines):
diff -r abf3ce1815c0 -r b67c077bd522 sys/arch/xen/x86/cpu.c
--- a/sys/arch/xen/x86/cpu.c Fri Sep 09 11:38:20 2011 +0000
+++ b/sys/arch/xen/x86/cpu.c Fri Sep 09 11:53:43 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.56.2.10 2011/09/01 08:04:46 cherry Exp $ */
+/* $NetBSD: cpu.c,v 1.56.2.11 2011/09/09 11:53:43 cherry Exp $ */
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */
/*-
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.56.2.10 2011/09/01 08:04:46 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.56.2.11 2011/09/09 11:53:43 cherry Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@@ -712,6 +712,7 @@
* work.
*/
extern void x86_64_tls_switch(struct lwp *);
+
void
cpu_hatch(void *v)
{
@@ -918,7 +919,8 @@
* Use pmap_kernel() L4 PD directly, until we set up the
* per-cpu L4 PD in pmap_cpu_init_late()
*/
- initctx->ctrlreg[3] = xpmap_ptom(pcb->pcb_cr3);
+
+ initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_kpm_pdirpa)));
initctx->ctrlreg[4] = CR4_PAE | CR4_OSFXSR | CR4_OSXMMEXCPT;
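The new ctrlreg[3] value is built in three steps: the per-cpu L4 page's
physical address is translated to a machine address (xpmap_ptom()), the
byte address becomes a machine frame number (x86_btop()), and the frame
number is encoded the way Xen expects cr3 (xen_pfn_to_cr3()). Below is a
minimal standalone sketch of that pipeline; the phys-to-machine lookup
is stubbed as identity and the amd64 encoding (mfn << 12) is assumed -
the real definitions live in the NetBSD and Xen headers.

#include <stdint.h>

#define PAGE_SHIFT	12

/* In a real Xen PV guest this is a lookup in the phys-to-machine
 * table; it is stubbed as identity here, for illustration only. */
static uint64_t
xpmap_ptom_stub(uint64_t pa)
{
	return pa;
}

/* PA of the per-cpu L4 page -> value suitable for vcpu ctrlreg[3]. */
static uint64_t
pdirpa_to_xen_cr3(uint64_t pdirpa)
{
	uint64_t ma = xpmap_ptom_stub(pdirpa);	/* PA -> MA (xpmap_ptom) */
	uint64_t mfn = ma >> PAGE_SHIFT;	/* MA -> MFN (x86_btop) */
	return mfn << PAGE_SHIFT;		/* MFN -> cr3 (xen_pfn_to_cr3) */
}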
@@ -1281,7 +1283,7 @@
}
if (__predict_true(pmap != pmap_kernel())) {
- xen_set_user_pgd(ci->ci_kpm_pdirpa);
+ xen_set_user_pgd(pmap_pdirpa(pmap, 0));
ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0);
}
else {
@@ -1299,6 +1301,27 @@
/*
* pmap_cpu_init_late: perform late per-CPU initialization.
+ * A short note about per-cpu PDIR pages:
+ * Both the PAE and __x86_64__ architectures have per-cpu PDIR
+ * tables. This is to get around Xen's pagetable setup constraints for
+ * PAE (multiple L3[3]s cannot point to the same L2 - Xen
+ * will refuse to pin a table set up this way) and for multiple cpus
+ * to map in different user pmaps on __x86_64__ (see: cpu_load_pmap()).
+ *
+ * What this means for us is that the PDIR of the pmap_kernel() is
+ * considered to be a canonical "SHADOW" PDIR with the following
+ * properties:
+ * - Its recursive mapping points to itself
+ * - per-cpu recursive mappings point to themselves
+ * - per-cpu L4 pages' kernel entries are expected to be in sync with
+ * the shadow
+ * - APDP_PDE_SHADOW accesses the shadow pdir
+ * - APDP_PDE accesses the per-cpu pdir
+ * - alternate mappings are considered per-cpu - however, the x86 pmap
+ * currently partially consults the shadow - this works because the
+ * shadow PDE is updated together with the per-cpu entry (see
+ * xen_pmap.c:pmap_map_ptes()), and the pmap is locked while the
+ * alternate ptes are mapped in.
*/
void
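A rough sketch of the sync rule the comment above imposes: kernel L4
updates land in the canonical shadow PDIR first and are then mirrored
into every CPU's private copy. All names below (shadow_pdir,
percpu_pdir, kernel_slot_update) are hypothetical stand-ins for
pmap_kernel()->pm_pdir, ci_kpm_pdir and the pmap-internal update path,
which also holds the pmap lock while doing this.

#include <stddef.h>
#include <stdint.h>

typedef uint64_t pd_entry_t;

#define NCPU	8	/* illustration only */
#define NPDPG	512	/* L4 entries per page on amd64 */

static pd_entry_t shadow_pdir[NPDPG];		/* the "SHADOW" PD */
static pd_entry_t percpu_pdir[NCPU][NPDPG];	/* one private PD per cpu */

/* Update a kernel slot in the canonical shadow first, then keep the
 * per-cpu PDs in sync with it, as the comment above requires. */
static void
kernel_slot_update(size_t slot, pd_entry_t pde)
{
	shadow_pdir[slot] = pde;
	for (int ci = 0; ci < NCPU; ci++)
		percpu_pdir[ci][slot] = pde;
}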
@@ -1315,7 +1338,7 @@
KASSERT(ci != NULL);
-#ifdef PAE
+#if defined(PAE)
ci->ci_pae_l3_pdir = (paddr_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);
@@ -1344,26 +1367,32 @@
}
ci->ci_kpm_pdirpa = vtophys((vaddr_t) ci->ci_kpm_pdir);
KASSERT(ci->ci_kpm_pdirpa != 0);
+
+#if defined(__x86_64__)
/*
- * Copy over kernel pmd entries from boot
- * cpu. XXX:locking/races
+ * Copy over the pmap_kernel() shadow L4 entries
*/
- memcpy(ci->ci_kpm_pdir,
- &pmap_kernel()->pm_pdir[PDIR_SLOT_KERN],
- nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t));
+ memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir, PAGE_SIZE);
+
+ /* Recursive kernel mapping */
+ ci->ci_kpm_pdir[PDIR_SLOT_PTE] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;
+#elif defined(PAE)
+ /* Copy over the pmap_kernel() shadow L2 entries that map the kernel */
+ memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN, nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t));
+#endif /* __x86_64__ else PAE */
/* Xen wants R/O */
pmap_kenter_pa((vaddr_t)ci->ci_kpm_pdir, ci->ci_kpm_pdirpa,
VM_PROT_READ, 0);
-#ifdef PAE
+#if defined(PAE)
/* Initialise L3 entry 3. This mapping is shared across all
* pmaps and is static, i.e., loading a new pmap will not update
* this entry.
*/
- ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_V;
+ ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PG_k | PG_V;
/* Mark L3 R/O (Xen wants this) */
pmap_kenter_pa((vaddr_t)ci->ci_pae_l3_pdir, ci->ci_pae_l3_pdirpa,
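Condensed, the amd64 branch of the per-cpu L4 setup above does three
things: clone the shadow, repoint the recursive slot, and hand the page
to Xen read-only. A simplified sketch follows; the PDIR_SLOT_PTE value
is a placeholder, PG_k is folded into PG_V for brevity, and the Xen
remap step is reduced to a comment.

#include <string.h>
#include <stdint.h>

typedef uint64_t pd_entry_t;

#define NPDPG		512
#define PDIR_SLOT_PTE	255	/* illustrative slot number */
#define PG_V		0x001	/* x86 PTE "valid" bit */

static void
percpu_l4_setup(pd_entry_t *kpm_pdir, const pd_entry_t *shadow_pdir,
    uint64_t kpm_pdir_ma)
{
	/* 1. start from a full copy of the shadow (kernel) L4 */
	memcpy(kpm_pdir, shadow_pdir, NPDPG * sizeof(pd_entry_t));

	/* 2. make the recursive slot point at *this* L4, not the shadow */
	kpm_pdir[PDIR_SLOT_PTE] = kpm_pdir_ma | PG_V;

	/* 3. the page must then be remapped read-only, since Xen refuses
	 * to run on writable pagetable pages; in the diff this is
	 * pmap_kenter_pa(..., VM_PROT_READ, 0). */
}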
diff -r abf3ce1815c0 -r b67c077bd522 sys/arch/xen/x86/x86_xpmap.c
--- a/sys/arch/xen/x86/x86_xpmap.c Fri Sep 09 11:38:20 2011 +0000
+++ b/sys/arch/xen/x86/x86_xpmap.c Fri Sep 09 11:53:43 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: x86_xpmap.c,v 1.26.2.8 2011/08/30 12:53:46 cherry Exp $ */
+/* $NetBSD: x86_xpmap.c,v 1.26.2.9 2011/09/09 11:53:43 cherry Exp $ */
/*
* Copyright (c) 2006 Mathieu Ropert <mro%adviseo.fr@localhost>
@@ -69,7 +69,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.26.2.8 2011/08/30 12:53:46 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.26.2.9 2011/09/09 11:53:43 cherry Exp $");
#include "opt_xen.h"
#include "opt_ddb.h"
@@ -975,11 +975,11 @@
#endif
#else /* PAE */
/* recursive entry in higher-level per-cpu PD and pmap_kernel() */
- bt_pgd[PDIR_SLOT_PTE] =
+ bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) | PG_k | PG_V;
#ifdef __x86_64__
bt_cpu_pgd[PDIR_SLOT_PTE] =
+ xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) | PG_k | PG_V;
#endif /* __x86_64__ */
- xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
__PRINTK(("bt_cpu_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
" entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
bt_pgd[PDIR_SLOT_PTE]));
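The x86_xpmap.c hunk above splits what was one chained assignment -
both recursive slots receiving the same new_pgd-based value, so on
amd64 one of the two top-level PDs pointed at the other page - into two
independent stores, each pointing a PD's recursive slot back at its own
page. In miniature (hypothetical names; ptom() stands in for
xpmap_ptom_masked(), PG_k folded into PG_V):

#include <stdint.h>

typedef uint64_t pd_entry_t;

#define PDIR_SLOT_PTE	255	/* illustrative */
#define PG_V		0x001

static uint64_t ptom(uint64_t pa) { return pa; }	/* identity stub */

static void
install_recursive_slots(pd_entry_t *bt_pgd, uint64_t bt_pgd_pa,
    pd_entry_t *bt_cpu_pgd, uint64_t bt_cpu_pgd_pa)
{
	/* each PD's recursive entry now names its own page */
	bt_pgd[PDIR_SLOT_PTE] = ptom(bt_pgd_pa) | PG_V;
	bt_cpu_pgd[PDIR_SLOT_PTE] = ptom(bt_cpu_pgd_pa) | PG_V;
}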