Source-Changes-HG archive


[src/trunk]: src/sys/arch Move SVS into x86/svs.c



details:   https://anonhg.NetBSD.org/src/rev/f79294520f2b
branches:  trunk
changeset: 359458:f79294520f2b
user:      maxv <maxv%NetBSD.org@localhost>
date:      Sun Feb 11 09:39:36 2018 +0000

description:
Move SVS into x86/svs.c

diffstat:

 sys/arch/amd64/amd64/machdep.c |  393 +-------------------------------------
 sys/arch/x86/conf/files.x86    |    3 +-
 sys/arch/x86/x86/svs.c         |  426 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 430 insertions(+), 392 deletions(-)

diffs (truncated from 865 to 300 lines):

diff -r d58ef9f53310 -r f79294520f2b sys/arch/amd64/amd64/machdep.c
--- a/sys/arch/amd64/amd64/machdep.c    Sun Feb 11 08:27:18 2018 +0000
+++ b/sys/arch/amd64/amd64/machdep.c    Sun Feb 11 09:39:36 2018 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: machdep.c,v 1.297 2018/02/04 17:03:21 maxv Exp $       */
+/*     $NetBSD: machdep.c,v 1.298 2018/02/11 09:39:36 maxv Exp $       */
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.297 2018/02/04 17:03:21 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.298 2018/02/11 09:39:36 maxv Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -123,7 +123,6 @@
 #include "opt_realmem.h"
 #include "opt_xen.h"
 #include "opt_kaslr.h"
-#include "opt_svs.h"
 #ifndef XEN
 #include "opt_physmem.h"
 #endif
@@ -2236,391 +2235,3 @@
        return true;
 }
 #endif
-
-/* -------------------------------------------------------------------------- */
-
-#ifdef SVS
-/*
- * Separate Virtual Space
- *
- * A per-cpu L4 page is maintained in ci_svs_updirpa. During each context
- * switch to a user pmap, updirpa is populated with the entries of the new
- * pmap, minus what we don't want to have mapped in userland.
- *
- * Note on locking/synchronization here:
- *
- * (a) Touching ci_svs_updir without holding ci_svs_mtx first is *not*
- *     allowed.
- *
- * (b) pm_kernel_cpus contains the set of CPUs that have the pmap loaded
- *     in their CR3 register. It must *not* be replaced by pm_cpus.
- *
- * (c) When a context switch on the current CPU is made from a user LWP
- *     towards a kernel LWP, CR3 is not updated. Therefore, the pmap's
- *     pm_kernel_cpus still contains the current CPU. It implies that the
- *     remote CPUs that execute other threads of the user process we just
- *     left will keep synchronizing us against their changes.
- *
- * List of areas that are removed from userland:
- *     PTE Space         [OK]
- *     Direct Map        [OK]
- *     Remote PCPU Areas [OK]
- *     Kernel Heap       [OK]
- *     Kernel Image      [OK]
- *
- * TODO:
- *
- * (a) The NMI stack is not double-entered. Therefore if we ever receive
- *     an NMI and leave it, the content of the stack will be visible to
- *     userland (via Meltdown). Normally we never leave NMIs, unless a
- *     privileged user launched PMCs. That's unlikely to happen, our PMC
- *     support is pretty minimal.
- *
- * (b) Enable SVS depending on the CPU model, and add a sysctl to disable
- *     it dynamically.
- *
- * (c) Narrow down the entry points: hide the 'jmp handler' instructions.
- *     This makes sense on GENERIC_KASLR kernels.
- *
- * (d) Right now there is only one global LDT, and that's not compatible
- *     with USER_LDT.
- */
-
-struct svs_utls {
-       paddr_t kpdirpa;
-       uint64_t scratch;
-       vaddr_t rsp0;
-};
-
-static pd_entry_t *
-svs_tree_add(struct cpu_info *ci, vaddr_t va)
-{
-       extern const vaddr_t ptp_masks[];
-       extern const int ptp_shifts[];
-       extern const long nbpd[];
-       pd_entry_t *dstpde;
-       size_t i, pidx, mod;
-       struct vm_page *pg;
-       paddr_t pa;
-
-       dstpde = ci->ci_svs_updir;
-       mod = (size_t)-1;
-
-       for (i = PTP_LEVELS; i > 1; i--) {
-               pidx = pl_i(va % mod, i);
-
-               if (!pmap_valid_entry(dstpde[pidx])) {
-                       pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
-                       if (pg == 0)
-                               panic("%s: failed to allocate PA for CPU %d\n",
-                                       __func__, cpu_index(ci));
-                       pa = VM_PAGE_TO_PHYS(pg);
-
-                       dstpde[pidx] = PG_V | PG_RW | pa;
-               }
-
-               pa = (paddr_t)(dstpde[pidx] & PG_FRAME);
-               dstpde = (pd_entry_t *)PMAP_DIRECT_MAP(pa);
-               mod = nbpd[i-1];
-       }
-
-       return dstpde;
-}
-
-static void
-svs_page_add(struct cpu_info *ci, vaddr_t va)
-{
-       pd_entry_t *srcpde, *dstpde, pde;
-       size_t idx, pidx;
-       paddr_t pa;
-
-       /* Create levels L4, L3 and L2. */
-       dstpde = svs_tree_add(ci, va);
-
-       pidx = pl1_i(va % NBPD_L2);
-
-       /*
-        * If 'va' is in a large page, we need to compute its physical
-        * address manually.
-        */
-       idx = pl2_i(va);
-       srcpde = L2_BASE;
-       if (!pmap_valid_entry(srcpde[idx])) {
-               panic("%s: L2 page not mapped", __func__);
-       }
-       if (srcpde[idx] & PG_PS) {
-               pa = srcpde[idx] & PG_2MFRAME;
-               pa += (paddr_t)(va % NBPD_L2);
-               pde = (srcpde[idx] & ~(PG_PS|PG_2MFRAME)) | pa;
-
-               if (pmap_valid_entry(dstpde[pidx])) {
-                       panic("%s: L1 page already mapped", __func__);
-               }
-               dstpde[pidx] = pde;
-               return;
-       }
-
-       /*
-        * Normal page, just copy the PDE.
-        */
-       idx = pl1_i(va);
-       srcpde = L1_BASE;
-       if (!pmap_valid_entry(srcpde[idx])) {
-               panic("%s: L1 page not mapped", __func__);
-       }
-       if (pmap_valid_entry(dstpde[pidx])) {
-               panic("%s: L1 page already mapped", __func__);
-       }
-       dstpde[pidx] = srcpde[idx];
-}
-
-static void
-svs_rsp0_init(struct cpu_info *ci)
-{
-       const cpuid_t cid = cpu_index(ci);
-       vaddr_t va, rsp0;
-       pd_entry_t *pd;
-       size_t pidx;
-
-       rsp0 = (vaddr_t)&pcpuarea->ent[cid].rsp0;
-
-       /* The first page is a redzone. */
-       va = rsp0 + PAGE_SIZE;
-
-       /* Create levels L4, L3 and L2. */
-       pd = svs_tree_add(ci, va);
-
-       /* Get the info for L1. */
-       pidx = pl1_i(va % NBPD_L2);
-       if (pmap_valid_entry(pd[pidx])) {
-               panic("%s: rsp0 page already mapped", __func__);
-       }
-
-       ci->ci_svs_rsp0_pte = (pt_entry_t *)&pd[pidx];
-       ci->ci_svs_rsp0 = rsp0 + PAGE_SIZE + sizeof(struct trapframe);
-       ci->ci_svs_ursp0 = ci->ci_svs_rsp0 - sizeof(struct trapframe);
-       ci->ci_svs_krsp0 = 0;
-}
-
-static void
-svs_utls_init(struct cpu_info *ci)
-{
-       const vaddr_t utlsva = (vaddr_t)&pcpuarea->utls;
-       struct svs_utls *utls;
-       struct vm_page *pg;
-       pd_entry_t *pd;
-       size_t pidx;
-       paddr_t pa;
-       vaddr_t va;
-
-       /* Create levels L4, L3 and L2. */
-       pd = svs_tree_add(ci, utlsva);
-
-       /* Allocate L1. */
-       pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
-       if (pg == 0)
-               panic("%s: failed to allocate PA for CPU %d\n", __func__,
-                   cpu_index(ci));
-       pa = VM_PAGE_TO_PHYS(pg);
-
-       /* Enter L1. */
-       if (pmap_valid_entry(L1_BASE[pl1_i(utlsva)])) {
-               panic("%s: local page already mapped", __func__);
-       }
-       pidx = pl1_i(utlsva % NBPD_L2);
-       if (pmap_valid_entry(pd[pidx])) {
-               panic("%s: L1 page already mapped", __func__);
-       }
-       pd[pidx] = PG_V | PG_RW | pmap_pg_nx | pa;
-
-       /*
-        * Now, allocate a VA in the kernel map, that points to the UTLS
-        * page.
-        */
-       va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
-           UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
-       if (va == 0) {
-               panic("%s: unable to allocate VA\n", __func__);
-       }
-       pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
-       pmap_update(pmap_kernel());
-
-       ci->ci_svs_utls = va;
-
-       /* Initialize the constant fields of the UTLS page */
-       utls = (struct svs_utls *)ci->ci_svs_utls;
-       utls->rsp0 = ci->ci_svs_rsp0;
-}
-
-static void
-svs_range_add(struct cpu_info *ci, vaddr_t va, size_t size)
-{
-       size_t i, n;
-
-       KASSERT(size % PAGE_SIZE == 0);
-       n = size / PAGE_SIZE;
-       for (i = 0; i < n; i++) {
-               svs_page_add(ci, va + i * PAGE_SIZE);
-       }
-}
-
-void
-cpu_svs_init(struct cpu_info *ci)
-{
-       extern char __text_user_start;
-       extern char __text_user_end;
-       const cpuid_t cid = cpu_index(ci);
-       struct vm_page *pg;
-
-       KASSERT(ci != NULL);
-
-       pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
-       if (pg == 0)
-               panic("%s: failed to allocate L4 PA for CPU %d\n",
-                       __func__, cpu_index(ci));
-       ci->ci_svs_updirpa = VM_PAGE_TO_PHYS(pg);
-
-       ci->ci_svs_updir = (pt_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
-               UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
-       if (ci->ci_svs_updir == NULL)
-               panic("%s: failed to allocate L4 VA for CPU %d\n",
-                       __func__, cpu_index(ci));
-
-       pmap_kenter_pa((vaddr_t)ci->ci_svs_updir, ci->ci_svs_updirpa,
-               VM_PROT_READ | VM_PROT_WRITE, 0);
-
-       pmap_update(pmap_kernel());
-
-       ci->ci_svs_kpdirpa = pmap_pdirpa(pmap_kernel(), 0);
-
-       mutex_init(&ci->ci_svs_mtx, MUTEX_DEFAULT, IPL_VM);
-
-       svs_page_add(ci, (vaddr_t)&pcpuarea->idt);
-       svs_page_add(ci, (vaddr_t)&pcpuarea->ldt);
-       svs_range_add(ci, (vaddr_t)&pcpuarea->ent[cid],
-           offsetof(struct pcpu_entry, rsp0));
-       svs_range_add(ci, (vaddr_t)&__text_user_start,
-           (vaddr_t)&__text_user_end - (vaddr_t)&__text_user_start);
-
-       svs_rsp0_init(ci);
-       svs_utls_init(ci);
-}
-
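
The hunks above cover only the code being removed from machdep.c; the switch-time half of the mechanism that the big SVS comment describes (populating ci_svs_updirpa with the entries of the incoming user pmap and loading it into %cr3) is presumably part of the new x86/svs.c but falls outside the 300 lines shown here. As a rough, self-contained sketch of that idea only, and not the NetBSD code itself, the model below copies a pmap's top-level entries into a per-CPU shadow directory and clears the slots that must stay invisible to userland. All names and slot numbers in it (model_svs_pdir_switch, L4_NENTRIES, kernel_only_slots) are illustrative assumptions.

/*
 * Self-contained model of the switch-time step described in the SVS
 * comment above: take the incoming user pmap's top-level (L4) entries,
 * copy them into the per-CPU shadow directory, and clear the slots
 * that must not be visible to userland.  Names and slot numbers are
 * placeholders, not the real NetBSD ones.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define L4_NENTRIES 512			/* entries in an x86-64 top-level dir */

typedef uint64_t pd_entry_t;

struct model_cpu {			/* stand-in for struct cpu_info */
	pd_entry_t updir[L4_NENTRIES];	/* stand-in for ci_svs_updir */
};

/* Placeholder L4 slots holding kernel-only mappings (direct map, etc.). */
static const int kernel_only_slots[] = { 256, 480, 511 };

static void
model_svs_pdir_switch(struct model_cpu *ci, const pd_entry_t *user_l4)
{
	size_t i;

	/* "populated with the entries of the new pmap" ... */
	memcpy(ci->updir, user_l4, sizeof(ci->updir));

	/* ... "minus what we don't want to have mapped in userland". */
	for (i = 0; i < sizeof(kernel_only_slots) / sizeof(kernel_only_slots[0]); i++)
		ci->updir[kernel_only_slots[i]] = 0;

	/* The real kernel would now load the shadow dir's PA into %cr3. */
}

int
main(void)
{
	static pd_entry_t fake_pmap[L4_NENTRIES];
	static struct model_cpu cpu;
	size_t i;

	for (i = 0; i < L4_NENTRIES; i++)
		fake_pmap[i] = ((pd_entry_t)i << 12) | 0x1;	/* pretend-valid */

	model_svs_pdir_switch(&cpu, fake_pmap);
	printf("slot 0: %#llx, slot 511: %#llx\n",
	    (unsigned long long)cpu.updir[0],
	    (unsigned long long)cpu.updir[511]);
	return 0;
}

In the real kernel the shadow directory is built incrementally at boot via svs_page_add/svs_range_add above, and any switch-time update is done under ci_svs_mtx per note (a) in the comment; the model only illustrates the "entries of the new pmap, minus what we don't want mapped in userland" idea.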


