Source-Changes-HG archive


[src/trunk]: src/sys/arch/arm Make the ARM pmap use ASIDs, split TTBRs, and MP safe



details:   https://anonhg.NetBSD.org/src/rev/becf8b879579
branches:  trunk
changeset: 795041:becf8b879579
user:      matt <matt%NetBSD.org@localhost>
date:      Sun Mar 30 15:50:51 2014 +0000

description:
Make the ARM pmap use ASIDs, split TTBRs, and be MP safe.  This only happens
for ARMv6 or later CPUs.  This means that on context switch the TLBs and
caches no longer need to be cleaned/flushed.  Also, eXecute Never (XN)
protection has been added so non-exec pages cannot be run.  Change the page
size for ARMv6+ to 8KB, which allows an L1PT to be a normal page.  This means
that the L1PT is not special.  Use the XN support to only sync pages that are
executed from.
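
For readers unfamiliar with the mechanism: the savings come from tagging TLB
entries with an address-space ID (ASID) and splitting the translation table
base between TTBR0 (user) and TTBR1 (kernel), so a context switch only has to
retarget the user half.  The sketch below is illustrative only; the accessor
names and pmap fields (write_ttbr0, write_contextidr, pm_l1_pa, pm_asid) are
hypothetical stand-ins, not the names this commit uses.

#include <stdint.h>

typedef uint32_t paddr_t;		/* physical address (32-bit ARM) */

struct pmap_sketch {
	paddr_t	pm_l1_pa;		/* PA of this pmap's user L1 table */
	uint8_t	pm_asid;		/* ASID assigned to this pmap */
};

/* Hypothetical cp15 accessors standing in for the real register writes. */
extern void write_ttbr0(paddr_t ttb);		/* user table base */
extern void write_contextidr(uint32_t asid);	/* current ASID */
extern void isb(void);				/* instruction sync barrier */

static void
ctxsw_sketch(const struct pmap_sketch *newpm)
{
	/*
	 * TTBR1 keeps pointing at the kernel L1 table, so only the user
	 * half of the address space changes hands.  A real implementation
	 * must also close the window between the two writes (e.g. by
	 * parking on a reserved ASID) to avoid mis-tagged speculation.
	 */
	write_contextidr(newpm->pm_asid);	/* retag; no TLB flush */
	isb();
	write_ttbr0(newpm->pm_l1_pa);		/* switch user tables */
	isb();
	/*
	 * Stale TLB entries for other pmaps survive, but they carry a
	 * different ASID and can never match this pmap's lookups.
	 */
}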

diffstat:

 sys/arch/arm/arm32/pmap.c          |  2316 ++++++++++++++++++++++++------------
 sys/arch/arm/conf/files.arm        |    71 +-
 sys/arch/arm/include/arm32/param.h |    21 +-
 sys/arch/arm/include/arm32/pmap.h  |    17 +-
 sys/arch/arm/include/cpuconf.h     |    11 +-
 5 files changed, 1613 insertions(+), 823 deletions(-)

diffs (truncated from 4936 to 300 lines):

diff -r 3bf41b589f8c -r becf8b879579 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sun Mar 30 15:26:15 2014 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sun Mar 30 15:50:51 2014 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.270 2014/02/27 18:10:01 joerg Exp $ */
+/*     $NetBSD: pmap.c,v 1.271 2014/03/30 15:50:51 matt Exp $  */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -192,25 +192,33 @@
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 
+#ifdef MULTIPROCESSOR
+#define _INTR_PRIVATE
+#endif
+
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/kernel.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
+#include <sys/intr.h>
 #include <sys/pool.h>
 #include <sys/kmem.h>
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 #include <sys/bus.h>
+#include <sys/atomic.h>
+#include <sys/kernhist.h>
 
 #include <uvm/uvm.h>
 
 #include <arm/locore.h>
-#include <arm/arm32/katelib.h>
-
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.270 2014/02/27 18:10:01 joerg Exp $");
-
+//#include <arm/arm32/katelib.h>
+
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.271 2014/03/30 15:50:51 matt Exp $");
+
+//#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
 
 /* XXX need to get rid of all refs to this */
@@ -240,7 +248,7 @@
 #define        PDB_EXEC        0x80000
 
 int debugmap = 1;
-int pmapdebug = 0; 
+int pmapdebug = 0;
 #define        NPDEBUG(_lev_,_stat_) \
        if (pmapdebug & (_lev_)) \
                ((_stat_))
@@ -252,8 +260,16 @@
 /*
  * pmap_kernel() points here
  */
-static struct pmap     kernel_pmap_store;
-struct pmap            *const kernel_pmap_ptr = &kernel_pmap_store;
+static struct pmap     kernel_pmap_store = {
+#ifndef ARM_MMU_EXTENDED
+       .pm_activated = true,
+       .pm_domain = PMAP_DOMAIN_KERNEL,
+       .pm_cstate.cs_all = PMAP_CACHE_STATE_ALL,
+#endif
+};
+struct pmap * const    kernel_pmap_ptr = &kernel_pmap_store;
+#undef pmap_kernel
+#define pmap_kernel()  (&kernel_pmap_store)
 #ifdef PMAP_NEED_ALLOC_POOLPAGE
 int                    arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
 #endif
@@ -297,7 +313,7 @@
 #define        PMAP_EVCNT_INITIALIZER(name) \
        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
 
-#ifdef PMAP_CACHE_VIPT
+#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
 static struct evcnt pmap_ev_vac_clean_one =
    PMAP_EVCNT_INITIALIZER("clean page (1 color)");
 static struct evcnt pmap_ev_vac_flush_one =
@@ -374,6 +390,27 @@
 EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
 EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);
 
+static struct evcnt pmap_ev_fixup_mod =
+   PMAP_EVCNT_INITIALIZER("page modification emulations");
+static struct evcnt pmap_ev_fixup_ref =
+   PMAP_EVCNT_INITIALIZER("page reference emulations");
+static struct evcnt pmap_ev_fixup_exec =
+   PMAP_EVCNT_INITIALIZER("exec pages fixed up");
+static struct evcnt pmap_ev_fixup_pdes =
+   PMAP_EVCNT_INITIALIZER("pdes fixed up");
+#ifndef ARM_MMU_EXTENDED
+static struct evcnt pmap_ev_fixup_ptesync =
+   PMAP_EVCNT_INITIALIZER("ptesync fixed");
+#endif
+
+EVCNT_ATTACH_STATIC(pmap_ev_fixup_mod);
+EVCNT_ATTACH_STATIC(pmap_ev_fixup_ref);
+EVCNT_ATTACH_STATIC(pmap_ev_fixup_exec);
+EVCNT_ATTACH_STATIC(pmap_ev_fixup_pdes);
+#ifndef ARM_MMU_EXTENDED
+EVCNT_ATTACH_STATIC(pmap_ev_fixup_ptesync);
+#endif
+
 #ifdef PMAP_CACHE_VIPT
 static struct evcnt pmap_ev_exec_mappings =
    PMAP_EVCNT_INITIALIZER("exec pages mapped");
@@ -385,6 +422,7 @@
 
 static struct evcnt pmap_ev_exec_synced =
    PMAP_EVCNT_INITIALIZER("exec pages synced");
+#ifndef ARM_MMU_EXTENDED
 static struct evcnt pmap_ev_exec_synced_map =
    PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
 static struct evcnt pmap_ev_exec_synced_unmap =
@@ -395,13 +433,16 @@
    PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
 static struct evcnt pmap_ev_exec_synced_kremove =
    PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");
+#endif
 
 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
+#ifndef ARM_MMU_EXTENDED
 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);
+#endif
 
 static struct evcnt pmap_ev_exec_discarded_unmap =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
@@ -415,6 +456,10 @@
    PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
 static struct evcnt pmap_ev_exec_discarded_kremove =
    PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");
+#ifdef ARM_MMU_EXTENDED
+static struct evcnt pmap_ev_exec_discarded_modfixup =
+   PMAP_EVCNT_INITIALIZER("exec pages discarded (MF)");
+#endif
 
 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
@@ -422,6 +467,9 @@
 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
 EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
+#ifdef ARM_MMU_EXTENDED
+EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_modfixup);
+#endif
 #endif /* PMAP_CACHE_VIPT */
 
 static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
@@ -442,9 +490,21 @@
  */
 static pt_entry_t *csrc_pte, *cdst_pte;
 static vaddr_t csrcp, cdstp;
-vaddr_t memhook;                       /* used by mem.c */
-kmutex_t memlock;                      /* used by mem.c */
-void *zeropage;                                /* used by mem.c */
+#ifdef MULTIPROCESSOR
+static size_t cnptes;
+#define        cpu_csrc_pte(o) (csrc_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT))
+#define        cpu_cdst_pte(o) (cdst_pte + cnptes * cpu_number() + ((o) >> L2_S_SHIFT))
+#define        cpu_csrcp(o)    (csrcp + L2_S_SIZE * cnptes * cpu_number() + (o))
+#define        cpu_cdstp(o)    (cdstp + L2_S_SIZE * cnptes * cpu_number() + (o))
+#else
+#define        cpu_csrc_pte(o) (csrc_pte + ((o) >> L2_S_SHIFT))
+#define        cpu_cdst_pte(o) (cdst_pte + ((o) >> L2_S_SHIFT))
+#define        cpu_csrcp(o)    (csrcp + (o))
+#define        cpu_cdstp(o)    (cdstp + (o))
+#endif
+vaddr_t memhook;                       /* used by mem.c & others */
+kmutex_t memlock __cacheline_aligned;  /* used by mem.c & others */
+kmutex_t pmap_lock __cacheline_aligned;
 extern void *msgbufaddr;
 int pmap_kmpages;
 /*
@@ -456,22 +516,55 @@
  * Misc. locking data structures
  */
 
-#define        pmap_acquire_pmap_lock(pm)                      \
-       do {                                            \
-               if ((pm) != pmap_kernel())              \
-                       mutex_enter((pm)->pm_lock);     \
-       } while (/*CONSTCOND*/0)
-
-#define        pmap_release_pmap_lock(pm)                      \
-       do {                                            \
-               if ((pm) != pmap_kernel())              \
-                       mutex_exit((pm)->pm_lock);      \
-       } while (/*CONSTCOND*/0)
+static inline void
+pmap_acquire_pmap_lock(pmap_t pm)
+{
+       if (pm == pmap_kernel()) {
+#ifdef MULTIPROCESSOR
+               KERNEL_LOCK(1, NULL);
+#endif
+       } else {
+               mutex_enter(pm->pm_lock);
+       }
+}
+
+static inline void
+pmap_release_pmap_lock(pmap_t pm)
+{
+       if (pm == pmap_kernel()) {
+#ifdef MULTIPROCESSOR
+               KERNEL_UNLOCK_ONE(NULL);
+#endif
+       } else {
+               mutex_exit(pm->pm_lock);
+       }
+}
+
+static inline void
+pmap_acquire_page_lock(struct vm_page_md *md)
+{
+       mutex_enter(&pmap_lock);
+}
+
+static inline void
+pmap_release_page_lock(struct vm_page_md *md)
+{
+       mutex_exit(&pmap_lock);
+}
+
+#ifdef DIAGNOSTIC
+static inline int
+pmap_page_locked_p(struct vm_page_md *md)
+{
+       return mutex_owned(&pmap_lock);
+}
+#endif
 
 
 /*
  * Metadata for L1 translation tables.
  */
+#ifndef ARM_MMU_EXTENDED
 struct l1_ttable {
        /* Entry on the L1 Table list */
        SLIST_ENTRY(l1_ttable) l1_link;
@@ -498,15 +591,6 @@
 };
 
 /*
- * Convert a virtual address into its L1 table index. That is, the
- * index used to locate the L2 descriptor table pointer in an L1 table.
- * This is basically used to index l1->l1_kva[].
- *
- * Each L2 descriptor table represents 1MB of VA space.
- */
-#define        L1_IDX(va)              (((vaddr_t)(va)) >> L1_S_SHIFT)
-
-/*
  * L1 Page Tables are tracked using a Least Recently Used list.
  *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
@@ -520,6 +604,7 @@
  * A list of all L1 tables
  */
 static SLIST_HEAD(, l1_ttable) l1_list;
+#endif /* ARM_MMU_EXTENDED */
 
 /*
  * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
@@ -535,8 +620,8 @@
        /* List of L2 page descriptors */
        struct l2_bucket {
                pt_entry_t *l2b_kva;    /* KVA of L2 Descriptor Table */
-               paddr_t l2b_phys;       /* Physical address of same */
-               u_short l2b_l1idx;      /* This L2 table's L1 index */
+               paddr_t l2b_pa;         /* Physical address of same */
+               u_short l2b_l1slot;     /* This L2 table's L1 index */
                u_short l2b_occupancy;  /* How many active descriptors */
        } l2_bucket[L2_BUCKET_SIZE];
 };
@@ -545,15 +630,20 @@
  * Given an L1 table index, calculate the corresponding l2_dtable index
  * and bucket index within the l2_dtable.
  */
-#define        L2_IDX(l1idx)           (((l1idx) >> L2_BUCKET_LOG2) & \
-                                (L2_SIZE - 1))
-#define        L2_BUCKET(l1idx)        ((l1idx) & (L2_BUCKET_SIZE - 1))
+#define L2_BUCKET_XSHIFT       (L2_BUCKET_XLOG2 - L1_S_SHIFT)
+#define L2_BUCKET_XFRAME       (~(vaddr_t)0 << L2_BUCKET_XLOG2)
+#define L2_BUCKET_IDX(l1slot)  ((l1slot) >> L2_BUCKET_XSHIFT)
+#define L2_IDX(l1slot)         (L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2)
+#define L2_BUCKET(l1slot)      (L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1))
+
+__CTASSERT(0x100000000ULL == ((uint64_t)L2_SIZE * L2_BUCKET_SIZE * L1_S_SIZE));
+__CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCKET_XSIZE-1));
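
To make the new index math concrete, here is a standalone walk-through of the
L2_IDX/L2_BUCKET macros from the last hunk above.  The constant values below
are assumptions chosen only for illustration (the real values are
configuration dependent and live in the headers this change touches), but the
macro bodies are reproduced from the diff.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t vaddr_t;

/* Illustrative values only; see arm32/param.h and arm32/pmap.h. */
#define L1_S_SHIFT	20	/* each L1 slot maps 1MB */
#define L2_BUCKET_XLOG2	23	/* assumed: one bucket spans 8MB */
#define L2_BUCKET_LOG2	4
#define L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/* The macros from the diff, reproduced verbatim. */
#define L2_BUCKET_XSHIFT	(L2_BUCKET_XLOG2 - L1_S_SHIFT)
#define L2_BUCKET_IDX(l1slot)	((l1slot) >> L2_BUCKET_XSHIFT)
#define L2_IDX(l1slot)		(L2_BUCKET_IDX(l1slot) >> L2_BUCKET_LOG2)
#define L2_BUCKET(l1slot)	(L2_BUCKET_IDX(l1slot) & (L2_BUCKET_SIZE - 1))

int
main(void)
{
	vaddr_t va = 0x80300000;		/* arbitrary example VA */
	unsigned l1slot = va >> L1_S_SHIFT;	/* 0x803 */

	/* 0x803 >> 3 = 0x100; 0x100 >> 4 = 0x10; 0x100 & 0xf = 0 */
	printf("va %#x -> l1slot %#x -> l2_dtable %u, bucket %u\n",
	    (unsigned)va, l1slot, (unsigned)L2_IDX(l1slot),
	    (unsigned)L2_BUCKET(l1slot));
	assert(L2_IDX(l1slot) == 0x10 && L2_BUCKET(l1slot) == 0);
	return 0;
}

So every l1slot maps to exactly one (l2_dtable, bucket) pair, and with these
assumed shifts eight consecutive 1MB L1 slots share a single bucket.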


