Source-Changes-HG archive


[src/trunk]: src/sys/arch/alpha Rather than tracking "needs I-sync on return ...



details:   https://anonhg.NetBSD.org/src/rev/129a3acc3659
branches:  trunk
changeset: 379367:129a3acc3659
user:      thorpej <thorpej%NetBSD.org@localhost>
date:      Sat May 29 22:14:09 2021 +0000

description:
Rather than tracking "needs I-sync on return to userspace" in a bitmap,
track it with a separate field in the pmap_percpu.  Not only does this
reduce cache line contention ever so slightly, but it also frees up a
field in the shared portion of the pmap structure.
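
To illustrate the trade-off, here is a standalone sketch (not the
committed code; names such as toy_pmap and NCPU_MAX are hypothetical):
the old scheme sets a bit in one word shared by all CPUs, which costs
an atomic read-modify-write on a contended cache line, while the new
scheme does a plain store into a cache-line-sized per-CPU slot that
only the marking CPU writes.

#include <stdatomic.h>

#define NCPU_MAX        64
#define COHERENCY_UNIT  64      /* assumed cache line size, as on alpha */

/* Old scheme: one bitmap word shared by every CPU. */
struct toy_pmap_old {
        _Atomic unsigned long pm_needisync;     /* one bit per CPU */
};

/* New scheme: each CPU owns a flag in its own cache-line-sized slot. */
struct toy_percpu {
        unsigned int  pmc_needisync;
        unsigned char pmc_pad[COHERENCY_UNIT - sizeof(unsigned int)];
};

struct toy_pmap_new {
        struct toy_percpu pm_percpu[NCPU_MAX];
};

static void
mark_needisync_old(struct toy_pmap_old *pm, unsigned long cpuid)
{
        /* Atomic RMW; the shared line bounces between marking CPUs. */
        atomic_fetch_or(&pm->pm_needisync, 1UL << cpuid);
}

static void
mark_needisync_new(struct toy_pmap_new *pm, unsigned long cpuid)
{
        /* Plain store; only this CPU's own cache line is written. */
        pm->pm_percpu[cpuid].pmc_needisync = 1;
}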

diffstat:

 sys/arch/alpha/alpha/pmap.c   |  14 +++++++-------
 sys/arch/alpha/include/pmap.h |  16 +++++++++-------
 2 files changed, 16 insertions(+), 14 deletions(-)

diffs (108 lines):

diff -r 6fa3cf219165 -r 129a3acc3659 sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c       Sat May 29 21:54:50 2021 +0000
+++ b/sys/arch/alpha/alpha/pmap.c       Sat May 29 22:14:09 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.279 2021/05/29 21:54:50 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.280 2021/05/29 22:14:09 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
@@ -135,7 +135,7 @@
 
 #include <sys/cdefs.h>                 /* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.279 2021/05/29 21:54:50 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.280 2021/05/29 22:14:09 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -862,8 +862,6 @@ pmap_tlb_invalidate(const struct pmap_tl
        pmap_t const pmap = tlbctx->t_pmap;
        KASSERT(pmap != NULL);
 
-       const u_long cpu_mask = 1UL << ci->ci_cpuid;
-
        if (__predict_false(pmap != ci->ci_pmap)) {
                TLB_COUNT(invalidate_user_not_current);
 
@@ -875,6 +873,8 @@ pmap_tlb_invalidate(const struct pmap_tl
                        return;
                }
 
+               const u_long cpu_mask = 1UL << ci->ci_cpuid;
+
                /*
                 * We cannot directly invalidate the TLB in this case,
                 * so force allocation of a new ASN when the pmap becomes
@@ -890,14 +890,14 @@ pmap_tlb_invalidate(const struct pmap_tl
                 * of accounting for internal consistency.
                 */
                if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) {
-                       atomic_or_ulong(&pmap->pm_needisync, cpu_mask);
+                       pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1;
                }
                return;
        }
 
        if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) {
                TLB_COUNT(invalidate_user_lazy_imb);
-               atomic_or_ulong(&pmap->pm_needisync, cpu_mask);
+               pmap->pm_percpu[ci->ci_cpuid].pmc_needisync = 1;
        }
 
        if (count == TLB_CTX_ALLVA) {
@@ -3882,7 +3882,7 @@ pmap_asn_alloc(pmap_t const pmap, struct
         * We have a new ASN, so we can skip any pending I-stream sync
         * on the way back out to user space.
         */
-       atomic_and_ulong(&pmap->pm_needisync, ~(1UL << ci->ci_cpuid));
+       pmc->pmc_needisync = 0;
 
 #ifdef DEBUG
        if (pmapdebug & PDB_ASN)
diff -r 6fa3cf219165 -r 129a3acc3659 sys/arch/alpha/include/pmap.h
--- a/sys/arch/alpha/include/pmap.h     Sat May 29 21:54:50 2021 +0000
+++ b/sys/arch/alpha/include/pmap.h     Sat May 29 22:14:09 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.86 2021/05/29 21:54:51 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.87 2021/05/29 22:14:09 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
@@ -135,17 +135,19 @@ struct pmap_percpu {
        unsigned int            pmc_asn;        /* address space number */
        unsigned int            pmc_pad0;
        unsigned long           pmc_asngen;     /* ASN generation number */
-       unsigned long           pmc_padN[(COHERENCY_UNIT / 8) - 2];
+       unsigned int            pmc_needisync;  /* CPU needs isync */
+       unsigned int            pmc_pad1;
+       unsigned long           pmc_padN[(COHERENCY_UNIT / 8) - 3];
 };
 
 struct pmap {  /* pmaps are aligned to COHERENCY_UNIT boundaries */
                /* pmaps are locked by hashed mutexes */
        pt_entry_t              *pm_lev1map;    /* [ 0] level 1 map */
        unsigned long           pm_cpus;        /* [ 8] CPUs using pmap */
-       unsigned long           pm_needisync;   /* [16] CPUs needing isync */
+       unsigned long           __pm_spare0;    /* [16] spare field */
        struct pmap_statistics  pm_stats;       /* [32] statistics */
        unsigned int            pm_count;       /* [40] reference count */
-       unsigned int            __pm_spare;     /* [44] spare field */
+       unsigned int            __pm_spare1;    /* [44] spare field */
        TAILQ_ENTRY(pmap)       pm_list;        /* [48] list of all pmaps */
        /* -- COHERENCY_UNIT boundary -- */
        struct pmap_percpu      pm_percpu[];    /* [64] per-CPU data */
@@ -326,10 +328,10 @@ pmap_l3pte(pmap_t pmap, vaddr_t v, pt_en
  */
 #define        PMAP_USERRET(pmap)                                              \
 do {                                                                   \
-       u_long cpu_mask = (1UL << cpu_number());                        \
+       const unsigned long cpu_id = cpu_number();                      \
                                                                        \
-       if ((pmap)->pm_needisync & cpu_mask) {                          \
-               atomic_and_ulong(&(pmap)->pm_needisync, ~cpu_mask);     \
+       if ((pmap)->pm_percpu[cpu_id].pmc_needisync) {                  \
+               (pmap)->pm_percpu[cpu_id].pmc_needisync = 0;            \
                alpha_pal_imb();                                        \
        }                                                               \
 } while (0)
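
A side note on the pmap.h arithmetic: the two new unsigned ints in
struct pmap_percpu consume exactly one unsigned long's worth of space
on LP64, which is why pmc_padN shrinks from (COHERENCY_UNIT / 8) - 2
to (COHERENCY_UNIT / 8) - 3 elements.  A minimal standalone check
(assuming the alpha's 64-byte COHERENCY_UNIT; not part of the commit):

#include <assert.h>

#define COHERENCY_UNIT 64

struct pmap_percpu {
        unsigned int  pmc_asn;          /* address space number */
        unsigned int  pmc_pad0;
        unsigned long pmc_asngen;       /* ASN generation number */
        unsigned int  pmc_needisync;    /* CPU needs isync */
        unsigned int  pmc_pad1;
        unsigned long pmc_padN[(COHERENCY_UNIT / 8) - 3];
};

static_assert(sizeof(struct pmap_percpu) == COHERENCY_UNIT,
    "pmap_percpu must fill exactly one coherency unit");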


