Source-Changes-HG archive


[src/rmind-uvmplock]: src/sys/arch/hppa Drop per-page locking i.e. pvh_lock a...



details:   https://anonhg.NetBSD.org/src/rev/d5d49d47193f
branches:  rmind-uvmplock
changeset: 753077:d5d49d47193f
user:      skrll <skrll%NetBSD.org@localhost>
date:      Wed Mar 09 19:13:18 2011 +0000

description:
Drop per-page locking, i.e. pvh_lock, and rely on the locking provided by
the upper layer, UVM.  Sprinkle asserts.

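For context, a minimal sketch (not part of the commit itself) of the locking
model this change moves to: pv-list manipulation no longer takes a per-page
mutex, but instead asserts that the caller, normally UVM, already holds the
page's lock; kernel mappings are exempt.  The helper name below is a
hypothetical illustration; the types and the KASSERT/uvm_page_locked_p()
idiom are taken from the diff.

        /*
         * Sketch only -- example_pv_walk() is a hypothetical helper, not
         * code from this commit.  It shows the new rule: pv lists are
         * protected by the upper-layer (UVM) page lock rather than by a
         * per-page md->pvh_lock.
         */
        static void
        example_pv_walk(pmap_t pm, struct vm_page *pg)
        {
                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                struct pv_entry *pve;

                /* Previously: mutex_enter(&md->pvh_lock) around the walk. */
                /* Now: assert that the caller already holds the page lock,
                 * except for kernel mappings, which UVM does not lock. */
                KASSERT(pm == pmap_kernel() || uvm_page_locked_p(pg));

                for (pve = md->pvh_list; pve != NULL; pve = pve->pv_next) {
                        /* pve may be examined/modified under the caller's
                         * lock. */
                }
        }
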
diffstat:

 sys/arch/hppa/hppa/pmap.c    |  55 +++++++------------------------------------
 sys/arch/hppa/include/pmap.h |   4 +--
 2 files changed, 10 insertions(+), 49 deletions(-)

diffs (226 lines):

diff -r 88bc12c6b6aa -r d5d49d47193f sys/arch/hppa/hppa/pmap.c
--- a/sys/arch/hppa/hppa/pmap.c Tue Mar 08 23:41:09 2011 +0000
+++ b/sys/arch/hppa/hppa/pmap.c Wed Mar 09 19:13:18 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.71.2.4 2011/03/05 20:50:36 rmind Exp $      */
+/*     $NetBSD: pmap.c,v 1.71.2.5 2011/03/09 19:13:18 skrll Exp $      */
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.71.2.4 2011/03/05 20:50:36 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.71.2.5 2011/03/09 19:13:18 skrll Exp $");
 
 #include "opt_cputype.h"
 
@@ -504,13 +504,11 @@
 
        pg = PHYS_TO_VM_PAGE(pa);
        md = VM_PAGE_TO_MD(pg);
-       mutex_enter(&md->pvh_lock);
        db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
            md->pvh_aliases);
        for (pve = md->pvh_list; pve; pve = pve->pv_next)
                db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
                    pve->pv_va & PV_VAMASK);
-       mutex_exit(&md->pvh_lock);
 }
 #endif
 
@@ -576,7 +574,7 @@
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
            __func__, pg, pve, pm, va, pdep, flags));
 
-       KASSERT(mutex_owned(&md->pvh_lock));
+       KASSERT(pm == pmap_kernel() || uvm_page_locked_p(pg));
 
        pve->pv_pmap = pm;
        pve->pv_va = va | flags;
@@ -591,7 +589,7 @@
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry **pve, *pv;
 
-       KASSERT(mutex_owned(&md->pvh_lock));
+       KASSERT(pmap == pmap_kernel() || uvm_page_locked_p(pg));
 
        for (pv = *(pve = &md->pvh_list);
            pv; pv = *(pve = &(*pve)->pv_next))
@@ -1216,11 +1214,10 @@
                }
 
                pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
+               pve = pmap_pv_remove(pg, pmap, va);
+
                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-               mutex_enter(&md->pvh_lock);
-               pve = pmap_pv_remove(pg, pmap, va);
                md->pvh_attrs |= pmap_pvh_attrs(pte);
-               mutex_exit(&md->pvh_lock);
        } else {
                DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n",
                    __func__, va, pa));
@@ -1234,22 +1231,18 @@
        }
 
        if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
-               struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-
                if (!pve && !(pve = pmap_pv_alloc())) {
                        if (flags & PMAP_CANFAIL) {
-                               mutex_exit(&md->pvh_lock);
                                PMAP_UNLOCK(pmap);
                                return (ENOMEM);
                        }
                        panic("%s: no pv entries available", __func__);
                }
                 pte |= PTE_PROT(pmap_prot(pmap, prot));
-               mutex_enter(&md->pvh_lock);
                if (pmap_check_alias(pg, va, pte))
-                       pmap_page_remove_locked(pg);
+                       pmap_page_remove(pg);
                pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
-               mutex_exit(&md->pvh_lock);
+
        } else if (pve) {
                pmap_pv_free(pve);
        }
@@ -1318,13 +1311,10 @@
                            (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
                                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-                               mutex_enter(&md->pvh_lock);
 
                                pve = pmap_pv_remove(pg, pmap, sva);
                                md->pvh_attrs |= pmap_pvh_attrs(pte);
 
-                               mutex_exit(&md->pvh_lock);
-
                                if (pve != NULL)
                                        pmap_pv_free(pve);
                        }
@@ -1374,9 +1364,7 @@
 
                        pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
                        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-                       mutex_enter(&md->pvh_lock);
                        md->pvh_attrs |= pmap_pvh_attrs(pte);
-                       mutex_exit(&md->pvh_lock);
 
                        pmap_pte_flush(pmap, sva, pte);
                        pte &= ~PTE_PROT(TLB_AR_MASK);
@@ -1392,16 +1380,6 @@
 pmap_page_remove(struct vm_page *pg)
 {
        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-
-       mutex_enter(&md->pvh_lock);
-       pmap_page_remove_locked(pg);
-       mutex_exit(&md->pvh_lock);
-}
-
-void
-pmap_page_remove_locked(struct vm_page *pg)
-{
-       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
        struct pv_entry *pve, *npve, **pvp;
 
        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
@@ -1496,8 +1474,6 @@
        KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
        KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
 
-       mutex_enter(&md->pvh_lock);
-
        /* preserve other bits */
        res = md->pvh_attrs & (set | clear);
        md->pvh_attrs ^= res;
@@ -1532,7 +1508,6 @@
                        }
                }
        }
-       mutex_exit(&md->pvh_lock);
 
        return ((res & (clear | set)) != 0);
 }
@@ -1547,8 +1522,6 @@
 
        DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit));
 
-       mutex_enter(&md->pvh_lock);
-
        for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
            pve = pve->pv_next) {
                pmap_t pm = pve->pv_pmap;
@@ -1560,7 +1533,6 @@
                md->pvh_attrs |= pmap_pvh_attrs(pte);
        }
        ret = ((md->pvh_attrs & bit) != 0);
-       mutex_exit(&md->pvh_lock);
 
        return ret;
 }
@@ -1736,8 +1708,6 @@
 
                pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
                if (pg != NULL) {
-                       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-
                        KASSERT(pa < HPPA_IOBEGIN);
 
                        struct pv_entry *pve;
@@ -1750,12 +1720,10 @@
                            ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
                            va, pa, pte));
 
-                       mutex_enter(&md->pvh_lock);
                        if (pmap_check_alias(pg, va, pte))
-                               pmap_page_remove_locked(pg);
+                               pmap_page_remove(pg);
                        pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
                            PV_KENTER);
-                       mutex_exit(&md->pvh_lock);
                }
        }
        pmap_pte_set(pde, va, pte);
@@ -1824,13 +1792,8 @@
                pmap_pte_flush(pmap, va, pte);
                pmap_pte_set(pde, va, 0);
                if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
-                       struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-
-                       mutex_enter(&md->pvh_lock);
-
                        pve = pmap_pv_remove(pg, pmap, va);
 
-                       mutex_exit(&md->pvh_lock);
                        if (pve != NULL)
                                pmap_pv_free(pve);
                }
diff -r 88bc12c6b6aa -r d5d49d47193f sys/arch/hppa/include/pmap.h
--- a/sys/arch/hppa/include/pmap.h      Tue Mar 08 23:41:09 2011 +0000
+++ b/sys/arch/hppa/include/pmap.h      Wed Mar 09 19:13:18 2011 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.h,v 1.25.2.4 2011/03/05 20:50:37 rmind Exp $      */
+/*     $NetBSD: pmap.h,v 1.25.2.5 2011/03/09 19:13:19 skrll Exp $      */
 
 /*     $OpenBSD: pmap.h,v 1.35 2007/12/14 18:32:23 deraadt Exp $       */
 
@@ -198,7 +198,6 @@
 struct pv_entry;
 
 struct vm_page_md {
-       struct kmutex   pvh_lock;       /* locks every pv on this list */
        struct pv_entry *pvh_list;      /* head of list (locked by pvh_lock) */
        u_int           pvh_attrs;      /* to preserve ref/mod */
        int             pvh_aliases;    /* alias counting */
@@ -206,7 +205,6 @@
 
 #define        VM_MDPAGE_INIT(pg) \
 do {                                                                   \
-       mutex_init(&(pg)->mdpage.pvh_lock, MUTEX_NODEBUG, IPL_VM);      \
        (pg)->mdpage.pvh_list = NULL;                                   \
        (pg)->mdpage.pvh_attrs = 0;                                     \
        (pg)->mdpage.pvh_aliases = 0;                                   \


