Source-Changes-HG archive
[src/trunk]: src/sys/arch/arm/arm32 Don't use simple locks.
details: https://anonhg.NetBSD.org/src/rev/50d2df00ab40
branches: trunk
changeset: 773090:50d2df00ab40
user: matt <matt%NetBSD.org@localhost>
date: Sat Jan 28 00:11:46 2012 +0000
description:
Don't use simple locks.
diffstat:
sys/arch/arm/arm32/pmap.c | 138 ++++++++++++---------------------------------
1 files changed, 38 insertions(+), 100 deletions(-)
diffs (truncated from 607 to 300 lines):
diff -r a7089a46d00b -r 50d2df00ab40 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sat Jan 28 00:00:06 2012 +0000
+++ b/sys/arch/arm/arm32/pmap.c Sat Jan 28 00:11:46 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.225 2012/01/27 19:48:38 para Exp $ */
+/* $NetBSD: pmap.c,v 1.226 2012/01/28 00:11:46 matt Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -211,7 +211,7 @@
#include <machine/param.h>
#include <arm/arm32/katelib.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.225 2012/01/27 19:48:38 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.226 2012/01/28 00:11:46 matt Exp $");
#ifdef PMAP_DEBUG
@@ -467,24 +467,6 @@
* Misc. locking data structures
*/
-#if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
-static struct lock pmap_main_lock;
-
-#define PMAP_MAP_TO_HEAD_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
-#define PMAP_MAP_TO_HEAD_UNLOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
-#define PMAP_HEAD_TO_MAP_LOCK() \
- (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
-#define PMAP_HEAD_TO_MAP_UNLOCK() \
- spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
-#else
-#define PMAP_MAP_TO_HEAD_LOCK() /* null */
-#define PMAP_MAP_TO_HEAD_UNLOCK() /* null */
-#define PMAP_HEAD_TO_MAP_LOCK() /* null */
-#define PMAP_HEAD_TO_MAP_UNLOCK() /* null */
-#endif
-
#define pmap_acquire_pmap_lock(pm) \
do { \
if ((pm) != pmap_kernel()) \
@@ -543,7 +525,7 @@
* the userland pmaps which owns this L1) are moved to the TAIL.
*/
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
-static struct simplelock l1_lru_lock;
+static kmutex_t l1_lru_lock __cacheline_aligned;
/*
* A list of all L1 tables
@@ -871,7 +853,6 @@
pv->pv_va = va;
pv->pv_flags = flags;
- simple_lock(&md->pvh_slock); /* lock vm_page */
pvp = &SLIST_FIRST(&md->pvh_list);
#ifdef PMAP_CACHE_VIPT
/*
@@ -924,7 +905,6 @@
#endif
PMAPCOUNT(mappings);
- simple_unlock(&md->pvh_slock); /* unlock, done! */
if (pv->pv_flags & PVF_WIRED)
++pm->pm_stats.wired_count;
@@ -1144,7 +1124,7 @@
/*
* Remove the L1 at the head of the LRU list
*/
- simple_lock(&l1_lru_lock);
+ mutex_spin_enter(&l1_lru_lock);
l1 = TAILQ_FIRST(&l1_lru_list);
KDASSERT(l1 != NULL);
TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
@@ -1163,7 +1143,7 @@
if (++l1->l1_domain_use_count < PMAP_DOMAINS)
TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
- simple_unlock(&l1_lru_lock);
+ mutex_spin_exit(&l1_lru_lock);
/*
* Fix up the relevant bits in the pmap structure
@@ -1181,7 +1161,7 @@
{
struct l1_ttable *l1 = pm->pm_l1;
- simple_lock(&l1_lru_lock);
+ mutex_spin_enter(&l1_lru_lock);
/*
* If this L1 is currently on the LRU list, remove it.
@@ -1207,7 +1187,7 @@
else
TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
- simple_unlock(&l1_lru_lock);
+ mutex_spin_exit(&l1_lru_lock);
}
static inline void
@@ -1231,13 +1211,13 @@
if (l1->l1_domain_use_count == PMAP_DOMAINS)
return;
- simple_lock(&l1_lru_lock);
+ mutex_spin_enter(&l1_lru_lock);
/*
* Check the use count again, now that we've acquired the lock
*/
if (l1->l1_domain_use_count == PMAP_DOMAINS) {
- simple_unlock(&l1_lru_lock);
+ mutex_spin_exit(&l1_lru_lock);
return;
}
@@ -1247,7 +1227,7 @@
TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
- simple_unlock(&l1_lru_lock);
+ mutex_spin_exit(&l1_lru_lock);
}
/*
@@ -2139,9 +2119,6 @@
printf("pmap_clearbit: md %p mask 0x%x\n",
md, maskbits));
- PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&md->pvh_slock);
-
#ifdef PMAP_CACHE_VIPT
/*
* If we might want to sync the I-cache and we've modified it,
@@ -2166,8 +2143,6 @@
PMAPCOUNT(exec_discarded_clearbit);
}
#endif
- simple_unlock(&md->pvh_slock);
- PMAP_HEAD_TO_MAP_UNLOCK();
return;
}
@@ -2328,9 +2303,6 @@
pmap_vac_me_harder(md, pa, NULL, 0);
}
#endif
-
- simple_unlock(&md->pvh_slock);
- PMAP_HEAD_TO_MAP_UNLOCK();
}
/*
@@ -2578,9 +2550,6 @@
printf("pmap_page_remove: md %p (0x%08lx)\n", md,
pa));
- PMAP_HEAD_TO_MAP_LOCK();
- simple_lock(&md->pvh_slock);
-
pv = SLIST_FIRST(&md->pvh_list);
if (pv == NULL) {
#ifdef PMAP_CACHE_VIPT
@@ -2593,8 +2562,6 @@
md->pvh_attrs &= ~PVF_EXEC;
KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
#endif
- simple_unlock(&md->pvh_slock);
- PMAP_HEAD_TO_MAP_UNLOCK();
return;
}
#ifdef PMAP_CACHE_VIPT
@@ -2696,8 +2663,6 @@
md->pvh_attrs &= ~PVF_WRITE;
KASSERT((md->urw_mappings + md->krw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
#endif
- simple_unlock(&md->pvh_slock);
- PMAP_HEAD_TO_MAP_UNLOCK();
if (flush) {
/*
@@ -2799,7 +2764,6 @@
if (flags & PMAP_WIRED)
nflags |= PVF_WIRED;
- PMAP_MAP_TO_HEAD_LOCK();
pmap_acquire_pmap_lock(pm);
/*
@@ -2813,7 +2777,6 @@
if (l2b == NULL) {
if (flags & PMAP_CANFAIL) {
pmap_release_pmap_lock(pm);
- PMAP_MAP_TO_HEAD_UNLOCK();
return (ENOMEM);
}
panic("pmap_enter: failed to allocate L2 bucket");
@@ -2880,11 +2843,10 @@
/*
* We're changing the attrs of an existing mapping.
*/
- simple_lock(&md->pvh_slock);
+ KASSERT(uvm_page_locked_p(pg));
oflags = pmap_modify_pv(md, pa, pm, va,
PVF_WRITE | PVF_EXEC | PVF_WIRED |
PVF_MOD | PVF_REF, nflags);
- simple_unlock(&md->pvh_slock);
#ifdef PMAP_CACHE_VIVT
/*
@@ -2911,10 +2873,9 @@
* It is part of our managed memory so we
* must remove it from the PV list
*/
- simple_lock(&omd->pvh_slock);
+ KASSERT(uvm_page_locked_p(opg));
pv = pmap_remove_pv(omd, opa, pm, va);
pmap_vac_me_harder(omd, opa, pm, 0);
- simple_unlock(&omd->pvh_slock);
oflags = pv->pv_flags;
#ifdef PMAP_CACHE_VIVT
@@ -2945,12 +2906,12 @@
if (pm != pmap_kernel())
pmap_free_l2_bucket(pm, l2b, 0);
pmap_release_pmap_lock(pm);
- PMAP_MAP_TO_HEAD_UNLOCK();
NPDEBUG(PDB_ENTER,
printf("pmap_enter: ENOMEM\n"));
return (ENOMEM);
}
+ KASSERT(uvm_page_locked_p(pg));
pmap_enter_pv(md, pa, pv, pm, va, nflags);
}
} else {
@@ -2980,10 +2941,9 @@
struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
paddr_t opa = VM_PAGE_TO_PHYS(opg);
- simple_lock(&omd->pvh_slock);
+ KASSERT(uvm_page_locked_p(opg));
pv = pmap_remove_pv(omd, opa, pm, va);
pmap_vac_me_harder(omd, opa, pm, 0);
- simple_unlock(&omd->pvh_slock);
oflags = pv->pv_flags;
#ifdef PMAP_CACHE_VIVT
@@ -3064,24 +3024,21 @@
if (pg != NULL) {
struct vm_page_md *md = VM_PAGE_TO_MD(pg);
- simple_lock(&md->pvh_slock);
+ KASSERT(uvm_page_locked_p(pg));
pmap_vac_me_harder(md, pa, pm, va);
- simple_unlock(&md->pvh_slock);
}
}
#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
if (pg) {
struct vm_page_md *md = VM_PAGE_TO_MD(pg);
- simple_lock(&md->pvh_slock);
+ KASSERT(uvm_page_locked_p(pg));
KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
KASSERT(((md->pvh_attrs & PVF_WRITE) == 0) == (md->urw_mappings + md->krw_mappings == 0));
- simple_unlock(&md->pvh_slock);
}
#endif
pmap_release_pmap_lock(pm);
- PMAP_MAP_TO_HEAD_UNLOCK();
return (0);
}
@@ -3127,7 +3084,6 @@
/*
* we lock in the pmap => pv_head direction
*/
- PMAP_MAP_TO_HEAD_LOCK();
pmap_acquire_pmap_lock(pm);
if (pm->pm_remove_all || !pmap_is_cached(pm)) {
@@ -3180,10 +3136,9 @@
struct vm_page_md *md = VM_PAGE_TO_MD(pg);
struct pv_entry *pv;
- simple_lock(&md->pvh_slock);
+ KASSERT(uvm_page_locked_p(pg));
pv = pmap_remove_pv(md, pa, pm, sva);
pmap_vac_me_harder(md, pa, pm, 0);
- simple_unlock(&md->pvh_slock);
if (pv != NULL) {
if (pm->pm_remove_all == false) {
is_exec =
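
For readers skimming the truncated diff above, the change follows two recurring patterns: the global l1_lru_lock simplelock becomes a kmutex(9) spin mutex taken with mutex_spin_enter()/mutex_spin_exit(), and the per-page md->pvh_slock simple locks disappear in favor of asserting that the caller already holds the page's lock. The sketch below is a minimal illustration of those patterns, not code from pmap.c; the helper names and the "do work" bodies are hypothetical, while the kmutex calls and the uvm_page_locked_p() check mirror what the diff uses.

/*
 * Minimal sketch of the two conversion patterns in this change.
 * example_bootstrap(), example_lru_touch() and example_pv_update()
 * are hypothetical helpers; only the kmutex(9) calls and
 * uvm_page_locked_p() follow the diff.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <uvm/uvm.h>

/* Was: static struct simplelock l1_lru_lock; */
static kmutex_t l1_lru_lock __cacheline_aligned;

void
example_bootstrap(void)
{
	/*
	 * A non-IPL_NONE level yields a spin mutex, matching the
	 * spin-style use of the old simplelock.
	 */
	mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM);
}

void
example_lru_touch(void)
{
	/* Was: simple_lock(&l1_lru_lock); ... simple_unlock(&l1_lru_lock); */
	mutex_spin_enter(&l1_lru_lock);
	/* ... reorder entries on l1_lru_list here ... */
	mutex_spin_exit(&l1_lru_lock);
}

void
example_pv_update(struct vm_page *pg)
{
	/*
	 * Was: simple_lock(&md->pvh_slock); ... simple_unlock(&md->pvh_slock);
	 * The per-page simplelock is gone; the caller is now expected to
	 * hold the page's lock, so the old lock/unlock pair is replaced
	 * by an assertion.
	 */
	KASSERT(uvm_page_locked_p(pg));
	/* ... update the page's pv list here ... */
}

The net effect, visible in the diffstat, is that the pmap sheds its own page-header locking (PMAP_MAP_TO_HEAD_LOCK and friends were already compiled out) and relies on kmutex(9) plus the UVM page lock held by its callers.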