Port-alpha archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Please test a GENERIC.MP kernel for me
Greetings happy Alpha people --
While working on some changes to subr_pool.c recently, I discovered a bug in
the L1 page table allocation code in the Alpha pmap. However, since I don't
have any Alpha hardware anymore, I can't test the fix myself. I would greatly
appreciate it if someone could test out a GENERIC.MP kernel for me (built from
-current sources from a couple of days ago):
ftp://ftp.netbsd.org/pub/NetBSD/misc/thorpej/netbsd-alpha-mp.gz
For the curious, attached is the diff that you're being asked to test.
Index: alpha/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/alpha/pmap.c,v
retrieving revision 1.244
diff -u -p -r1.244 pmap.c
--- alpha/pmap.c 21 Oct 2009 21:11:58 -0000 1.244
+++ alpha/pmap.c 23 Oct 2009 19:29:28 -0000
@@ -144,6 +144,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.2
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
@@ -358,14 +359,13 @@ static struct pmap_asn_info pmap_asn_inf
* There is a lock ordering constraint for pmap_growkernel_lock.
* pmap_growkernel() acquires the locks in the following order:
*
- * pmap_growkernel_lock -> pmap_all_pmaps_lock ->
+ * pmap_growkernel_lock (write) -> pmap_all_pmaps_lock ->
* pmap->pm_lock
*
- * But pmap_lev1map_create() is called with pmap->pm_lock held,
- * and also needs to acquire the pmap_growkernel_lock. So,
- * we require that the caller of pmap_lev1map_create() (currently,
- * the only caller is pmap_enter()) acquire pmap_growkernel_lock
- * before acquring pmap->pm_lock.
+ * We need to ensure consistency between user pmaps and the
+ * kernel_lev1map. For this reason, pmap_growkernel_lock must
+ * be held to prevent kernel_lev1map changing across pmaps
+ * being added to / removed from the global pmaps list.
*
* Address space number management (global ASN counters and per-pmap
* ASN state) are not locked; they use arrays of values indexed
@@ -377,7 +377,7 @@ static struct pmap_asn_info pmap_asn_inf
*/
static krwlock_t pmap_main_lock;
static kmutex_t pmap_all_pmaps_lock;
-static kmutex_t pmap_growkernel_lock;
+static krwlock_t pmap_growkernel_lock;
#define PMAP_MAP_TO_HEAD_LOCK() rw_enter(&pmap_main_lock,
RW_READER)
#define PMAP_MAP_TO_HEAD_UNLOCK() rw_exit(&pmap_main_lock)
@@ -895,7 +895,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
}
/* Initialize the pmap_growkernel_lock. */
- mutex_init(&pmap_growkernel_lock, MUTEX_DEFAULT, IPL_NONE);
+ rw_init(&pmap_growkernel_lock);
/*
* Set up level three page table (lev3map)
@@ -1179,12 +1179,20 @@ pmap_create(void)
}
mutex_init(&pmap->pm_lock, MUTEX_DEFAULT, IPL_NONE);
+ try_again:
+ rw_enter(&pmap_growkernel_lock, RW_READER);
+
+ if (pmap_lev1map_create(pmap, cpu_number()) != 0) {
+ rw_exit(&pmap_growkernel_lock);
+ (void) kpause("pmap_create", false, hz >> 2, NULL);
+ goto try_again;
+ }
+
mutex_enter(&pmap_all_pmaps_lock);
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
mutex_exit(&pmap_all_pmaps_lock);
- i = pmap_lev1map_create(pmap, cpu_number());
- KASSERT(i == 0);
+ rw_exit(&pmap_growkernel_lock);
return (pmap);
}
@@ -1207,6 +1215,8 @@ pmap_destroy(pmap_t pmap)
if (atomic_dec_uint_nv(&pmap->pm_count) > 0)
return;
+ rw_enter(&pmap_growkernel_lock, RW_READER);
+
/*
* Remove it from the global list of all pmaps.
*/
@@ -1216,6 +1226,8 @@ pmap_destroy(pmap_t pmap)
pmap_lev1map_destroy(pmap, cpu_number());
+ rw_exit(&pmap_growkernel_lock);
+
/*
* Since the pmap is supposed to contain no valid
* mappings at this point, we should always see
@@ -3052,11 +3064,11 @@ pmap_growkernel(vaddr_t maxkvaddr)
vaddr_t va;
int l1idx;
+ rw_enter(&pmap_growkernel_lock, RW_WRITER);
+
if (maxkvaddr <= virtual_end)
goto out; /* we are OK */
- mutex_enter(&pmap_growkernel_lock);
-
va = virtual_end;
while (va < maxkvaddr) {
@@ -3129,9 +3141,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
virtual_end = va;
- mutex_exit(&pmap_growkernel_lock);
-
out:
+ rw_exit(&pmap_growkernel_lock);
+
return (virtual_end);
die:
@@ -3143,34 +3155,21 @@ pmap_growkernel(vaddr_t maxkvaddr)
*
* Create a new level 1 page table for the specified pmap.
*
- * Note: growkernel and the pmap must already be locked.
+ * Note: growkernel must already be held and the pmap either
+ * already locked or unreferenced globally.
*/
static int
pmap_lev1map_create(pmap_t pmap, long cpu_id)
{
pt_entry_t *l1pt;
-#ifdef DIAGNOSTIC
- if (pmap == pmap_kernel())
- panic("pmap_lev1map_create: got kernel pmap");
-#endif
-
- if (pmap->pm_lev1map != kernel_lev1map) {
- /*
- * We have to briefly unlock the pmap in pmap_enter()
- * do deal with a lock ordering constraint, so it's
- * entirely possible for this to happen.
- */
- return (0);
- }
+ KASSERT(pmap != pmap_kernel());
-#ifdef DIAGNOSTIC
- if (pmap->pm_asni[cpu_id].pma_asn != PMAP_ASN_RESERVED)
- panic("pmap_lev1map_create: pmap uses non-reserved ASN");
-#endif
+ KASSERT(pmap->pm_lev1map == kernel_lev1map);
+ KASSERT(pmap->pm_asni[cpu_id].pma_asn == PMAP_ASN_RESERVED);
- /* Being called from pmap_create() in this case; we can sleep. */
- l1pt = pool_cache_get(&pmap_l1pt_cache, PR_WAITOK);
+ /* Don't sleep -- we're called with locks held. */
+ l1pt = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
if (l1pt == NULL)
return (ENOMEM);
@@ -3183,17 +3182,15 @@ pmap_lev1map_create(pmap_t pmap, long cp
*
* Destroy the level 1 page table for the specified pmap.
*
- * Note: the pmap must already be locked.
+ * Note: growkernel must be held and the pmap must already be
+ * locked or not globally referenced.
*/
static void
pmap_lev1map_destroy(pmap_t pmap, long cpu_id)
{
pt_entry_t *l1pt = pmap->pm_lev1map;
-#ifdef DIAGNOSTIC
- if (pmap == pmap_kernel())
- panic("pmap_lev1map_destroy: got kernel pmap");
-#endif
+ KASSERT(pmap != pmap_kernel());
/*
* Go back to referencing the global kernel_lev1map.
@@ -3210,6 +3207,10 @@ pmap_lev1map_destroy(pmap_t pmap, long c
* pmap_l1pt_ctor:
*
* Pool cache constructor for L1 PT pages.
+ *
+ * Note: The growkernel lock is held across allocations
+ * from our pool_cache, so we don't need to acquire it
+ * ourselves.
*/
static int
pmap_l1pt_ctor(void *arg, void *object, int flags)
Thanks!
-- thorpej
Home |
Main Index |
Thread Index |
Old Index