Source-Changes-HG archive
[src/trunk]: src/sys/arch Fix another locking error: if freeing a PV entry, d...
details: https://anonhg.NetBSD.org/src/rev/d38d3bc604f3
branches: trunk
changeset: 474979:d38d3bc604f3
user: thorpej <thorpej%NetBSD.org@localhost>
date: Wed Jul 28 06:54:41 1999 +0000
description:
Fix another locking error: if freeing a PV entry, don't attempt to
free a PV page if the PV entry was associated with the kernel pmap,
since the kernel pmap is locked, and freeing the page will execute
a code path which will attempt to lock it again, resulting in deadlock.
No real loss, since the next time a PV entry is freed, the page will
be freed, too.
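In outline, the fix is a single extra guard in the PV-free path. The fragment below condenses the new pmap_free_pv() from the i386 diff that follows, with comments added to spell out the deadlock being avoided; it is an illustrative sketch only, and the authoritative change is the diff itself.

	__inline static void
	pmap_free_pv(pmap, pv)
		struct pmap *pmap;	/* pmap the PV entry belonged to */
		struct pv_entry *pv;
	{
		simple_lock(&pvalloc_lock);
		pmap_free_pv_doit(pv);

		/*
		 * Only try to give back an unused PV page if the entry did
		 * not belong to the kernel pmap: per the description above,
		 * the kernel pmap is already locked by our caller, and
		 * pmap_free_pvpage() runs a code path that would try to
		 * lock it again, deadlocking.  Nothing is lost by skipping
		 * the free here; the page is reclaimed the next time a PV
		 * entry is freed.
		 */
		if (pv_nfpvents > PVE_HIWAT &&
		    pv_unusedpgs.tqh_first != NULL &&
		    pmap != pmap_kernel())
			pmap_free_pvpage();

		simple_unlock(&pvalloc_lock);
	}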
diffstat:
sys/arch/i386/i386/pmap.c | 34 +++++++++++++++++++++++-----------
sys/arch/pc532/pc532/pmap.c | 34 +++++++++++++++++++++++-----------
2 files changed, 46 insertions(+), 22 deletions(-)
diffs (204 lines):
diff -r 6b19dc5f313e -r d38d3bc604f3 sys/arch/i386/i386/pmap.c
--- a/sys/arch/i386/i386/pmap.c Wed Jul 28 06:35:15 1999 +0000
+++ b/sys/arch/i386/i386/pmap.c Wed Jul 28 06:54:41 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.77 1999/07/28 05:37:54 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.78 1999/07/28 06:54:41 thorpej Exp $ */
/*
*
@@ -400,8 +400,8 @@
static void pmap_enter_pv __P((struct pv_head *,
struct pv_entry *, struct pmap *,
vaddr_t, struct vm_page *));
-static void pmap_free_pv __P((struct pv_entry *));
-static void pmap_free_pvs __P((struct pv_entry *));
+static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
+static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void pmap_free_pv_doit __P((struct pv_entry *));
static void pmap_free_pvpage __P((void));
static struct vm_page *pmap_get_ptp __P((struct pmap *, int, boolean_t));
@@ -1416,13 +1416,19 @@
*/
__inline static void
-pmap_free_pv(pv)
+pmap_free_pv(pmap, pv)
+ struct pmap *pmap;
struct pv_entry *pv;
{
simple_lock(&pvalloc_lock);
pmap_free_pv_doit(pv);
- if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL)
+ /*
+ * Can't free the PV page if the PV entries were associated with
+ * the kernel pmap; the pmap is already locked.
+ */
+ if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
+ pmap != pmap_kernel())
pmap_free_pvpage();
simple_unlock(&pvalloc_lock);
@@ -1435,7 +1441,8 @@
*/
__inline static void
-pmap_free_pvs(pvs)
+pmap_free_pvs(pmap, pvs)
+ struct pmap *pmap;
struct pv_entry *pvs;
{
struct pv_entry *nextpv;
@@ -1447,7 +1454,12 @@
pmap_free_pv_doit(pvs);
}
- if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL)
+ /*
+ * Can't free the PV page if the PV entries were associated with
+ * the kernel pmap; the pmap is already locked.
+ */
+ if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
+ pmap != pmap_kernel())
pmap_free_pvpage();
simple_unlock(&pvalloc_lock);
@@ -2246,7 +2258,7 @@
/* end of "for" loop: time for next pte */
}
if (pv_tofree)
- pmap_free_pvs(pv_tofree);
+ pmap_free_pvs(pmap, pv_tofree);
}
@@ -2311,7 +2323,7 @@
simple_unlock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);
if (pve)
- pmap_free_pv(pve);
+ pmap_free_pv(pmap, pve);
return(TRUE);
}
@@ -2605,7 +2617,7 @@
}
pmap_unmap_ptes(pve->pv_pmap); /* unlocks pmap */
}
- pmap_free_pvs(pvh->pvh_list);
+ pmap_free_pvs(NULL, pvh->pvh_list);
pvh->pvh_list = NULL;
simple_unlock(&pvh->pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
@@ -3557,7 +3569,7 @@
/* new mapping is not PG_PVLIST. free pve if we've got one */
pvh = NULL; /* ensure !PG_PVLIST */
if (pve)
- pmap_free_pv(pve);
+ pmap_free_pv(pmap, pve);
}
enter_now:
diff -r 6b19dc5f313e -r d38d3bc604f3 sys/arch/pc532/pc532/pmap.c
--- a/sys/arch/pc532/pc532/pmap.c Wed Jul 28 06:35:15 1999 +0000
+++ b/sys/arch/pc532/pc532/pmap.c Wed Jul 28 06:54:41 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.43 1999/07/28 05:37:55 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.44 1999/07/28 06:54:42 thorpej Exp $ */
/*
*
@@ -374,8 +374,8 @@
static void pmap_enter_pv __P((struct pv_head *,
struct pv_entry *, struct pmap *,
vaddr_t, struct vm_page *));
-static void pmap_free_pv __P((struct pv_entry *));
-static void pmap_free_pvs __P((struct pv_entry *));
+static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
+static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void pmap_free_pv_doit __P((struct pv_entry *));
static void pmap_free_pvpage __P((void));
static struct vm_page *pmap_get_ptp __P((struct pmap *, int, boolean_t));
@@ -1324,13 +1324,19 @@
*/
__inline static void
-pmap_free_pv(pv)
+pmap_free_pv(pmap, pv)
+ struct pmap *pmap;
struct pv_entry *pv;
{
simple_lock(&pvalloc_lock);
pmap_free_pv_doit(pv);
- if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL)
+ /*
+ * Can't free the PV page if the PV entries were associated with
+ * the kernel pmap; the pmap is already locked.
+ */
+ if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
+ pmap != pmap_kernel())
pmap_free_pvpage();
simple_unlock(&pvalloc_lock);
@@ -1343,7 +1349,8 @@
*/
__inline static void
-pmap_free_pvs(pvs)
+pmap_free_pvs(pmap, pvs)
+ struct pmap *pmap;
struct pv_entry *pvs;
{
struct pv_entry *nextpv;
@@ -1355,7 +1362,12 @@
pmap_free_pv_doit(pvs);
}
- if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL)
+ /*
+ * Can't free the PV page if the PV entries were associated with
+ * the kernel pmap; the pmap is already locked.
+ */
+ if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
+ pmap != pmap_kernel())
pmap_free_pvpage();
simple_unlock(&pvalloc_lock);
@@ -2061,7 +2073,7 @@
/* end of "for" loop: time for next pte */
}
if (pv_tofree)
- pmap_free_pvs(pv_tofree);
+ pmap_free_pvs(pmap, pv_tofree);
}
@@ -2126,7 +2138,7 @@
simple_unlock(&vm_physmem[bank].pmseg.pvhead[off].pvh_lock);
if (pve)
- pmap_free_pv(pve);
+ pmap_free_pv(pmap, pve);
return(TRUE);
}
@@ -2389,7 +2401,7 @@
}
pmap_unmap_ptes(pve->pv_pmap); /* unlocks pmap */
}
- pmap_free_pvs(pvh->pvh_list);
+ pmap_free_pvs(NULL, pvh->pvh_list);
pvh->pvh_list = NULL;
simple_unlock(&pvh->pvh_lock);
PMAP_HEAD_TO_MAP_UNLOCK();
@@ -3312,7 +3324,7 @@
/* new mapping is not PG_PVLIST. free pve if we've got one */
pvh = NULL; /* ensure !PG_PVLIST */
if (pve)
- pmap_free_pv(pve);
+ pmap_free_pv(pmap, pve);
}
enter_now: