Source-Changes-HG archive


[src/trunk]: src/sys/arch/x86/x86 In pmap_enter_ma(), only try to allocate pv...



details:   https://anonhg.NetBSD.org/src/rev/f7f8e056229f
branches:  trunk
changeset: 357655:f7f8e056229f
user:      chs <chs%NetBSD.org@localhost>
date:      Mon Nov 20 20:57:58 2017 +0000

description:
In pmap_enter_ma(), only try to allocate pves if we might need them,
and even if that fails, only fail the operation if we later discover
that we really do need them.  This implements the requirement that
pmap_enter(PMAP_CANFAIL) must not fail when replacing an existing
mapping with the first mapping of a new page; the possibility of such
a failure was an unintended consequence of the changes from the
rmind-uvmplock branch in 2011.
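
For background on why the first mapping of a page is special here: the x86
pmap stores the first pv entry embedded in the per-page pmap_page, so only
second and later mappings of a page need a pv_entry from the allocator.  A
minimal standalone sketch of that idea (plain C with invented toy names,
not the kernel structures):

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented toy structures modelling the "embedded first pv entry" idea. */
    struct toy_pv_entry {
            struct toy_pv_entry *next;      /* later entries form a list */
    };

    struct toy_page {
            bool embedded_valid;            /* roughly PP_EMBEDDED */
            struct toy_pv_entry embedded;   /* first mapping lives here for free */
            struct toy_pv_entry *overflow;  /* second and later mappings */
    };

    /* Would adding one more mapping of this page need an allocation? */
    static bool
    toy_page_needs_pve(const struct toy_page *pg)
    {
            return pg->embedded_valid || pg->overflow != NULL;
    }

    int
    main(void)
    {
            struct toy_page pg = { .embedded_valid = false, .overflow = NULL };

            /* First mapping of a new page: no allocation required. */
            printf("first mapping needs pve: %d\n", toy_page_needs_pve(&pg));

            /* Once the first mapping is installed, further ones do. */
            pg.embedded_valid = true;
            printf("second mapping needs pve: %d\n", toy_page_needs_pve(&pg));
            return 0;
    }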

The problem arises when pmap_enter(PMAP_CANFAIL) is used to replace an existing
pmap mapping with a mapping of a different page (e.g. to resolve a copy-on-write
fault).  If that fails and leaves the old pmap entry in place, then UVM won't hold
the right locks when it eventually retries.  This entanglement of the UVM and
pmap locking was done in the rmind-uvmplock branch in order to improve performance,
but it also means that the UVM state and pmap state need to be kept in sync
more closely than they did before.  It would be possible to handle this in the UVM
code instead of in the pmap code, but these pmap changes improve the handling of
low-memory situations in general, and handling this in UVM would be clunky,
so this seemed like the better way to go.
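
The resulting control flow in pmap_enter_ma() can be summarized by the
following standalone sketch (plain C with made-up names and a single pv
entry instead of two, so a simplification rather than the real code):
allocate optimistically with a non-blocking allocator, keep going even if
that fails, and only report ENOMEM (when the caller allowed failure) once
the old PTE shows that a new pv entry is genuinely needed, i.e. the old
mapping is absent or maps a different page.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_CANFAIL     0x1     /* stands in for PMAP_CANFAIL */

    /* Non-blocking allocation that may return NULL under memory pressure. */
    static void *
    toy_alloc_pve(void)
    {
            return malloc(64);
    }

    /*
     * Toy model of the new flow: needs_pve says whether the target page
     * might need a pv entry, have_old/old_pa describe the mapping currently
     * in the PTE, and pa is the new physical address being entered.
     */
    static int
    toy_enter(bool needs_pve, bool have_old, unsigned long old_pa,
        unsigned long pa, int flags)
    {
            void *new_pve = NULL;

            /* Allocate optimistically; failure here is not yet an error. */
            if (needs_pve)
                    new_pve = toy_alloc_pve();

            /*
             * A pv entry is only consumed when the old mapping is absent or
             * maps a different page; only then does an earlier allocation
             * failure matter.
             */
            if (needs_pve && (!have_old || old_pa != pa) && new_pve == NULL) {
                    if (flags & TOY_CANFAIL)
                            return ENOMEM;
                    abort();        /* the kernel would panic() here */
            }

            /* ... update the PTE and install or free the pv entry ... */
            free(new_pve);
            return 0;
    }

    int
    main(void)
    {
            /* Changing permissions on an existing mapping of the same page. */
            printf("perm change: %d\n",
                toy_enter(true, true, 0x1000, 0x1000, 0));

            /* Replacing a mapping with a mapping of a different page. */
            printf("replace: %d\n",
                toy_enter(true, true, 0x1000, 0x2000, TOY_CANFAIL));
            return 0;
    }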

This somewhat indirectly fixes PR 52706, as well as the failing assertion
about "uvm_page_locked_p(old_pg)", though only on x86; various other
platforms will need their own changes to handle this issue.

diffstat:

 sys/arch/x86/x86/pmap.c |  75 ++++++++++++++++++++++++++++++++++++------------
 1 files changed, 56 insertions(+), 19 deletions(-)

diffs (139 lines):

diff -r 437ae8cae3ce -r f7f8e056229f sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c   Mon Nov 20 19:10:45 2017 +0000
+++ b/sys/arch/x86/x86/pmap.c   Mon Nov 20 20:57:58 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: pmap.c,v 1.265 2017/11/15 18:02:37 maxv Exp $  */
+/*     $NetBSD: pmap.c,v 1.266 2017/11/20 20:57:58 chs Exp $   */
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.265 2017/11/15 18:02:37 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.266 2017/11/20 20:57:58 chs Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -1790,6 +1790,20 @@
  * p v _ e n t r y   f u n c t i o n s
  */
 
+static bool
+pmap_pp_needs_pve(struct pmap_page *pp)
+{
+
+       /*
+        * Adding a pv entry for this page only needs to allocate a pv_entry
+        * structure if the page already has at least one pv entry,
+        * since the first pv entry is stored in the pmap_page.
+        */
+
+       return (pp->pp_flags & PP_EMBEDDED) != 0 ||
+               !LIST_EMPTY(&pp->pp_head.pvh_list);
+}
+
 /*
  * pmap_free_pvs: free a list of pv_entrys
  */
@@ -4159,15 +4173,20 @@
                new_pp = NULL;
        }
 
-       /* get pves. */
-       new_pve = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
-       new_sparepve = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
-       if (new_pve == NULL || new_sparepve == NULL) {
-               if (flags & PMAP_CANFAIL) {
-                       error = ENOMEM;
-                       goto out2;
-               }
-               panic("%s: pve allocation failed", __func__);
+       /*
+        * Try to get pves now if we might need them.
+        * Keep going even if we fail, since we will not actually need them
+        * if we are just changing the permissions on an existing mapping,
+        * but we won't know if that's the case until later.
+        */
+
+       bool needpves = pmap_pp_needs_pve(new_pp);
+       if (new_pp && needpves) {
+               new_pve = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
+               new_sparepve = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
+       } else {
+               new_pve = NULL;
+               new_sparepve = NULL;
        }
 
        kpreempt_disable();
@@ -4187,10 +4206,31 @@
        }
 
        /*
-        * update the pte.
+        * Check if there is an existing mapping.  If we are now sure that
+        * we need pves and we failed to allocate them earlier, handle that.
+        * Caching the value of oldpa here is safe because only the mod/ref bits
+        * can change while the pmap is locked.
         */
 
        ptep = &ptes[pl1_i(va)];
+       opte = *ptep;
+       bool have_oldpa = pmap_valid_entry(opte);
+       paddr_t oldpa = pmap_pte2pa(opte);
+
+       if (needpves && (!have_oldpa || oldpa != pa) &&
+           (new_pve == NULL || new_sparepve == NULL)) {
+               pmap_unmap_ptes(pmap, pmap2);
+               if (flags & PMAP_CANFAIL) {
+                       error = ENOMEM;
+                       goto out;
+               }
+               panic("%s: pve allocation failed", __func__);
+       }
+
+       /*
+        * update the pte.
+        */
+
        do {
                opte = *ptep;
 
@@ -4228,7 +4268,7 @@
         */
 
        pmap_stats_update_bypte(pmap, npte, opte);
-       if (ptp != NULL && !pmap_valid_entry(opte)) {
+       if (ptp != NULL && !have_oldpa) {
                ptp->wire_count++;
        }
        KASSERT(ptp == NULL || ptp->wire_count > 1);
@@ -4247,16 +4287,14 @@
         */
 
        if ((~opte & (PG_V | PG_PVLIST)) == 0) {
-               if ((old_pg = PHYS_TO_VM_PAGE(pmap_pte2pa(opte))) != NULL) {
+               if ((old_pg = PHYS_TO_VM_PAGE(oldpa)) != NULL) {
                        KASSERT(uvm_page_locked_p(old_pg));
                        old_pp = VM_PAGE_TO_PP(old_pg);
-               } else if ((old_pp = pmap_pv_tracked(pmap_pte2pa(opte)))
-                   == NULL) {
-                       pa = pmap_pte2pa(opte);
+               } else if ((old_pp = pmap_pv_tracked(oldpa)) == NULL) {
                        panic("%s: PG_PVLIST with pv-untracked page"
                            " va = %#"PRIxVADDR
                            " pa = %#" PRIxPADDR " (%#" PRIxPADDR ")",
-                           __func__, va, pa, atop(pa));
+                           __func__, va, oldpa, atop(pa));
                }
 
                old_pve = pmap_remove_pv(old_pp, ptp, va);
@@ -4286,7 +4324,6 @@
        error = 0;
 out:
        kpreempt_enable();
-out2:
        if (old_pve != NULL) {
                pool_cache_put(&pmap_pv_cache, old_pve);
        }


