Source-Changes-HG archive


[src/trunk]: src/sys/kern PR port-sparc/54718 (sparc install hangs since recent scheduler changes)



details:   https://anonhg.NetBSD.org/src/rev/922b4a464ae5
branches:  trunk
changeset: 1005167:922b4a464ae5
user:      ad <ad%NetBSD.org@localhost>
date:      Sun Dec 01 13:20:42 2019 +0000

description:
PR port-sparc/54718 (sparc install hangs since recent scheduler changes)

- sched_tick: cpu_need_resched() is no longer the correct thing to do here.
  All we need to do is OR the request into the local ci_want_resched (a
  stand-alone sketch of this pattern follows this list).

- sched_resched_cpu: we need to set RESCHED_UPREEMPT even on softint LWPs,
  especially in the !__HAVE_FAST_SOFTINTS case, because an LWP with the
  LP_INTR flag could be running via softint_overlay(), i.e. it has been
  temporarily borrowed from a user process, and it needs to notice the
  resched after it has stopped running softints (the reworked test is
  restated in a sketch after the diff).
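
To make the first item above concrete, here is a minimal stand-alone
sketch of the pattern the new sched_tick code uses: the tick handler
ORs the pending request bit into the current CPU's want_resched word
instead of calling cpu_need_resched().  The struct layout, the bit
value and the use of C11 atomic_fetch_or() are simplified user-space
stand-ins for the kernel's cpu_info and atomic_or_uint(), not the real
NetBSD definitions.

/*
 * Sketch only: post a resched request on the local CPU by OR-ing it
 * into a per-CPU flags word; it is acted on at the next preemption
 * point rather than via an immediate cross-call.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RESCHED_UPREEMPT        0x04    /* hypothetical bit value */

struct cpu_info {
        atomic_uint ci_want_resched;    /* pending resched requests */
};

static void
sched_tick_sketch(struct cpu_info *ci)
{
        /* The running LWP used up its quantum on this CPU. */
        atomic_fetch_or(&ci->ci_want_resched, RESCHED_UPREEMPT);
}

int
main(void)
{
        struct cpu_info ci = { .ci_want_resched = 0 };

        sched_tick_sketch(&ci);
        printf("ci_want_resched = %#x\n",
            (unsigned)atomic_load(&ci.ci_want_resched));
        return 0;
}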

diffstat:

 sys/kern/kern_runq.c  |  13 ++++---------
 sys/kern/sched_4bsd.c |  14 +++++++-------
 sys/kern/sched_m2.c   |   6 +++---
 3 files changed, 14 insertions(+), 19 deletions(-)

diffs (112 lines):

diff -r 6b49b97159da -r 922b4a464ae5 sys/kern/kern_runq.c
--- a/sys/kern/kern_runq.c      Sun Dec 01 12:47:10 2019 +0000
+++ b/sys/kern/kern_runq.c      Sun Dec 01 13:20:42 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_runq.c,v 1.50 2019/11/27 20:31:13 ad Exp $        */
+/*     $NetBSD: kern_runq.c,v 1.51 2019/12/01 13:20:42 ad Exp $        */
 
 /*-
  * Copyright (c) 2019 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.50 2019/11/27 20:31:13 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.51 2019/12/01 13:20:42 ad Exp $");
 
 #include "opt_dtrace.h"
 
@@ -381,13 +381,8 @@
        l = ci->ci_data.cpu_onproc;
        if ((l->l_flag & LW_IDLE) != 0) {
                f = RESCHED_IDLE | RESCHED_UPREEMPT;
-       } else if ((l->l_pflag & LP_INTR) != 0) {
-               /* We can't currently preempt interrupt LWPs - should do. */
-               if (__predict_true(unlock)) {
-                       spc_unlock(ci);
-               }
-               return;
-       } else if (pri >= sched_kpreempt_pri) {
+       } else if (pri >= sched_kpreempt_pri && (l->l_pflag & LP_INTR) == 0) {
+               /* We can't currently preempt softints - should be able to. */
 #ifdef __HAVE_PREEMPTION
                f = RESCHED_KPREEMPT;
 #else
diff -r 6b49b97159da -r 922b4a464ae5 sys/kern/sched_4bsd.c
--- a/sys/kern/sched_4bsd.c     Sun Dec 01 12:47:10 2019 +0000
+++ b/sys/kern/sched_4bsd.c     Sun Dec 01 13:20:42 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: sched_4bsd.c,v 1.38 2019/11/29 18:29:45 ad Exp $       */
+/*     $NetBSD: sched_4bsd.c,v 1.39 2019/12/01 13:20:42 ad Exp $       */
 
 /*
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.38 2019/11/29 18:29:45 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.39 2019/12/01 13:20:42 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -130,9 +130,9 @@
                /* Force it into mi_switch() to look for other jobs to run. */
 #ifdef __HAVE_PREEMPTION
                atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
-               cpu_need_resched(ci, l, RESCHED_KPREEMPT);
+               atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
 #else
-               cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+               atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
 #endif
                break;
        default:
@@ -144,9 +144,9 @@
                         */
 #ifdef __HAVE_PREEMPTION
                        atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
-                       cpu_need_resched(ci, l, RESCHED_KPREEMPT);
+                       atomic_or_uint(&ci->ci_want_resched, RESCHED_KPREEMPT);
 #else
-                       cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+                       atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
 #endif
                } else if (spc->spc_flags & SPCF_SEENRR) {
                        /*
@@ -155,7 +155,7 @@
                         * Indicate that the process should yield.
                         */
                        spc->spc_flags |= SPCF_SHOULDYIELD;
-                       cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+                       atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
                } else {
                        spc->spc_flags |= SPCF_SEENRR;
                }
diff -r 6b49b97159da -r 922b4a464ae5 sys/kern/sched_m2.c
--- a/sys/kern/sched_m2.c       Sun Dec 01 12:47:10 2019 +0000
+++ b/sys/kern/sched_m2.c       Sun Dec 01 13:20:42 2019 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: sched_m2.c,v 1.34 2019/11/22 20:07:53 ad Exp $ */
+/*     $NetBSD: sched_m2.c,v 1.35 2019/12/01 13:20:42 ad Exp $ */
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -33,7 +33,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.34 2019/11/22 20:07:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.35 2019/12/01 13:20:42 ad Exp $");
 
 #include <sys/param.h>
 
@@ -330,7 +330,7 @@
         */
        if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
                spc->spc_flags |= SPCF_SHOULDYIELD;
-               cpu_need_resched(ci, l, RESCHED_UPREEMPT);
+               atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
        } else
                spc->spc_ticks = l->l_sched.timeslice; 
        lwp_unlock(l);
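
As a companion to the second item in the description, the fragment
below restates the reworked test from the kern_runq.c hunk in
isolation: the early return for LP_INTR LWPs is gone, so a softint LWP
now simply fails the kernel-preemption condition and is handled like
any other non-idle LWP, ending up with RESCHED_UPREEMPT.  The flag
values, the lwp structure and the fixed priority threshold here are
stand-ins for illustration; the real sched_resched_cpu() has
additional cases not shown.

/* Sketch of the reworked selection of the resched flags. */
#include <stdio.h>

#define LW_IDLE                 0x01    /* hypothetical bit values */
#define LP_INTR                 0x02
#define RESCHED_IDLE            0x01
#define RESCHED_UPREEMPT        0x04
#define RESCHED_KPREEMPT        0x08

struct lwp_sketch {
        int l_flag;                     /* LW_* flags */
        int l_pflag;                    /* LP_* flags */
};

static const int sched_kpreempt_pri = 96;      /* arbitrary for the sketch */

static int
resched_flags_sketch(const struct lwp_sketch *l, int pri)
{

        if ((l->l_flag & LW_IDLE) != 0)
                return RESCHED_IDLE | RESCHED_UPREEMPT;
        if (pri >= sched_kpreempt_pri && (l->l_pflag & LP_INTR) == 0) {
                /* We can't currently preempt softints - should be able to. */
#ifdef __HAVE_PREEMPTION
                return RESCHED_KPREEMPT;
#else
                return RESCHED_UPREEMPT;
#endif
        }
        /*
         * Softint LWPs fall through here, so they still pick up
         * RESCHED_UPREEMPT and notice the resched after
         * softint_overlay() hands the borrowed LWP back.
         */
        return RESCHED_UPREEMPT;
}

int
main(void)
{
        struct lwp_sketch softint_lwp = { .l_flag = 0, .l_pflag = LP_INTR };

        printf("flags for softint LWP at pri 100 = %#x\n",
            resched_flags_sketch(&softint_lwp, 100));
        return 0;
}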


