Source-Changes-HG archive

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]

[src/trunk]: src/sys softint_overlay() (slow case) gains ~nothing but creates...



details:   https://anonhg.NetBSD.org/src/rev/cbaa3c1ee73d
branches:  trunk
changeset: 850160:cbaa3c1ee73d
user:      ad <ad%NetBSD.org@localhost>
date:      Thu Mar 26 20:19:06 2020 +0000

description:
softint_overlay() (slow case) gains ~nothing but creates potential headaches.
In the interests of simplicity remove it and always use the kthreads.

diffstat:

 sys/kern/kern_lwp.c     |  10 +----
 sys/kern/kern_softint.c |  98 +++++++++---------------------------------------
 sys/sys/intr.h          |   3 +-
 sys/sys/userret.h       |   6 +--
 4 files changed, 23 insertions(+), 94 deletions(-)

diffs (224 lines):

diff -r edf9b0a3a9bf -r cbaa3c1ee73d sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c       Thu Mar 26 19:47:23 2020 +0000
+++ b/sys/kern/kern_lwp.c       Thu Mar 26 20:19:06 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_lwp.c,v 1.229 2020/03/08 17:04:45 ad Exp $        */
+/*     $NetBSD: kern_lwp.c,v 1.230 2020/03/26 20:19:06 ad Exp $        */
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
@@ -211,7 +211,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.229 2020/03/08 17:04:45 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.230 2020/03/26 20:19:06 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1593,12 +1593,6 @@
        KASSERT(l->l_stat == LSONPROC);
        p = l->l_proc;
 
-#ifndef __HAVE_FAST_SOFTINTS
-       /* Run pending soft interrupts. */
-       if (l->l_cpu->ci_data.cpu_softints != 0)
-               softint_overlay();
-#endif
-
        /*
         * It is safe to do this read unlocked on a MP system..
         */
diff -r edf9b0a3a9bf -r cbaa3c1ee73d sys/kern/kern_softint.c
--- a/sys/kern/kern_softint.c   Thu Mar 26 19:47:23 2020 +0000
+++ b/sys/kern/kern_softint.c   Thu Mar 26 20:19:06 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: kern_softint.c,v 1.62 2020/03/08 15:05:18 ad Exp $     */
+/*     $NetBSD: kern_softint.c,v 1.63 2020/03/26 20:19:06 ad Exp $     */
 
 /*-
  * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.62 2020/03/08 15:05:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.63 2020/03/26 20:19:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -196,7 +196,7 @@
        uintptr_t               si_machdep;
        struct evcnt            si_evcnt;
        struct evcnt            si_evcnt_block;
-       int                     si_active;
+       volatile int            si_active;
        char                    si_name[8];
        char                    si_name_block[8+6];
 } softint_t;
@@ -546,11 +546,7 @@
 {
        softhand_t *sh;
 
-#ifdef __HAVE_FAST_SOFTINTS
        KASSERT(si->si_lwp == curlwp);
-#else
-       /* May be running in user context. */
-#endif
        KASSERT(si->si_cpu == curcpu());
        KASSERT(si->si_lwp->l_wchan == NULL);
        KASSERT(si->si_active);
@@ -678,12 +674,22 @@
        ci = curcpu();
        ci->ci_data.cpu_softints |= machdep;
        l = ci->ci_onproc;
+
+       /*
+        * Arrange for mi_switch() to be called.  If called from interrupt
+        * mode, we don't know if curlwp is executing in kernel or user, so
+        * post an AST and have it take a trip through userret().  If not in
+        * interrupt mode, curlwp is running in kernel and will notice the
+        * resched soon enough; avoid the AST.
+        */
        if (l == ci->ci_data.cpu_idlelwp) {
                atomic_or_uint(&ci->ci_want_resched,
                    RESCHED_IDLE | RESCHED_UPREEMPT);
        } else {
-               /* MI equivalent of aston() */
-               cpu_signotify(l);
+               atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
+               if (cpu_intr_p()) {
+                       cpu_signotify(l);
+               }
        }
 }
 
@@ -754,65 +760,6 @@
        return l;
 }
 
-/*
- * softint_overlay:
- *
- *     Slow path: called from lwp_userret() to run a soft interrupt
- *     within the context of a user thread.
- */
-void
-softint_overlay(void)
-{
-       struct cpu_info *ci;
-       u_int softints, oflag;
-       softint_t *si;
-       pri_t obase;
-       lwp_t *l;
-       int s;
-
-       l = curlwp;
-       KASSERT((l->l_pflag & LP_INTR) == 0);
-
-       /*
-        * Arrange to elevate priority if the LWP blocks.  Also, bind LWP
-        * to the CPU.  Note: disable kernel preemption before doing that.
-        */
-       s = splhigh();
-       ci = l->l_cpu;
-       si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
-
-       obase = l->l_kpribase;
-       l->l_kpribase = PRI_KERNEL_RT;
-       oflag = l->l_pflag;
-       l->l_pflag = oflag | LP_INTR | LP_BOUND;
-
-       while ((softints = ci->ci_data.cpu_softints) != 0) {
-               if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
-                       ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
-                       softint_execute(&si[SOFTINT_SERIAL], l, s);
-                       continue;
-               }
-               if ((softints & (1 << SOFTINT_NET)) != 0) {
-                       ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
-                       softint_execute(&si[SOFTINT_NET], l, s);
-                       continue;
-               }
-               if ((softints & (1 << SOFTINT_BIO)) != 0) {
-                       ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
-                       softint_execute(&si[SOFTINT_BIO], l, s);
-                       continue;
-               }
-               if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
-                       ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
-                       softint_execute(&si[SOFTINT_CLOCK], l, s);
-                       continue;
-               }
-       }
-       l->l_pflag = oflag;
-       l->l_kpribase = obase;
-       splx(s);
-}
-
 #else  /*  !__HAVE_FAST_SOFTINTS */
 
 /*
@@ -892,20 +839,13 @@
 
        /*
         * If we blocked while handling the interrupt, the pinned LWP is
-        * gone so switch to the idle LWP.  It will select a new LWP to
-        * run.
-        *
-        * We must drop the priority level as switching at IPL_HIGH could
-        * deadlock the system.  We have already set si->si_active = 0,
-        * which means another interrupt at this level can be triggered. 
-        * That's not a problem: we are lowering to level 's' which will
-        * prevent softint_dispatch() from being reentered at level 's',
-        * until the priority is finally dropped to IPL_NONE on entry to
-        * the LWP chosen by mi_switch().
+        * gone, so switch away.  mi_switch() will select a new LWP to
+        * run.  softint_dispatch() won't be reentered until the priority
+        * is finally dropped to IPL_NONE on entry to the LWP chosen by
+        * mi_switch().
         */
        l->l_stat = LSIDL;
        if (l->l_switchto == NULL) {
-               splx(s);
                lwp_lock(l);
                spc_lock(l->l_cpu);
                mi_switch(l);
diff -r edf9b0a3a9bf -r cbaa3c1ee73d sys/sys/intr.h
--- a/sys/sys/intr.h    Thu Mar 26 19:47:23 2020 +0000
+++ b/sys/sys/intr.h    Thu Mar 26 20:19:06 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: intr.h,v 1.19 2015/08/17 06:16:03 knakahara Exp $      */
+/*     $NetBSD: intr.h,v 1.20 2020/03/26 20:19:06 ad Exp $     */
 
 /*-
  * Copyright (c) 2007 The NetBSD Foundation, Inc.
@@ -50,7 +50,6 @@
 /* MI hooks. */
 void   softint_init(struct cpu_info *);
 lwp_t  *softint_picklwp(void);
-void   softint_overlay(void);
 void   softint_block(lwp_t *);
 
 /* MD-MI interface. */
diff -r edf9b0a3a9bf -r cbaa3c1ee73d sys/sys/userret.h
--- a/sys/sys/userret.h Thu Mar 26 19:47:23 2020 +0000
+++ b/sys/sys/userret.h Thu Mar 26 20:19:06 2020 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: userret.h,v 1.32 2020/01/22 12:23:04 ad Exp $  */
+/*     $NetBSD: userret.h,v 1.33 2020/03/26 20:19:06 ad Exp $  */
 
 /*-
  * Copyright (c) 1998, 2000, 2003, 2006, 2008, 2019, 2020
@@ -91,11 +91,7 @@
                preempt();
                ci = l->l_cpu;
        }
-#ifdef __HAVE_FAST_SOFTINTS
        if (__predict_false(l->l_flag & LW_USERRET)) {
-#else
-       if (((l->l_flag & LW_USERRET) | ci->ci_data.cpu_softints) != 0) {
-#endif
                KPREEMPT_ENABLE(l);
                lwp_userret(l);
                KPREEMPT_DISABLE(l);



Home | Main Index | Thread Index | Old Index