Source-Changes-HG archive
[src/trunk]: src/sys/kern For secondary CPUs, the idle LWP is the first to ru...
details: https://anonhg.NetBSD.org/src/rev/3c242763da34
branches: trunk
changeset: 744157:3c242763da34
user: ad <ad%NetBSD.org@localhost>
date: Sat Jan 25 20:29:43 2020 +0000
description:
For secondary CPUs, the idle LWP is the first to run, and it's directly
entered from MD code without a trip through mi_switch(). Make the picture
look good in case the CPU takes an interrupt before it calls idle_loop().
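For context, a minimal sketch of the MD hand-off the description refers to. The function name cpu_hatch() and its body are illustrative assumptions (the real routine is architecture-specific and not part of this change); the only point carried over from the commit message is that the idle LWP becomes curlwp without a trip through mi_switch().

/*
 * Illustrative sketch only: a secondary CPU's MD startup path.
 * The real code is architecture-specific.  The idle LWP created by
 * create_idle_lwp() becomes curlwp here without ever passing through
 * mi_switch(), so an interrupt taken before idle_loop() runs sees
 * whatever state create_idle_lwp() left behind.
 */
void
cpu_hatch(struct cpu_info *ci)	/* hypothetical MD entry point */
{
	/* ... MD hardware setup: MMU, clock, interrupts ... */

	/* Directly enter the idle LWP set up by create_idle_lwp(). */
	idle_loop(NULL);
}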
diffstat:
sys/kern/kern_idle.c | 27 ++++++++++++++++++---------
1 files changed, 18 insertions(+), 9 deletions(-)
diffs (59 lines):
diff -r 0536c6d5a8fb -r 3c242763da34 sys/kern/kern_idle.c
--- a/sys/kern/kern_idle.c Sat Jan 25 19:22:05 2020 +0000
+++ b/sys/kern/kern_idle.c Sat Jan 25 20:29:43 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $ */
+/* $NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $ */
/*-
* Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@@ -28,7 +28,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.30 2020/01/08 17:38:42 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.31 2020/01/25 20:29:43 ad Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -49,17 +49,15 @@
struct schedstate_percpu *spc;
struct lwp *l = curlwp;
- kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
+ lwp_lock(l);
spc = &ci->ci_schedstate;
- ci->ci_onproc = l;
-
+ KASSERT(lwp_locked(l, spc->spc_lwplock));
+ kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
/* Update start time for this thread. */
- lwp_lock(l);
- KASSERT(lwp_locked(l, spc->spc_lwplock));
binuptime(&l->l_stime);
spc->spc_flags |= SPCF_RUNNING;
- l->l_stat = LSONPROC;
- l->l_flag |= LW_RUNNING;
+ KASSERT(l->l_stat == LSONPROC);
+ KASSERT((l->l_flag & LW_RUNNING) != 0);
lwp_unlock(l);
/*
@@ -114,6 +112,17 @@
panic("create_idle_lwp: error %d", error);
lwp_lock(l);
l->l_flag |= LW_IDLE;
+ if (ci != lwp0.l_cpu) {
+ /*
+ * For secondary CPUs, the idle LWP is the first to run, and
+ * it's directly entered from MD code without a trip through
+ * mi_switch(). Make the picture look good in case the CPU
+ * takes an interrupt before it calls idle_loop().
+ */
+ l->l_stat = LSONPROC;
+ l->l_flag |= LW_RUNNING;
+ ci->ci_onproc = l;
+ }
lwp_unlock(l);
ci->ci_data.cpu_idlelwp = l;
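With that in place, an interrupt taken on a just-hatched secondary CPU before idle_loop() runs already observes a consistent picture. The helper below is not part of the commit; it is a hypothetical set of assertions, using only fields touched by this change, that hold for a secondary CPU (ci != lwp0.l_cpu) from the moment create_idle_lwp() returns.

/*
 * Hypothetical sanity check, for illustration only: everything
 * asserted here is now established by create_idle_lwp() for
 * secondary CPUs, so it holds even before the CPU has entered
 * idle_loop().
 */
static inline void
idle_lwp_sanity(struct cpu_info *ci)
{
	struct lwp *l = ci->ci_data.cpu_idlelwp;

	KASSERT((l->l_flag & LW_IDLE) != 0);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT((l->l_flag & LW_RUNNING) != 0);
	KASSERT(ci->ci_onproc == l);
}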