Source-Changes-HG archive
[src/trunk]: src/sys/arch Add intr_mask() and corresponding intr_unmask() calls ...
details: https://anonhg.NetBSD.org/src/rev/ce358e793109
branches: trunk
changeset: 967785:ce358e793109
user: thorpej <thorpej%NetBSD.org@localhost>
date: Sun Dec 22 15:09:39 2019 +0000
description:
Add intr_mask() and corresponding intr_unmask() calls that allow specific
interrupt lines / sources to be masked as needed (rather than masking a
set of sources by IPL as with spl*()).
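As a sketch of the intended use (a hypothetical driver fragment; xyz_softc,
sc_ih, sc_wq, and xyz_process_events are illustrative names, not part of
this change): a hard interrupt handler masks only its own source and defers
the unmask to thread context, since intr_unmask() cannot be called from an
interrupt handler.

#include <sys/workqueue.h>
#include <machine/intr.h>

struct xyz_softc {
	void			*sc_ih;		/* from intr_establish_xname() */
	struct workqueue	*sc_wq;
	struct work		 sc_wk;
};

static int
xyz_intr(void *arg)
{
	struct xyz_softc *sc = arg;

	/* Hold off this source only; all other sources keep running. */
	intr_mask(sc->sc_ih);
	workqueue_enqueue(sc->sc_wq, &sc->sc_wk, NULL);
	return 1;
}

static void
xyz_work(struct work *wk, void *arg)
{
	struct xyz_softc *sc = arg;

	xyz_process_events(sc);		/* drain the device (hypothetical) */

	/* Thread context: intr_unmask() may cross-call and block. */
	intr_unmask(sc->sc_ih);
}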
diffstat:
sys/arch/amd64/amd64/genassym.cf | 5 +-
sys/arch/amd64/amd64/vector.S | 6 +-
sys/arch/i386/i386/genassym.cf | 3 +-
sys/arch/i386/i386/vector.S | 8 +-
sys/arch/x86/include/intr.h | 16 +++-
sys/arch/x86/x86/intr.c | 174 ++++++++++++++++++++++++++++++++++++--
6 files changed, 195 insertions(+), 17 deletions(-)
diffs (truncated from 377 to 300 lines):
diff -r afb50ee85cb5 -r ce358e793109 sys/arch/amd64/amd64/genassym.cf
--- a/sys/arch/amd64/amd64/genassym.cf Sun Dec 22 15:00:42 2019 +0000
+++ b/sys/arch/amd64/amd64/genassym.cf Sun Dec 22 15:09:39 2019 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.78 2019/11/21 19:27:54 ad Exp $
+# $NetBSD: genassym.cf,v 1.79 2019/12/22 15:09:39 thorpej Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -321,7 +321,8 @@
define IS_PIN offsetof(struct intrsource, is_pin)
define IS_TYPE offsetof(struct intrsource, is_type)
define IS_MAXLEVEL offsetof(struct intrsource, is_maxlevel)
-define IS_LWP offsetof(struct intrsource, is_lwp)
+define IS_LWP offsetof(struct intrsource, is_lwp)
+define IS_MASK_COUNT offsetof(struct intrsource, is_mask_count)
define IPL_NONE IPL_NONE
define IPL_PREEMPT IPL_PREEMPT
diff -r afb50ee85cb5 -r ce358e793109 sys/arch/amd64/amd64/vector.S
--- a/sys/arch/amd64/amd64/vector.S Sun Dec 22 15:00:42 2019 +0000
+++ b/sys/arch/amd64/amd64/vector.S Sun Dec 22 15:09:39 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vector.S,v 1.71 2019/11/17 14:07:00 maxv Exp $ */
+/* $NetBSD: vector.S,v 1.72 2019/12/22 15:09:39 thorpej Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -391,6 +391,8 @@
sti ;\
incl CPUVAR(IDEPTH) ;\
movq IS_HANDLERS(%r14),%rbx ;\
+ cmpl $0,IS_MASK_COUNT(%r14) /* source currently masked? */ ;\
+ jne 7f /* yes, hold it */ ;\
6: \
movl IH_LEVEL(%rbx),%r12d ;\
cmpl %r13d,%r12d ;\
@@ -403,6 +405,8 @@
testq %rbx,%rbx ;\
jnz 6b ;\
5: \
+ cmpl $0,IS_MASK_COUNT(%r14) /* source now masked? */ ;\
+ jne 7f /* yes, deal */ ;\
cli ;\
unmask(num) /* unmask it in hardware */ ;\
late_ack(num) ;\
diff -r afb50ee85cb5 -r ce358e793109 sys/arch/i386/i386/genassym.cf
--- a/sys/arch/i386/i386/genassym.cf Sun Dec 22 15:00:42 2019 +0000
+++ b/sys/arch/i386/i386/genassym.cf Sun Dec 22 15:09:39 2019 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.115 2019/11/21 19:27:54 ad Exp $
+# $NetBSD: genassym.cf,v 1.116 2019/12/22 15:09:39 thorpej Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -322,6 +322,7 @@
define IS_TYPE offsetof(struct intrsource, is_type)
define IS_MAXLEVEL offsetof(struct intrsource, is_maxlevel)
define IS_LWP offsetof(struct intrsource, is_lwp)
+define IS_MASK_COUNT offsetof(struct intrsource, is_mask_count)
define IPL_NONE IPL_NONE
define IPL_PREEMPT IPL_PREEMPT
diff -r afb50ee85cb5 -r ce358e793109 sys/arch/i386/i386/vector.S
--- a/sys/arch/i386/i386/vector.S Sun Dec 22 15:00:42 2019 +0000
+++ b/sys/arch/i386/i386/vector.S Sun Dec 22 15:09:39 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vector.S,v 1.83 2019/02/15 08:54:01 nonaka Exp $ */
+/* $NetBSD: vector.S,v 1.84 2019/12/22 15:09:39 thorpej Exp $ */
/*
* Copyright 2002 (c) Wasabi Systems, Inc.
@@ -65,7 +65,7 @@
*/
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.83 2019/02/15 08:54:01 nonaka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.84 2019/12/22 15:09:39 thorpej Exp $");
#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@@ -408,6 +408,8 @@
IDEPTH_INCR ;\
sti ;\
movl IS_HANDLERS(%ebp),%ebx ;\
+ cmpl $0,IS_MASK_COUNT(%ebp) /* source currently masked? */ ;\
+ jne 7f /* yes, hold it */ ;\
6: \
movl IH_LEVEL(%ebx),%edi ;\
cmpl %esi,%edi ;\
@@ -420,6 +422,8 @@
addl $4,%esp /* toss the arg */ ;\
testl %ebx,%ebx ;\
jnz 6b ;\
+ cmpl $0,IS_MASK_COUNT(%ebp) /* source now masked? */ ;\
+ jne 7f /* yes, deal */ ;\
cli ;\
unmask(num) /* unmask it in hardware */ ;\
late_ack(num) ;\
diff -r afb50ee85cb5 -r ce358e793109 sys/arch/x86/include/intr.h
--- a/sys/arch/x86/include/intr.h Sun Dec 22 15:00:42 2019 +0000
+++ b/sys/arch/x86/include/intr.h Sun Dec 22 15:09:39 2019 +0000
@@ -1,7 +1,7 @@
-/* $NetBSD: intr.h,v 1.60 2019/02/14 08:18:25 cherry Exp $ */
+/* $NetBSD: intr.h,v 1.61 2019/12/22 15:09:39 thorpej Exp $ */
/*-
- * Copyright (c) 1998, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 1998, 2001, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -95,6 +95,16 @@
u_long ipl_evt_mask2[NR_EVENT_CHANNELS];
#endif
struct evcnt is_evcnt; /* interrupt counter per cpu */
+ /*
+ * is_mask_count requires special handling; it can only be modified
+ * or examined on the CPU that owns the interrupt source, and such
+ * references need to be protected by disabling interrupts. This
+ * is because intr_mask() can be called from an interrupt handler.
+ * is_distribute_pending does not require such special handling
+ * because intr_unmask() cannot be called from an interrupt handler.
+ */
+ u_int is_mask_count; /* masked? (nested) [see above] */
+ int is_distribute_pending; /* ci<->ci move pending [cpu_lock] */
int is_flags; /* see below */
int is_type; /* level, edge */
int is_idtvec;
@@ -215,6 +225,8 @@
void *intr_establish_xname(int, struct pic *, int, int, int, int (*)(void *),
void *, bool, const char *);
void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *, bool);
+void intr_mask(struct intrhand *);
+void intr_unmask(struct intrhand *);
void intr_disestablish(struct intrhand *);
void intr_add_pcibus(struct pcibus_attach_args *);
const char *intr_string(intr_handle_t, char *, size_t);
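Note that the mask is counted: nested intr_mask() calls must be balanced by
intr_unmask() calls before the source is unmasked in hardware. A minimal
sketch of that counting discipline (illustrative only; hw_mask() and
hw_unmask() stand in for the pic_hwmask/pic_hwunmask hooks driven by the
real code in intr.c below):

/* Illustrative model of the is_mask_count discipline; not kernel code. */
void
mask(struct intrsource *is)
{
	if (is->is_mask_count++ == 0)
		hw_mask(is);		/* first mask call touches hardware */
}

void
unmask(struct intrsource *is)
{
	KASSERT(is->is_mask_count != 0);
	if (--is->is_mask_count == 0)
		hw_unmask(is);		/* last unmask call touches hardware */
}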
diff -r afb50ee85cb5 -r ce358e793109 sys/arch/x86/x86/intr.c
--- a/sys/arch/x86/x86/intr.c Sun Dec 22 15:00:42 2019 +0000
+++ b/sys/arch/x86/x86/intr.c Sun Dec 22 15:09:39 2019 +0000
@@ -1,11 +1,11 @@
-/* $NetBSD: intr.c,v 1.147 2019/11/08 04:15:02 msaitoh Exp $ */
+/* $NetBSD: intr.c,v 1.148 2019/12/22 15:09:39 thorpej Exp $ */
/*
- * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
+ * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Andrew Doran.
+ * by Andrew Doran, and by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -133,7 +133,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.147 2019/11/08 04:15:02 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.148 2019/12/22 15:09:39 thorpej Exp $");
#include "opt_intrdebug.h"
#include "opt_multiprocessor.h"
@@ -743,6 +743,34 @@
}
/*
+ * Called on bound CPU to handle calling pic_hwunmask from contexts
+ * that are not already running on the bound CPU.
+ *
+ * => caller (on initiating CPU) holds cpu_lock on our behalf
+ * => arg1: struct intrhand *ih
+ */
+static void
+intr_hwunmask_xcall(void *arg1, void *arg2)
+{
+ struct intrhand * const ih = arg1;
+ struct cpu_info * const ci = ih->ih_cpu;
+
+ KASSERT(ci == curcpu() || !mp_online);
+
+ const u_long psl = x86_read_psl();
+ x86_disable_intr();
+
+ struct intrsource * const source = ci->ci_isources[ih->ih_slot];
+ struct pic * const pic = source->is_pic;
+
+ if (source->is_mask_count == 0) {
+ (*pic->pic_hwunmask)(pic, ih->ih_pin);
+ }
+
+ x86_write_psl(psl);
+}
+
+/*
* Handle per-CPU component of interrupt establish.
*
* => caller (on initiating CPU) holds cpu_lock on our behalf
@@ -958,7 +986,12 @@
/* All set up, so add a route for the interrupt and unmask it. */
(*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
- (*pic->pic_hwunmask)(pic, pin);
+ if (ci == curcpu() || !mp_online) {
+ intr_hwunmask_xcall(ih, NULL);
+ } else {
+ where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci);
+ xc_wait(where);
+ }
mutex_exit(&cpu_lock);
if (bootverbose || cpu_index(ci) != 0)
@@ -980,6 +1013,118 @@
}
/*
+ * Called on bound CPU to handle intr_mask() / intr_unmask().
+ *
+ * => caller (on initiating CPU) holds cpu_lock on our behalf
+ * => arg1: struct intrhand *ih
+ * => arg2: true -> mask, false -> unmask.
+ */
+static void
+intr_mask_xcall(void *arg1, void *arg2)
+{
+ struct intrhand * const ih = arg1;
+ const uintptr_t mask = (uintptr_t)arg2;
+ struct cpu_info * const ci = ih->ih_cpu;
+ bool force_pending = false;
+
+ KASSERT(ci == curcpu() || !mp_online);
+
+ /*
+ * We need to disable interrupts to hold off the interrupt
+ * vectors.
+ */
+ const u_long psl = x86_read_psl();
+ x86_disable_intr();
+
+ struct intrsource * const source = ci->ci_isources[ih->ih_slot];
+ struct pic * const pic = source->is_pic;
+
+ if (mask) {
+ source->is_mask_count++;
+ KASSERT(source->is_mask_count != 0);
+ if (source->is_mask_count == 1) {
+ (*pic->pic_hwmask)(pic, ih->ih_pin);
+ }
+ } else {
+ KASSERT(source->is_mask_count != 0);
+ if (--source->is_mask_count == 0) {
+ /*
+ * If this interrupt source is being moved, don't
+ * unmask it at the hw.
+ */
+ if (! source->is_distribute_pending)
+ (*pic->pic_hwunmask)(pic, ih->ih_pin);
+ force_pending = true;
+ }
+ }
+
+ /* Re-enable interrupts. */
+ x86_write_psl(psl);
+
+ if (force_pending) {
+ /* Force processing of any pending interrupts. */
+ splx(splhigh());
+ }
+}
+
+static void
+intr_mask_internal(struct intrhand * const ih, const bool mask)
+{
+
+ /*
+ * Call out to the remote CPU to update its interrupt state.
+ * Only make RPCs if the APs are up and running.
+ */
+ mutex_enter(&cpu_lock);
+ struct cpu_info * const ci = ih->ih_cpu;
+ void * const mask_arg = (void *)(uintptr_t)mask;
+ if (ci == curcpu() || !mp_online) {
+ intr_mask_xcall(ih, mask_arg);
+ } else {
+ const uint64_t where =
+ xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci);
+ xc_wait(where);
+ }
+ mutex_exit(&cpu_lock);
+}
+
+void
+intr_mask(struct intrhand *ih)
+{
+
+ if (cpu_intr_p()) {
+ /*
+ * Special case of calling intr_mask() from an interrupt