Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/arm/pic Adapt pic to deal with the BCM2836 interrupts.
details: https://anonhg.NetBSD.org/src/rev/ecb8dfbb2d67
branches: trunk
changeset: 337335:ecb8dfbb2d67
user: matt <matt%NetBSD.org@localhost>
date: Sat Apr 11 19:39:09 2015 +0000
description:
Adapt pic to deal with the BCM2836 interrupts.
Move pic_pending_pics, pic_pending_ipls, and pic_blocked_irqs into a
structure and make them per-cpu. There is no global interrupt state anymore.
diffstat:
sys/arch/arm/pic/pic.c | 145 ++++++++++++++++++++++++++++++++++++------------
1 files changed, 108 insertions(+), 37 deletions(-)
diffs (283 lines):
diff -r e4c6396579fa -r ecb8dfbb2d67 sys/arch/arm/pic/pic.c
--- a/sys/arch/arm/pic/pic.c Sat Apr 11 19:24:13 2015 +0000
+++ b/sys/arch/arm/pic/pic.c Sat Apr 11 19:39:09 2015 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pic.c,v 1.28 2015/04/08 21:43:30 matt Exp $ */
+/* $NetBSD: pic.c,v 1.29 2015/04/11 19:39:09 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -33,7 +33,7 @@
#include "opt_multiprocessor.h"
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.28 2015/04/08 21:43:30 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.29 2015/04/11 19:39:09 matt Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@@ -61,17 +61,31 @@
#include <arm/pic/picvar.h>
#if defined(__HAVE_PIC_PENDING_INTRS)
+/*
+ * This implementation of pending interrupts on a MULTIPROCESSOR system makes
+ * the assumption that a PIC (pic_softc) shall only have all its interrupts
+ * come from the same CPU. In other words, interrupts from a single PIC will
+ * not be distributed among multiple CPUs.
+ */
+struct pic_pending {
+ volatile uint32_t blocked_pics;
+ volatile uint32_t pending_pics;
+ volatile uint32_t pending_ipls;
+};
static uint32_t
pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
static struct pic_softc *
- pic_list_find_pic_by_pending_ipl(uint32_t);
+ pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t);
static void
- pic_deliver_irqs(struct pic_softc *, int, void *);
+ pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *);
static void
- pic_list_deliver_irqs(register_t, int, void *);
-volatile uint32_t pic_blocked_pics;
-volatile uint32_t pic_pending_pics;
-volatile uint32_t pic_pending_ipls;
+ pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *);
+
+#ifdef MULTIPROCESSOR
+percpu_t *pic_pending_percpu;
+#else
+struct pic_pending pic_pending;
+#endif /* MULTIPROCESSOR */
#endif /* __HAVE_PIC_PENDING_INTRS */
struct pic_softc *pic_list[PIC_MAXPICS];
@@ -162,12 +176,22 @@
void
intr_ipi_send(const kcpuset_t *kcp, u_long ipi)
{
- struct pic_softc * const pic = pic_ipi_sender();
KASSERT(ipi < NIPI);
- if (cold && pic == NULL)
- return;
- KASSERT(pic != NULL);
- (*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
+ bool __diagused sent_p = false;
+ for (size_t slot = 0; slot < PIC_MAXPICS; slot++) {
+ struct pic_softc * const pic = pic_list[slot];
+ if (pic == NULL || pic->pic_cpus == NULL)
+ continue;
+ if (kcp == NULL || kcpuset_intersect(kcp, pic->pic_cpus)) {
+ (*pic->pic_ops->pic_ipi_send)(pic, kcp, ipi);
+ // If we were targeting a single CPU or this pic
+ // handles all cpus, we're done.
+ if (kcp != NULL || pic->pic_cpus == kcpuset_running)
+ return;
+ sent_p = true;
+ }
+ }
+ KASSERT(cold || sent_p);
}
#endif /* MULTIPROCESSOR */
@@ -208,8 +232,16 @@
__BIT(is->is_irq & 0x1f));
atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
- atomic_or_32(&pic_pending_ipls, ipl_mask);
- atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
+#ifdef MULTIPROCESSOR
+ struct pic_pending *pend = percpu_getref(pic_pending_percpu);
+#else
+ struct pic_pending *pend = &pic_pending;
+#endif
+ atomic_or_32(&pend->pending_ipls, ipl_mask);
+ atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
+#ifdef MULTIPROCESSOR
+ percpu_putref(pic_pending_percpu);
+#endif
}
void
@@ -252,9 +284,16 @@
}
atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
- atomic_or_32(&pic_pending_ipls, ipl_mask);
- atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
-
+#ifdef MULTIPROCESSOR
+ struct pic_pending *pend = percpu_getref(pic_pending_percpu);
+#else
+ struct pic_pending *pend = &pic_pending;
+#endif
+ atomic_or_32(&pend->pending_ipls, ipl_mask);
+ atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id));
+#ifdef MULTIPROCESSOR
+ percpu_putref(pic_pending_percpu);
+#endif
return ipl_mask;
}
@@ -324,7 +363,8 @@
#if defined(__HAVE_PIC_PENDING_INTRS)
void
-pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
+pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl,
+ void *frame)
{
const uint32_t ipl_mask = __BIT(ipl);
struct intrsource *is;
@@ -339,7 +379,7 @@
uint32_t blocked_irqs;
int irq;
bool progress __diagused = false;
-
+
KASSERT(pic->pic_pending_ipls & ipl_mask);
irq_base = 0;
@@ -403,7 +443,7 @@
} while (pending_irqs);
if (blocked_irqs) {
atomic_or_32(iblocked, blocked_irqs);
- atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
+ atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id));
}
}
@@ -413,15 +453,16 @@
* about these.
*/
if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
- atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
+ atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id));
}
static void
-pic_list_unblock_irqs(void)
+pic_list_unblock_irqs(struct pic_pending *pend)
{
- uint32_t blocked_pics = pic_blocked_pics;
+ uint32_t blocked_pics = pend->blocked_pics;
- pic_blocked_pics = 0;
+ pend->blocked_pics = 0;
+
for (;;) {
struct pic_softc *pic;
#if PIC_MAXSOURCES > 32
@@ -458,9 +499,9 @@
struct pic_softc *
-pic_list_find_pic_by_pending_ipl(uint32_t ipl_mask)
+pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask)
{
- uint32_t pending_pics = pic_pending_pics;
+ uint32_t pending_pics = pend->pending_pics;
struct pic_softc *pic;
for (;;) {
@@ -477,16 +518,17 @@
}
void
-pic_list_deliver_irqs(register_t psw, int ipl, void *frame)
+pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl,
+ void *frame)
{
const uint32_t ipl_mask = __BIT(ipl);
struct pic_softc *pic;
- while ((pic = pic_list_find_pic_by_pending_ipl(ipl_mask)) != NULL) {
- pic_deliver_irqs(pic, ipl, frame);
+ while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) {
+ pic_deliver_irqs(pend, pic, ipl, frame);
KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
}
- atomic_and_32(&pic_pending_ipls, ~ipl_mask);
+ atomic_and_32(&pend->pending_ipls, ~ipl_mask);
}
#endif /* __HAVE_PIC_PENDING_INTRS */
@@ -499,19 +541,27 @@
return;
}
#if defined(__HAVE_PIC_PENDING_INTRS)
- while ((pic_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
- KASSERT(pic_pending_ipls < __BIT(NIPL));
+#ifdef MULTIPROCESSOR
+ struct pic_pending *pend = percpu_getref(pic_pending_percpu);
+#else
+ struct pic_pending *pend = &pic_pending;
+#endif
+ while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
+ KASSERT(pend->pending_ipls < __BIT(NIPL));
for (;;) {
- int ipl = 31 - __builtin_clz(pic_pending_ipls);
+ int ipl = 31 - __builtin_clz(pend->pending_ipls);
KASSERT(ipl < NIPL);
if (ipl <= newipl)
break;
pic_set_priority(ci, ipl);
- pic_list_deliver_irqs(psw, ipl, frame);
- pic_list_unblock_irqs();
+ pic_list_deliver_irqs(pend, psw, ipl, frame);
+ pic_list_unblock_irqs(pend);
}
}
+#ifdef MULTIPROCESSOR
+ percpu_putref(pic_pending_percpu);
+#endif
#endif /* __HAVE_PIC_PENDING_INTRS */
#ifdef __HAVE_PREEEMPTION
if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
@@ -554,6 +604,15 @@
#endif
}
+#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
+static void
+pic_pending_zero(void *v0, void *v1, struct cpu_info *ci)
+{
+ struct pic_pending * const p = v0;
+ memset(p, 0, sizeof(*p));
+}
+#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */
+
void
pic_add(struct pic_softc *pic, int irqbase)
{
@@ -561,6 +620,18 @@
KASSERT(strlen(pic->pic_name) > 0);
+#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR)
+ if (__predict_false(pic_pending_percpu == NULL)) {
+ pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending));
+ KASSERT(pic_pending_percpu != NULL);
+
+ /*
+ * Now zero the per-cpu pending data.
+ */
+ percpu_foreach(pic_pending_percpu, pic_pending_zero, NULL);
+ }
+#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */
+
for (slot = 0; slot < PIC_MAXPICS; slot++) {
struct pic_softc * const xpic = pic_list[slot];
if (xpic == NULL) {
@@ -614,7 +685,7 @@
KASSERT((slot == 0) == (pic->pic_ops->pic_set_priority != NULL));
#endif
#ifdef MULTIPROCESSOR
- KASSERT((slot == 0) == (pic->pic_ops->pic_ipi_send != NULL));
+ KASSERT((pic->pic_cpus != NULL) == (pic->pic_ops->pic_ipi_send != NULL));
#endif
pic_list[slot] = pic;
}
Home |
Main Index |
Thread Index |
Old Index