[src-draft/trunk]: src/sys/arch/arm/vfp Add kthread_fpu_enter/exit support to...
details: https://anonhg.NetBSD.org/src-all/rev/88114cdf465e
branches: trunk
changeset: 936594:88114cdf465e
user: Taylor R Campbell <riastradh@NetBSD.org>
date: Fri Jul 31 03:11:19 2020 +0000
description:
Add kthread_fpu_enter/exit support to arm.
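
For illustration, this is how a kernel thread might bracket its FPU
(VFP/NEON) use with the MI API that these MD hooks back.  The worker
function and its name are hypothetical, and the exact prototypes
should be taken from sys/sys/kthread.h rather than from this sketch;
kthread_fpu_enter is assumed here to return the previous
LW_SYSTEM_FPU state so that enter/exit pairs can nest:

	#include <sys/types.h>
	#include <sys/kthread.h>

	static void
	example_fpu_worker(void *cookie)
	{
		int s;

		/* Mark this kthread as having private FPU state. */
		s = kthread_fpu_enter();

		/* ... VFP/NEON instructions may be used freely ... */

		/* Drop (and zero) the private FPU state. */
		kthread_fpu_exit(s);

		kthread_exit(0);
	}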
diffstat:
sys/arch/arm/vfp/vfp_init.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
1 files changed, 42 insertions(+), 2 deletions(-)
diffs (91 lines):
diff -r 3a9b1ca3581a -r 88114cdf465e sys/arch/arm/vfp/vfp_init.c
--- a/sys/arch/arm/vfp/vfp_init.c Fri Jul 31 03:10:13 2020 +0000
+++ b/sys/arch/arm/vfp/vfp_init.c Fri Jul 31 03:11:19 2020 +0000
@@ -38,6 +38,7 @@
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/device.h>
+#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
@@ -504,7 +505,8 @@
return 1;
/* This shouldn't ever happen. */
- if (fault_code != FAULT_USER)
+ if (fault_code != FAULT_USER &&
+ (curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) == LW_SYSTEM)
panic("NEON fault in non-user mode");
/* if we already own the FPU and it's enabled, raise SIGILL */
@@ -668,6 +670,19 @@
sizeof(mcp->__fpu.__vfpregs.__vfp_fstmx));
}
+/*
+ * True if this is a system thread with its own private FPU state.
+ */
+static inline bool
+lwp_system_fpu_p(struct lwp *l)
+{
+
+ return (l->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
+ (LW_SYSTEM|LW_SYSTEM_FPU);
+}
+
+static const struct vfpreg zero_vfpreg;
+
void
fpu_kern_enter(void)
{
@@ -675,6 +690,11 @@
uint32_t fpexc;
int s;
+ if (lwp_system_fpu_p(curlwp) && !cpu_intr_p()) {
+ KASSERT(!cpu_softintr_p());
+ return;
+ }
+
/*
* Block interrupts up to IPL_VM. We must block preemption
* since -- if this is a user thread -- there is nowhere to
@@ -701,11 +721,15 @@
void
fpu_kern_leave(void)
{
- static const struct vfpreg zero_vfpreg;
struct cpu_info *ci = curcpu();
int s;
uint32_t fpexc;
+ if (lwp_system_fpu_p(curlwp) && !cpu_intr_p()) {
+ KASSERT(!cpu_softintr_p());
+ return;
+ }
+
KASSERT(ci->ci_cpl == IPL_VM);
KASSERT(ci->ci_kfpu_spl != -1);
@@ -730,4 +754,20 @@
splx(s);
}
+void
+kthread_fpu_enter_md(void)
+{
+
+ pcu_load(&arm_vfp_ops);
+}
+
+void
+kthread_fpu_exit_md(void)
+{
+
+ /* XXX Should vfp_state_release zero the registers itself? */
+ load_vfpregs(&zero_vfpreg);
+ vfp_discardcontext(curlwp, 0);
+}
+
#endif /* FPU_VFP */
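
For reference, the MI wrappers that invoke the kthread_fpu_enter_md/
kthread_fpu_exit_md hooks added here live in sys/kern and are not
part of this diff.  A simplified sketch of what they are expected to
do, assuming the nestable save/restore form used in the example
above (consult sys/kern/kern_kthread.c for the authoritative
version):

	int
	kthread_fpu_enter(void)
	{
		struct lwp *l = curlwp;
		int s;

		KASSERT(!cpu_intr_p() && !cpu_softintr_p());
		KASSERT(l->l_flag & LW_SYSTEM);

		/* Save prior state so enter/exit pairs can nest. */
		s = l->l_flag & LW_SYSTEM_FPU;
		l->l_flag |= LW_SYSTEM_FPU;
		if (!s)
			kthread_fpu_enter_md();
		return s;
	}

	void
	kthread_fpu_exit(int s)
	{
		struct lwp *l = curlwp;

		KASSERT(l->l_flag & LW_SYSTEM_FPU);

		/* On the outermost exit, zero the FPU state. */
		if (!s) {
			kthread_fpu_exit_md();
			l->l_flag &= ~LW_SYSTEM_FPU;
		}
	}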