/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"
/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;
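/*
 * Note: vfp_vector above is the indirection used by the undef-instruction
 * handler.  vfp_init() below repoints it: vfp_testing_entry while FPSID is
 * probed, vfp_null_entry if no usable hardware is found, and
 * vfp_support_entry once VFP support is live.
 */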
/*
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds VFP architecture.
 */
unsigned int VFP_arch;
/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we must also check the CPU number stored in the
 * saved state, to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];
/*
 * Is 'thread's most up-to-date state stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}
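/*
 * Example of the migration hazard this catches: thread T owns the VFP
 * context on CPU0 (vfp_current_hw_state[0] == &T->vfpstate), then T
 * migrates to CPU1.  The stale CPU0 pointer still matches T, so the
 * hard.cpu check above is what forces T to reload its state on CPU1.
 */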
/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled, and
 * clearing vfp_current_hw_state.  Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_state_in_hw(cpu, thread)) {
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		vfp_current_hw_state[cpu] = NULL;
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware
	 * disable are done for the same CPU and without preemption.
	 *
	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
	vfp->hard.cpu = NR_CPUS;
#endif
}
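/*
 * hard.cpu == NR_CPUS never matches a real CPU number, so it serves as an
 * "owned by no CPU" sentinel that makes vfp_state_in_hw() fail until the
 * state is next loaded into hardware.
 */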
static void vfp_thread_exit(struct thread_info *thread)
{
	/* release case: Per-thread VFP cleanup. */
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}
static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *     it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};
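/*
 * This block is registered with thread_register_notifier() in vfp_init(),
 * after which every thread switch/flush/exit/copy event is routed through
 * vfp_notifier() above.
 */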
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}
static void vfp_panic(char *reason, u32 inst)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat, en, sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, least to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}
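/*
 * Since the RAISE() list runs from lowest to highest priority and each hit
 * overwrites si_code, the last (highest priority) enabled exception is the
 * one delivered to userspace.
 */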
/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}
	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}
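	/*
	 * Worked example for the fix-up above, assuming the usual field
	 * layout (FPEXC_LENGTH_BIT == 8, FPSCR_LENGTH_BIT == 16): the
	 * hardware field appears to hold "iterations minus one", so for a
	 * stored value of 2, adding 1 << FPEXC_LENGTH_BIT yields 3, which
	 * is then shifted up 8 bits into FPSCR.LEN.
	 */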
	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;
	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}
static void vfp_enable(void *unused)
{
	u32 access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
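/*
 * The coprocessor access register is per-CPU state, so vfp_enable() has to
 * execute on the CPU being granted access; callers therefore reach it via
 * smp_call_function(), the hotplug notifier or the PM resume path.
 */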
#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* clear any information we had about last context state */
	memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));

	return 0;
}
static void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
	void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		vfp_pm_suspend();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		vfp_pm_resume();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	if (vfp_state_in_hw(cpu, thread)) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}
/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	vfp_force_reload(cpu, thread);

	put_cpu();
}
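/*
 * vfp_sync_hwstate() and vfp_flush_hwstate() form a pair for code (such as
 * the ptrace and signal paths) that touches thread->vfpstate directly:
 * sync copies the live registers out to memory, flush forces the next VFP
 * use to reload from memory.
 */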
/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
				    struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	int err = 0;

	/* Ensure that the saved hwstate is up-to-date. */
	vfp_sync_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
			      sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Copy the exception registers.
	 */
	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	if (err)
		return -EFAULT;

	/* Ensure that VFP is disabled. */
	vfp_flush_hwstate(thread);

	/*
	 * As per the PCS, clear the length and stride bits for function
	 * entry.
	 */
	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);

	/*
	 * Disable VFP in the hwstate so that we can detect if it gets
	 * used.
	 */
	hwstate->fpexc &= ~FPEXC_EN;
	return 0;
}
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
			     struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;
	int err = 0;

	/*
	 * If VFP has been used, then disable it to avoid corrupting
	 * the new thread state.
	 */
	if (hwstate->fpexc & FPEXC_EN)
		vfp_flush_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
				sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Sanitise and restore the exception registers.
	 */
	__get_user_error(fpexc, &ufp_exc->fpexc, err);

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	return err ? -EFAULT : 0;
}
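/*
 * The preserve/restore pair above backs the signal frame: note that on
 * restore the user-supplied FPEXC is sanitised (EN forced on, EX/FP2V
 * forced off) so userspace cannot feed an exception-pending state back
 * into the kernel.
 */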
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
		vfp_force_reload((long)hcpu, current_thread_info());
	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
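/* The notifier passes the CPU number as a pointer, hence the (long)hcpu cast. */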
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		smp_call_function(vfp_enable, NULL, 1);

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}
	return 0;
}
late_initcall(vfp_init);
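/*
 * Running as a late_initcall means the other CPUs are already online by the
 * time vfp_init() issues smp_call_function(vfp_enable, ...), so every CPU
 * ends up with coprocessor access enabled before userspace starts.
 */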