ARM: 7400/1: vfp: clear fpscr length and stride bits on entry to sig handler
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 9f476a1..bc683b8 100644
@@ -8,13 +8,21 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/module.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
+#include <linux/notifier.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
 
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/system_info.h>
 #include <asm/thread_notify.h>
 #include <asm/vfp.h>
 
@@ -29,7 +37,6 @@ void vfp_support_entry(void);
 void vfp_null_entry(void);
 
 void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
 
 /*
  * Dual-use variable.
@@ -38,32 +45,144 @@ union vfp_state *last_VFP_context[NR_CPUS];
  */
 unsigned int VFP_arch;
 
+/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we must also check the CPU number stored in the
+ * saved state to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is the thread's most up-to-date state stored in this CPU's hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+       if (thread->vfpstate.hard.cpu != cpu)
+               return false;
+#endif
+       return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
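
Both halves of the test matter on SMP: the per-CPU pointer alone cannot
catch a thread that ran here, migrated away, and was modified on another
CPU. A minimal sketch of the caller pattern (the real callers are
vfp_force_reload() and vfp_sync_hwstate() below); get_cpu()/put_cpu()
provide the required non-preemptible context:

	unsigned int cpu = get_cpu();	/* disables preemption */
	if (vfp_state_in_hw(cpu, thread)) {
		/* VFP registers hold this thread's latest state */
	}
	put_cpu();			/* re-enables preemption */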
+
+/*
+ * Force a reload of the VFP context from the thread structure.  We do
+ * this by ensuring that access to the VFP hardware is disabled and by
+ * clearing vfp_current_hw_state.  Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+       if (vfp_state_in_hw(cpu, thread)) {
+               fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+               vfp_current_hw_state[cpu] = NULL;
+       }
+#ifdef CONFIG_SMP
+       thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
+ * Per-thread VFP initialisation.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+       union vfp_state *vfp = &thread->vfpstate;
+       unsigned int cpu;
+
+       /*
+        * Disable VFP so that we initialise it first.  The update of
+        * vfp_current_hw_state[] and the hardware disable must be done
+        * on the same CPU and without preemption.
+        *
+        * Do this first so that, should we be preempted while VFP access
+        * is still enabled, the switch cannot save stale state over *vfp.
+        */
+       cpu = get_cpu();
+       if (vfp_current_hw_state[cpu] == vfp)
+               vfp_current_hw_state[cpu] = NULL;
+       fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       put_cpu();
+
+       memset(vfp, 0, sizeof(union vfp_state));
+
+       vfp->hard.fpexc = FPEXC_EN;
+       vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+       vfp->hard.cpu = NR_CPUS;
+#endif
+}
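
The ordering is the subtle part here. A sketch of the race that the
disable-first sequence closes, assuming a preempting context switch lands
between the two steps:

	/*
	 * old order (memset first, disable second):
	 *   1. memset(vfp, 0, ...)            zero the saved state
	 *   2. preempted with FPEXC_EN still set: the switch path saves
	 *      the live hardware registers back into *vfp, undoing step 1
	 *
	 * new order (disable first, as above):
	 *   any preempting switch sees FPEXC_EN clear and
	 *   vfp_current_hw_state[cpu] == NULL, and leaves *vfp alone
	 */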
+
+static void vfp_thread_exit(struct thread_info *thread)
+{
+       /* release case: Per-thread VFP cleanup. */
+       union vfp_state *vfp = &thread->vfpstate;
+       unsigned int cpu = get_cpu();
+
+       if (vfp_current_hw_state[cpu] == vfp)
+               vfp_current_hw_state[cpu] = NULL;
+       put_cpu();
+}
+
+static void vfp_thread_copy(struct thread_info *thread)
+{
+       struct thread_info *parent = current_thread_info();
+
+       vfp_sync_hwstate(parent);
+       thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+       thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * holds while it runs:
+ *  THREAD_NOTIFY_SWITCH:
+ *   - the previously running thread will not be scheduled onto another CPU.
+ *   - the next thread to be run (v) will not be running on another CPU.
+ *   - thread->cpu is the local CPU number
+ *   - not preemptible as we're called in the middle of a thread switch
+ *  THREAD_NOTIFY_FLUSH:
+ *   - the thread (v) will be running on the local CPU, so
+ *     v === current_thread_info()
+ *   - thread->cpu is the local CPU number at the time it is accessed,
+ *     but may change at any time.
+ *   - we could be preempted if tree preempt rcu is enabled, so
+ *     it is unsafe to use thread->cpu.
+ *  THREAD_NOTIFY_EXIT:
+ *   - the thread (v) will be running on the local CPU, so
+ *     v === current_thread_info()
+ *   - thread->cpu is the local CPU number at the time it is accessed,
+ *     but may change at any time.
+ *   - we could be preempted if tree preempt rcu is enabled, so
+ *     it is unsafe to use thread->cpu.
+ */
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
        struct thread_info *thread = v;
-       union vfp_state *vfp;
-       __u32 cpu = thread->cpu;
+       u32 fpexc;
+#ifdef CONFIG_SMP
+       unsigned int cpu;
+#endif
 
-       if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
-               u32 fpexc = fmrx(FPEXC);
+       switch (cmd) {
+       case THREAD_NOTIFY_SWITCH:
+               fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
+               cpu = thread->cpu;
+
                /*
                 * On SMP, if VFP is enabled, save the old state in
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
-               if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
-                       vfp_save_state(last_VFP_context[cpu], fpexc);
-                       last_VFP_context[cpu]->hard.cpu = cpu;
-               }
-               /*
-                * Thread migration, just force the reloading of the
-                * state on the new CPU in case the VFP registers
-                * contain stale data.
-                */
-               if (thread->vfpstate.hard.cpu != cpu)
-                       last_VFP_context[cpu] = NULL;
+               if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+                       vfp_save_state(vfp_current_hw_state[cpu], fpexc);
 #endif
 
                /*
@@ -71,29 +190,21 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
-               return NOTIFY_DONE;
-       }
+               break;
 
-       vfp = &thread->vfpstate;
-       if (cmd == THREAD_NOTIFY_FLUSH) {
-               /*
-                * Per-thread VFP initialisation.
-                */
-               memset(vfp, 0, sizeof(union vfp_state));
+       case THREAD_NOTIFY_FLUSH:
+               vfp_thread_flush(thread);
+               break;
 
-               vfp->hard.fpexc = FPEXC_EN;
-               vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+       case THREAD_NOTIFY_EXIT:
+               vfp_thread_exit(thread);
+               break;
 
-               /*
-                * Disable VFP to ensure we initialise it first.
-                */
-               fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       case THREAD_NOTIFY_COPY:
+               vfp_thread_copy(thread);
+               break;
        }
 
-       /* flush and release case: Per-thread VFP cleanup. */
-       if (last_VFP_context[cpu] == vfp)
-               last_VFP_context[cpu] = NULL;
-
        return NOTIFY_DONE;
 }
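
Two notes for context. First, the comment above vfp_notifier() predates
THREAD_NOTIFY_COPY: that case fires from copy_thread() at fork time, with
v being the child's thread_info and current_thread_info() the parent, in a
preemptible context. Second, the notifier wiring itself is unchanged by
this diff; the block named in the next hunk's context line is registered
once from vfp_init(). A sketch:

	static struct notifier_block vfp_notifier_block = {
		.notifier_call	= vfp_notifier,
	};

	/* in vfp_init(): */
	thread_register_notifier(&vfp_notifier_block);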
 
@@ -105,7 +216,7 @@ static struct notifier_block vfp_notifier_block = {
  * Raise a SIGFPE for the current process.
  * sicode describes the signal being raised.
  */
-void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
 {
        siginfo_t info;
 
@@ -153,10 +264,13 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
        }
 
        /*
-        * Update the FPSCR with the additional exception flags.
+        * If any of the status flags are set, update the FPSCR.
         * Comparison instructions always return at least one of
         * these flags set.
         */
+       if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
+               fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
+
        fpscr |= exceptions;
 
        fmxr(FPSCR, fpscr);
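
A worked example of what the new masking fixes. Suppose a stale FPSCR.N
survives from an earlier operation and an emulated compare reports Z and C:

	/*
	 * fpscr      = ... | FPSCR_N          stale negative flag
	 * exceptions = FPSCR_Z | FPSCR_C      result of the compare
	 *
	 * without the clear:  fpscr |= exceptions  ->  N, Z and C all set
	 * with the clear:     NZCV zeroed first    ->  only Z and C set
	 */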
@@ -253,12 +367,14 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
        }
 
        if (fpexc & FPEXC_EX) {
+#ifndef CONFIG_CPU_FEROCEON
                /*
                 * Asynchronous exception. The instruction is read from FPINST
                 * and the interrupted instruction has to be restarted.
                 */
                trigger = fmrx(FPINST);
                regs->ARM_pc -= 4;
+#endif
        } else if (!(fpexc & FPEXC_DEX)) {
                /*
                 * Illegal combination of bits. It can be caused by an
@@ -266,7 +382,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
                 * on VFP subarch 1.
                 */
                 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
-                return;
+               goto exit;
        }
 
        /*
@@ -297,7 +413,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
         * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
         */
        if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
-               return;
+               goto exit;
 
        /*
         * The barrier() here prevents fpinst2 being read
@@ -310,6 +426,8 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
        exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ exit:
+       preempt_enable();
 }
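
The goto-exit rewrite exists so that every path out of VFP_bounce() reaches
the new preempt_enable(). The matching increment is not visible in this
file: it is performed before VFP_bounce() runs, in the low-level entry code
(do_vfp in the assembly entry path, in kernels of this era); treat that
location as an assumption to verify against your tree. The resulting shape:

	/*
	 *   VFP_bounce() entered, preempt count already raised
	 *        |
	 *   error paths ------ goto exit ------+
	 *        |                             |
	 *   emulate / raise exceptions         |
	 *        |                             |
	 *      exit:  <------------------------+
	 *   preempt_enable()   every path balances the count;
	 *                      a bare "return" would leak it
	 */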
 
 static void vfp_enable(void *unused)
@@ -322,10 +440,8 @@ static void vfp_enable(void *unused)
        set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 }
 
-#ifdef CONFIG_PM
-#include <linux/sysdev.h>
-
-static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
+#ifdef CONFIG_CPU_PM
+static int vfp_pm_suspend(void)
 {
        struct thread_info *ti = current_thread_info();
        u32 fpexc = fmrx(FPEXC);
@@ -340,44 +456,197 @@ static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
        }
 
        /* clear any information we had about last context state */
-       memset(last_VFP_context, 0, sizeof(last_VFP_context));
+       memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
 
        return 0;
 }
 
-static int vfp_pm_resume(struct sys_device *dev)
+static void vfp_pm_resume(void)
 {
        /* ensure we have access to the vfp */
        vfp_enable(NULL);
 
        /* and disable it to ensure the next usage restores the state */
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-
-       return 0;
 }
 
-static struct sysdev_class vfp_pm_sysclass = {
-       .name           = "vfp",
-       .suspend        = vfp_pm_suspend,
-       .resume         = vfp_pm_resume,
-};
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+       void *v)
+{
+       switch (cmd) {
+       case CPU_PM_ENTER:
+               vfp_pm_suspend();
+               break;
+       case CPU_PM_ENTER_FAILED:
+       case CPU_PM_EXIT:
+               vfp_pm_resume();
+               break;
+       }
+       return NOTIFY_OK;
+}
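
Note that CPU_PM_ENTER_FAILED is routed to the resume hook: by the time a
failed low-power entry unwinds, vfp_pm_suspend() has already saved the state
and disabled VFP, so access must be re-granted either way. The event flow:

	/*
	 * CPU_PM_ENTER        -> vfp_pm_suspend()  save state, disable VFP
	 * (low-power entry fails)
	 * CPU_PM_ENTER_FAILED -> vfp_pm_resume()   re-enable access, keep
	 *                        FPEXC_EN clear so the next use reloads
	 */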
 
-static struct sys_device vfp_pm_sysdev = {
-       .cls    = &vfp_pm_sysclass,
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+       .notifier_call = vfp_cpu_pm_notifier,
 };
 
 static void vfp_pm_init(void)
 {
-       sysdev_class_register(&vfp_pm_sysclass);
-       sysdev_register(&vfp_pm_sysdev);
+       cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
 }
 
-
 #else
 static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
 
-#include <linux/smp.h>
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
+void vfp_sync_hwstate(struct thread_info *thread)
+{
+       unsigned int cpu = get_cpu();
+
+       if (vfp_state_in_hw(cpu, thread)) {
+               u32 fpexc = fmrx(FPEXC);
+
+               /*
+                * Save the last VFP state on this CPU.
+                */
+               fmxr(FPEXC, fpexc | FPEXC_EN);
+               vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
+               fmxr(FPEXC, fpexc);
+       }
+
+       put_cpu();
+}
+
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
+void vfp_flush_hwstate(struct thread_info *thread)
+{
+       unsigned int cpu = get_cpu();
+
+       vfp_force_reload(cpu, thread);
+
+       put_cpu();
+}
+
+/*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+                                   struct user_vfp_exc __user *ufp_exc)
+{
+       struct thread_info *thread = current_thread_info();
+       struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+       int err = 0;
+
+       /* Ensure that the saved hwstate is up-to-date. */
+       vfp_sync_hwstate(thread);
+
+       /*
+        * Copy the floating point registers. There can be unused
+        * registers; see asm/hwcap.h for details.
+        */
+       err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+                             sizeof(hwstate->fpregs));
+       /*
+        * Copy the status and control register.
+        */
+       __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+       /*
+        * Copy the exception registers.
+        */
+       __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+       __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+       __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+       if (err)
+               return -EFAULT;
+
+       /* Ensure that VFP is disabled. */
+       vfp_flush_hwstate(thread);
+
+       /*
+        * As per the PCS, clear the length and stride bits for function
+        * entry.
+        */
+       hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+
+       /*
+        * Disable VFP in the hwstate so that we can detect if it gets
+        * used.
+        */
+       hwstate->fpexc &= ~FPEXC_EN;
+       return 0;
+}
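
A sketch of the expected caller on the signal-delivery side, modelled on
arch/arm/kernel/signal.c of the same era; the vfp_sigframe layout and the
VFP_MAGIC/VFP_STORAGE_SIZE constants are assumptions to check against your
tree:

	static int preserve_vfp_context(struct vfp_sigframe __user *frame)
	{
		const unsigned long magic = VFP_MAGIC;
		const unsigned long size = VFP_STORAGE_SIZE;
		int err = 0;

		/* tag the frame so sigreturn can validate it */
		__put_user_error(magic, &frame->magic, err);
		__put_user_error(size, &frame->size, err);
		if (err)
			return -EFAULT;

		return vfp_preserve_user_clear_hwstate(&frame->ufp,
						       &frame->ufp_exc);
	}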
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+                            struct user_vfp_exc __user *ufp_exc)
+{
+       struct thread_info *thread = current_thread_info();
+       struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+       unsigned long fpexc;
+       int err = 0;
+
+       /*
+        * If VFP has been used, then disable it to avoid corrupting
+        * the new thread state.
+        */
+       if (hwstate->fpexc & FPEXC_EN)
+               vfp_flush_hwstate(thread);
+
+       /*
+        * Copy the floating point registers. There can be unused
+        * registers; see asm/hwcap.h for details.
+        */
+       err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+                               sizeof(hwstate->fpregs));
+       /*
+        * Copy the status and control register.
+        */
+       __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+       /*
+        * Sanitise and restore the exception registers.
+        */
+       __get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+       /* Ensure the VFP is enabled. */
+       fpexc |= FPEXC_EN;
+
+       /* Ensure FPINST2 is invalid and the exception flag is cleared. */
+       fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+       hwstate->fpexc = fpexc;
+
+       __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+       __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+       return err ? -EFAULT : 0;
+}
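
The fpexc sanitisation is the security-relevant step: the word comes from
userspace, which must not be able to hand the kernel a forged exception
state. The net effect:

	/*
	 * fpexc |= FPEXC_EN;                   VFP usable after sigreturn
	 * fpexc &= ~(FPEXC_EX | FPEXC_FP2V);   never resume into a
	 *                                      (possibly forged) pending-
	 *                                      exception state
	 */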
+
+/*
+ * VFP hardware can lose all context when a CPU goes offline.
+ * As we will be running in SMP mode with CPU hotplug, we will save the
+ * hardware state at every thread switch.  We clear our held state when
+ * a CPU has been killed, indicating that the VFP hardware doesn't contain
+ * a thread's VFP state.  When a CPU starts up, we re-enable access to the
+ * VFP hardware.
+ *
+ * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * is being offlined/onlined.
+ */
+static int vfp_hotplug(struct notifier_block *b, unsigned long action,
+       void *hcpu)
+{
+       if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
+               vfp_force_reload((long)hcpu, current_thread_info());
+       } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+               vfp_enable(NULL);
+       return NOTIFY_OK;
+}
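
The two events bracket a CPU's VFP lifetime, and both run on the CPU in
question, as the comment above notes. In outline:

	/*
	 * CPU_DYING    (on the dying CPU)  -> vfp_force_reload()
	 *     nothing owns the hardware context any more
	 * CPU_STARTING (on the new CPU)    -> vfp_enable()
	 *     coprocessor access re-granted; state reloads lazily on use
	 */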
 
 /*
  * VFP support code initialisation.
@@ -407,6 +676,8 @@ static int __init vfp_init(void)
        else if (vfpsid & FPSID_NODOUBLE) {
                printk("no double precision support\n");
        } else {
+               hotcpu_notifier(vfp_hotplug, 0);
+
                smp_call_function(vfp_enable, NULL, 1);
 
                VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
@@ -427,15 +698,32 @@ static int __init vfp_init(void)
                 * in place; report VFP support to userspace.
                 */
                elf_hwcap |= HWCAP_VFP;
-#ifdef CONFIG_NEON
+#ifdef CONFIG_VFPv3
+               if (VFP_arch >= 2) {
+                       elf_hwcap |= HWCAP_VFPv3;
+
+                       /*
+                        * Check for VFPv3 D16. CPUs in this configuration
+                        * only have 16 x 64bit registers.
+                        */
+                       if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
+                               elf_hwcap |= HWCAP_VFPv3D16;
+               }
+#endif
                /*
                 * Check for the presence of the Advanced SIMD
                 * load/store instructions, integer and single
-                * precision floating point operations.
+                * precision floating point operations. Only check
+                * for NEON if the hardware has the MVFR registers.
                 */
-               if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
-                       elf_hwcap |= HWCAP_NEON;
+               if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
+                       if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+                               elf_hwcap |= HWCAP_NEON;
 #endif
+                       if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+                               elf_hwcap |= HWCAP_VFPv4;
+               }
        }
        return 0;
 }
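
The practical effect of the new hwcap bits is visible to userspace through
the ELF auxiliary vector. A hedged userspace sketch, not part of this patch:
getauxval() needs glibc 2.16 or later, and older systems can parse
/proc/self/auxv instead:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_VFPv3)
			printf("VFPv3%s\n",
			       (hwcap & HWCAP_VFPv3D16) ? " (D16)" : "");
		if (hwcap & HWCAP_VFPv4)
			printf("VFPv4 (fused multiply-accumulate)\n");
		if (hwcap & HWCAP_NEON)
			printf("NEON\n");
		return 0;
	}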