* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+#include <asm/cp15.h>
#include <asm/cputype.h>
+#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>
/*
* Force a reload of the VFP context from the thread structure. We do
* this by ensuring that access to the VFP hardware is disabled, and
- * clear last_VFP_context. Must be called from non-preemptible context.
+ * clear vfp_current_hw_state. Must be called from non-preemptible context.
*/
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
-#ifdef CONFIG_PM
-#include <linux/syscore_ops.h>
-
+#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
struct thread_info *ti = current_thread_info();
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
-static struct syscore_ops vfp_pm_syscore_ops = {
- .suspend = vfp_pm_suspend,
- .resume = vfp_pm_resume,
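+/*
+ * CPU PM notifier: on CPU_PM_ENTER this CPU may power down and lose its
+ * VFP register state, so save it and disable the VFP; on CPU_PM_EXIT
+ * (or a failed entry) restore access so the state is reloaded on next use.
+ */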
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ vfp_pm_suspend();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ vfp_pm_resume();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+ .notifier_call = vfp_cpu_pm_notifier,
};
static void vfp_pm_init(void)
{
- register_syscore_ops(&vfp_pm_syscore_ops);
+ cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}
#else
static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
/*
* Ensure that the VFP state stored in 'thread->vfpstate' is up to date
}
/*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ struct user_vfp_exc __user *ufp_exc)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ int err = 0;
+
+ /* Ensure that the saved hwstate is up-to-date. */
+ vfp_sync_hwstate(thread);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+ sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+ /*
+ * Copy the exception registers.
+ */
+ __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+ __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+ __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
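+ /* All of the user-copy failures above have accumulated into err. */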
+ if (err)
+ return -EFAULT;
+
+ /* Ensure that VFP is disabled. */
+ vfp_flush_hwstate(thread);
+
+ /*
+ * As per the PCS (the ARM procedure call standard), clear the length
+ * and stride bits for function entry.
+ */
+ hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+
+ /*
+ * Disable VFP in the hwstate so that we can detect if it gets
+ * used.
+ */
+ hwstate->fpexc &= ~FPEXC_EN;
+ return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+ struct user_vfp_exc __user *ufp_exc)
+{
+ struct thread_info *thread = current_thread_info();
+ struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+ unsigned long fpexc;
+ int err = 0;
+
+ /*
+ * If VFP has been used, then disable it to avoid corrupting
+ * the new thread state.
+ */
+ if (hwstate->fpexc & FPEXC_EN)
+ vfp_flush_hwstate(thread);
+
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers; see asm/hwcap.h for details.
+ */
+ err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+ sizeof(hwstate->fpregs));
+ /*
+ * Copy the status and control register.
+ */
+ __get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+ /*
+ * Sanitise and restore the exception registers.
+ */
+ __get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+ /* Ensure the VFP is enabled. */
+ fpexc |= FPEXC_EN;
+
+ /* Ensure FPINST2 is invalid and the exception flag is cleared, so
+ * user-supplied exception state is never replayed. */
+ fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+ hwstate->fpexc = fpexc;
+
+ __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+ __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
+
+/*
* VFP hardware can lose all context when a CPU goes offline.
* As we will be running in SMP mode with CPU hotplug, we will save the
* hardware state at every thread switch. We clear our held state when
elf_hwcap |= HWCAP_VFPv3D16;
}
#endif
-#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD
* load/store instructions, integer and single
* precision floating-point operations. Only check
* for NEON if the hardware has the MVFR registers.
*/
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
- }
#endif
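+ /* MVFR1[31:28] (SIMDFMAC) == 1: fused multiply-accumulate, i.e. VFPv4. */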
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
+ }
}
return 0;
}