/*
 * linux/arch/i386/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * of the state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/nmi.h>
#include <linux/bug.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#ifdef CONFIG_KDB
#include <linux/kdb.h>
#endif /* CONFIG_KDB */
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/unwind.h>
#include <asm/vm86.h>
#include <asm/arch_hooks.h>
#include <linux/kdebug.h>
#include <asm/stacktrace.h>

#include <linux/module.h>

#include "mach_traps.h"
int panic_on_unrecovered_nmi;

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        /* the "- 3" keeps a full unsigned long read at p inside the stack */
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}
static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp,
                                struct stacktrace_ops *ops, void *data)
{
        unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                unsigned long new_ebp;
                addr = *(unsigned long *)(ebp + 4);
                ops->address(data, addr);
                /*
                 * break out of recursive entries (such as
                 * end_of_stack_stop_unwind_function). Also,
                 * we can never allow a frame pointer to
                 * move downwards!
                 */
                new_ebp = *(unsigned long *)ebp;
                if (new_ebp <= ebp)
                        break;
                ebp = new_ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        ops->address(data, addr);
        }
#endif
        return ebp;
}

#define MSG(msg) ops->warning(data, msg)
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack,
                struct stacktrace_ops *ops, void *data)
{
        unsigned long ebp = 0;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.esp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!ebp) {
                if (task == current) {
                        /* Grab ebp right from our regs */
                        asm ("movl %%ebp, %0" : "=r" (ebp) : );
                } else {
                        /* ebp is the last reg pushed by switch_to */
                        ebp = *(unsigned long *) task->thread.esp;
                }
        }
#endif

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp, ops, data);
                /* Should be after the line below, but somewhere
                   in early boot context comes out corrupted and we
                   can't reference it -AK */
                if (ops->stack(data, "IRQ") < 0)
                        break;
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                touch_nmi_watchdog();
        }
}
EXPORT_SYMBOL(dump_trace);
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk("%s", (char *)data);
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr)
{
        printk("%s [<%08lx>] ", (char *)data, addr);
        print_symbol("%s\n", addr);
}

static struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long * stack, char *log_lvl)
{
        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
        printk("%s =======================\n", log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long * stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *esp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long *)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s       ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, regs, esp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        printk("       ");
        show_stack_log_lvl(task, NULL, esp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, NULL, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss, gs;

        esp = (unsigned long) (&regs->esp);
        savesegment(ss, ss);
        savesegment(gs, gs);
        if (user_mode_vm(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk(KERN_EMERG "CPU:    %d\n"
                KERN_EMERG "EIP:    %04x:[<%08lx>]    %s VLI\n"
                KERN_EMERG "EFLAGS: %08lx   (%s %.*s)\n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
        printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk(KERN_EMERG "ds: %04x   es: %04x   fs: %04x  gs: %04x  ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, current->pid,
                current_thread_info(), current, task_thread_info(current));
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 *eip;
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                eip = (u8 *)regs->eip - code_prologue;
                if (eip < (u8 *)PAGE_OFFSET ||
                        probe_kernel_address(eip, c)) {
                        /* try starting at EIP */
                        eip = (u8 *)regs->eip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, eip++) {
                        if (eip < (u8 *)PAGE_OFFSET ||
                                probe_kernel_address(eip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
int is_valid_bugaddr(unsigned long eip)
{
        unsigned short ud2;

        if (eip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)eip, ud2))
                return 0;

        /* 0x0f, 0x0b is the ud2 instruction, read as a little-endian short */
        return ud2 == 0x0b0f;
}
/*
 * This is gone through when something in the kernel has done something bad and
 * is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = __SPIN_LOCK_UNLOCKED(die.lock),
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        unsigned long flags;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }
        else
                local_save_flags(flags);

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                unsigned long esp;
                unsigned short ss;

                report_bug(regs->eip);

                printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk(KERN_EMERG "PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                if (!nl)
                        printk(KERN_EMERG);
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                if (!nl)
                        printk(KERN_EMERG);
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                sysfs_printk_last_file();
                if (notify_die(DIE_OOPS, str, regs, err,
                                current->thread.trap_no, SIGSEGV) !=
                                        NOTIFY_STOP) {
                        show_registers(regs);
                        /* Executive summary in case the oops scrolled away */
                        esp = (unsigned long) (&regs->esp);
                        savesegment(ss, ss);
                        if (user_mode(regs)) {
                                esp = regs->esp;
                                ss = regs->xss & 0xffff;
                        }
                        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
                        print_symbol("%s", regs->eip);
                        printk(" SS:ESP %04x:%08lx\n", ss, esp);
                }
                else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irqrestore(&die.lock, flags);

#ifdef CONFIG_KDB
        kdb(KDB_REASON_OOPS, err, regs);
#endif /* CONFIG_KDB */

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                /*
                 * We want error_code and trap_no set for userspace faults and
                 * kernelspace faults which result in die(), but not
                 * kernelspace faults which are fixed up. die() gives the
                 * process no chance to handle the signal and notice the
                 * kernel fault information, so that won't result in polluting
                 * the information about previously queued, but not yet
                 * delivered, faults. See also do_general_protection below.
                 */
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_no = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
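
/*
 * For illustration (not in the original source): the invocation
 * DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) below expands to
 * roughly:
 *
 *	fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *						10, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *	}
 */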
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#if !defined(CONFIG_KPROBES) && !defined(CONFIG_KDB)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map extended to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }
        put_cpu();

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                current->thread.error_code = error_code;
                current->thread.trap_no = 13;
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_KDB
        (void)kdb(KDB_REASON_NMI, reason, regs);
#endif /* CONFIG_KDB */
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
#ifdef CONFIG_KDB
        kdb(KDB_REASON_NMI, 0, regs);
#endif /* CONFIG_KDB */
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
static __kprobes void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

#if defined(CONFIG_SMP) && defined(CONFIG_KDB)
        /*
         * Call the kernel debugger to see if this NMI is due
         * to a KDB-requested IPI. If so, kdb will handle it.
         */
        if (kdb_ipi(regs, NULL)) {
                return;
        }
#endif /* defined(CONFIG_SMP) && defined(CONFIG_KDB) */

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, smp_processor_id()))
#endif
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}

fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        default_do_nmi(regs);

        nmi_exit();
}
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KDB
        if (kdb(KDB_REASON_BREAK, error_code, regs))
                return;
#endif /* CONFIG_KDB */
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * completely unused generality in other places.)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

#ifdef CONFIG_KDB
        if (kdb(KDB_REASON_DEBUG, error_code, regs))
                return;
#endif /* CONFIG_KDB */

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}
#if defined(CONFIG_KDB) && !defined(CONFIG_KPROBES)
fastcall void do_int3(struct pt_regs * regs, long error_code)
{
        if (kdb(KDB_REASON_BREAK, error_code, regs))
                return;
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif /* CONFIG_KDB && !CONFIG_KPROBES */
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception.
         */
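        /*
         * Worked example (illustrative, not in the original source): with
         * cwd == 0x037b (the default 0x037f with the zero-divide mask bit
         * 0x004 cleared) and swd == 0x0004 (ZE flagged), swd & ~cwd & 0x3f
         * evaluates to 0x004 and the switch below picks FPE_FLTDIV.
         */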
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
                case 0x000: /* No unmasked exception */
                default:    /* Multiple exceptions */
                        break;
                case 0x001: /* Invalid Op */
                        /*
                         * swd & 0x240 == 0x040: Stack Underflow
                         * swd & 0x240 == 0x240: Stack Overflow
                         * User must clear the SF bit (0x40) if set
                         */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
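        /*
         * Worked example (illustrative, not in the original source): for
         * mxcsr == 0x1d84, all exceptions are masked except zero-divide
         * (mask bit 0x0200 clear) and the ZE flag 0x004 is set, so
         * ~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f) == 0x004 and the
         * switch below picks FPE_FLTDIV.
         */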
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                          long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
fastcall unsigned long patch_espfix_desc(unsigned long uesp,
                                          unsigned long kesp)
{
        struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
        unsigned long new_kesp = kesp - base;
        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
        /* Set up base for espfix segment */
        desc &= 0x00f0ff0000000000ULL;
        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
                (lim_pages & 0xffff);
        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
        return new_kesp;
}
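
/*
 * Layout note (illustrative, not in the original source): an x86 segment
 * descriptor scatters its fields, so the masks above place base[23:0] in
 * bits 16..39 and base[31:24] in bits 56..63 of the 8-byte entry, while
 * limit[15:0] lands in bits 0..15 and limit[19:16] in bits 48..51; the
 * access byte and flags nibble are preserved from the original descriptor.
 */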
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry<<3));
}
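
/*
 * For reference (an illustrative note, not in the original file; values
 * as in the 2.6-era asm-i386/desc.h): DESCTYPE_INT is 0x8e (present,
 * DPL-0, 32-bit interrupt gate), DESCTYPE_TRAP is 0x8f (trap gate),
 * DESCTYPE_TASK is 0x85 (task gate), and or-ing in DESCTYPE_DPL3 (0x60)
 * raises a gate's DPL to 3 so userspace "int" instructions may invoke it.
 */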
void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
        set_system_gate(4,&overflow);
        set_trap_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);
        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                        thread.i387.fxsave) & 15);
                };
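                /*
                 * How the assert works (an explanatory note, not in the
                 * original source): if the fxsave field is 16-byte aligned,
                 * the expression is 1 and '_' is a harmless one-bit field;
                 * if not, it is 0, and a named bit-field of width zero is
                 * rejected by the compiler, failing the build.
                 */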
                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                                "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR,&system_call);
        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}

static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);
static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;
        return 1;
}
__setup("code_bytes=", code_bytes_setup);
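
/*
 * Usage note (not in the original source): both of the above are kernel
 * command-line parameters; booting with e.g. "kstack=48 code_bytes=128"
 * makes an oops print 48 stack words and dump 128 bytes of code.
 */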