/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif
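/*
 * Background note (not part of the original comment): the F0 0F bug is
 * the Pentium erratum where the byte sequence f0 0f c7 c8 (lock
 * cmpxchg8b %eax, an invalid operand) hangs the CPU instead of raising
 * #UD.  With the IDT page-aligned it can be aliased read-only through a
 * fixmap, turning the lockup into a page fault the kernel can fix up.
 */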
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent NMI reason port (0x61) being accessed simultaneously, can
 * only be used in NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
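/*
 * Reading guide (added commentary): the helpers below mirror the
 * interrupt state of the interrupted context.  A trap raised while
 * interrupts were enabled (IF set in the saved flags) may safely
 * re-enable them; a trap from an irqs-off region must not.  The
 * preempt_* variants additionally bump the preempt count, so handlers
 * running on an IST stack cannot be preempted while borrowing it.
 */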
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}
#ifdef CONFIG_X86_32
static inline int
__compare_user_cs_desc(const struct desc_struct *desc1,
		       const struct desc_struct *desc2)
{
	return ((desc1->limit0 != desc2->limit0) ||
		(desc1->limit != desc2->limit) ||
		(desc1->base0 != desc2->base0) ||
		(desc1->base1 != desc2->base1) ||
		(desc1->base2 != desc2->base2));
}
/*
 * lazy-check for CS validity on exec-shield binaries:
 *
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
static int
check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
{
	struct desc_struct *desc1, *desc2;
	struct vm_area_struct *vma;
	unsigned long limit;

	if (current->mm == NULL)
		return 0;

	limit = -1UL;
	if (current->mm->context.exec_limit != -1UL) {
		limit = PAGE_SIZE;
		spin_lock(&current->mm->page_table_lock);
		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;
		vma = get_gate_vma(current);
		if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
			limit = vma->vm_end;
		spin_unlock(&current->mm->page_table_lock);
		if (limit >= TASK_SIZE)
			limit = -1UL;
		current->mm->context.exec_limit = limit;
	}
	set_user_cs(&current->mm->context.user_cs, limit);

	desc1 = &current->mm->context.user_cs;
	desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;

	if (__compare_user_cs_desc(desc1, desc2)) {
		/*
		 * The CS was not in sync - reload it and retry the
		 * instruction. If the instruction still faults then
		 * we won't hit this branch next time around.
		 */
		if (print_fatal_signals >= 2) {
			printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
				error_code, error_code/8, regs->ip,
				smp_processor_id());
			printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n",
				current->mm->context.exec_limit,
				desc1->a, desc1->b, desc2->a, desc2->b);
		}
		load_user_cs_desc(cpu, current->mm);

		return 1;
	}

	return 0;
}
#endif
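/*
 * Exec-shield in one paragraph (summary, not from the upstream file):
 * the patch tracks the highest executable mapping of an mm
 * (context.exec_limit) and shrinks the user %cs segment limit down to
 * it, so instruction fetches above the limit fault even on CPUs
 * without NX.  The per-CPU GDT copy of user_cs can lag behind the
 * per-mm descriptor, so a #GP is first retried after syncing the two
 * (the "lazy" check above) before being treated as a real fault.
 */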
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
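/*
 * Illustration only: DO_ERROR(4, SIGSEGV, "overflow", overflow) below
 * expands to roughly
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *						4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */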
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

#ifdef CONFIG_X86_32
{
	int cpu;
	int ok;

	cpu = get_cpu();
	ok = check_lazy_exec_limit(cpu, regs, error_code);
	put_cpu();

	if (ok)
		return;

	if (print_fatal_signals) {
		printk(KERN_ERR "#GPF(%ld[seg:%lx]) at %08lx, CPU#%d.\n",
			error_code, error_code/8, regs->ip, smp_processor_id());
		printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x.\n",
			current->mm->context.exec_limit,
			current->mm->context.user_cs.a,
			current->mm->context.user_cs.b);
	}
}
#endif /*CONFIG_X86_32*/

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
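/*
 * For reference (the constants live in <asm/mach_traps.h>, not in this
 * file; values quoted from memory of that era): reading port 0x61
 * returns NMI_REASON_SERR (0x80) and/or NMI_REASON_IOCHK (0x40);
 * writing the matching CLEAR bit (0x04/0x08) disables and clears the
 * source, and writing it back to zero re-arms it, as io_check_error()
 * below does after its delay loop.
 */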
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;			/* 20000 * 100us = ~2 seconds */
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
			NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */
	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
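/*
 * General background (not from the original source): int3 is the
 * single-byte opcode 0xcc, so kprobes and debuggers can atomically
 * overwrite the first byte of any instruction with it; that is why it
 * gets its own notify_die() hook above and may arrive on the IST stack.
 */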
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif
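/*
 * Reading guide (summary of the cases above, not new behaviour): if
 * eregs already points at the top of the stack it is used as-is; an
 * exception from user space uses the task's saved pt_regs; an exception
 * from irqs-on kernel code copies the frame onto the kernel process
 * stack.  The caller in entry.S then switches %rsp to the returned
 * pointer, which is the "actual stack switch" the comment refers to.
 */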
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		return;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
							SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs,
				error_code, 1);
		preempt_conditional_cli(regs);
		return;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);

	return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs))
	{
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_no = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_no = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == 16) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}
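	/*
	 * Worked example (illustrative, not from the original source):
	 * with the power-up MXCSR of 0x1f80 every exception is masked and
	 * err == 0.  If the divide-by-zero mask (bit 9) is cleared and a
	 * divide then sets the ZE flag (bit 2), mxcsr == 0x1d84:
	 *	mxcsr >> 7    == 0x3b	(mask bits aligned with flag bits)
	 *	~(mxcsr >> 7) -> bit 2 set (exception is unmasked)
	 *	... & mxcsr   -> bit 2 survives, so err & 0x004 selects
	 * FPE_FLTDIV in the chain below.
	 */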
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap 16
		 * implementations, it's possible we get a spurious trap...
		 */
		return;		/* Spurious trap, no error */
	}
	force_sig_info(SIGFPE, &info, task);
}
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#endif

	math_error(regs, error_code, 16);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	math_error(regs, error_code, 19);
}
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
/*
 * __math_state_restore assumes that cr0.TS is already clear and the
 * fpu state is all ready for use.  Used during context switch.
 */
void __math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}

	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */

	__math_state_restore();
}
EXPORT_SYMBOL_GPL(math_state_restore);
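/*
 * Lazy-FPU flow in short (summary, not part of the original file): a
 * task touching the FPU while CR0.TS is set takes #NM, which lands in
 * do_device_not_available() below and then here; clts() clears TS so
 * FPU instructions work again, and the context switch code sets TS for
 * the incoming task so its first FPU use re-faults into this path.
 */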
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}
#ifdef CONFIG_X86_32
/*
 * The fixup code for errors in iret jumps to here (iret_exc). It loses
 * the original trap number and error code. The bogus trap 32 and error
 * code 0 are what the vanilla kernel delivers via:
 * DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
 *
 * NOTE: Because of the final "1" in the macro we need to enable interrupts.
 *
 * In case of a general protection fault in the iret instruction, we
 * need to check for a lazy CS update for exec-shield.
 */
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	int cpu;
	int ok;

	local_irq_enable();

	cpu = get_cpu();
	ok = check_lazy_exec_limit(cpu, regs, error_code);
	put_cpu();

	if (!ok && notify_die(DIE_TRAP, "iret exception", regs,
			error_code, 32, SIGSEGV) != NOTIFY_STOP) {
		siginfo_t info;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = ILL_BADSTK;
		info.si_addr = NULL;
		do_trap(32, SIGSEGV, "iret exception", regs, error_code, &info);
	}
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	set_intr_gate(14, &page_fault);
	load_idt(&idt_descr);
}
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();
}