/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after a timer interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 *
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all regs on the stack.
 * If the order here is changed, it must also be
 * updated in fork.c:copy_process, signal.c:do_signal,
 * ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <xen/interface/xen.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
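/*
 * Illustrative note (an assumption about the patching machinery, not
 * code from this file): a DISABLE_INTERRUPTS(CLBR_EAX) site is emitted
 * as an indirect call into pv_irq_ops plus nop padding.  On native
 * hardware the patcher can rewrite the whole site with the one-byte
 * "cli" precisely because the CLBR_EAX annotation frees the replacement
 * from having to preserve %eax - the inline-patch shrinkage the
 * paragraph above describes.
 */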
#define nr_syscalls ((syscall_table_size)/4)

#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#define preempt_stop(clobbers)
#define resume_kernel		restore_all

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?

#define resume_userspace_sig	check_userspace
#define resume_userspace_sig	resume_userspace
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the stack
 * canary, which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
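/*
 * A minimal sketch (for illustration only) of the code gcc emits with
 * -fstack-protector against the %gs:20 slot mentioned above:
 *
 *	prologue:	movl %gs:20, %eax
 *			movl %eax, -4(%ebp)	# stash canary in frame
 *	epilogue:	movl -4(%ebp), %eax
 *			xorl %gs:20, %eax	# must come out zero
 *			jne  __stack_chk_fail
 *
 * This is why %gs must always reference a segment whose offset 20
 * holds a valid canary whenever instrumented code can run.
 */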
#ifdef CONFIG_X86_32_LAZY_GS

	/* unfortunately, push/pop can't be no-ops */
	CFI_ADJUST_CFA_OFFSET 4
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)

	/* all the rest are no-ops */
.macro REG_TO_PTGS reg
.macro SET_KERNEL_GS reg

#else	/* CONFIG_X86_32_LAZY_GS */

	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -\pop
.pushsection .fixup, "ax"
.section __ex_table, "a"
98:	mov PT_GS(%esp), %gs
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
.section __ex_table, "a"
	/*CFI_REGISTER gs, \reg*/
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg

#endif	/* CONFIG_X86_32_LAZY_GS */
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl $(__KERNEL_PERCPU), %edx
.macro RESTORE_INT_REGS
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4

.macro RESTORE_REGS pop=0
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
	CFI_ADJUST_CFA_OFFSET -4
.pushsection .fixup, "ax"
.section __ex_table, "a"
.macro RING0_INT_FRAME
	/*CFI_OFFSET cs, -2*4;*/

.macro RING0_EC_FRAME
	/*CFI_OFFSET cs, -2*4;*/

.macro RING0_PTREGS_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
	CFI_ADJUST_CFA_OFFSET 4
	GET_THREAD_INFO(%ebp)
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET -4

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */
	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	jb resume_kernel		# not returning to v8086 or userspace
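	# Illustrative decoding of the check above: %al holds the saved
	# CS, so after the andl, %eax is EFLAGS.VM combined with CS.RPL.
	# The (elided) compare is against USER_RPL: a trap from kernel
	# mode leaves CS.RPL == 0 and VM == 0, so %eax compares below
	# USER_RPL and jb goes to resume_kernel, while a v8086 or ring-3
	# frame compares at or above it and falls through to user resume.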
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call preempt_schedule_irq

/*
 * End of kprobes section
 */
/*
 * SYSENTER_RETURN points to after the "sysenter" instruction in
 * the vsyscall page. See vsyscall-sysenter.S, which defines the symbol.
 */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_REGISTER esp, ebp
	movl SYSENTER_stack_sp0(%esp),%esp
/*
 * Interrupts are disabled here, but we can't trace that until we have
 * set up enough kernel state for TRACE_IRQS_OFF to be callable - and
 * we immediately enable interrupts at that point anyway.
 */
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
/*
 * Push current_thread_info()->sysenter_return to the stack.
 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
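/*
 * Worked example of the addressing expression above (illustrative;
 * actual values depend on THREAD_SIZE and this kernel's thread_info
 * layout): %esp now points 4*4 = 16 bytes below the esp0 value that
 * copy_thread established, and esp0 itself sits 8 bytes below the top
 * of the stack area.  thread_info lives at the bottom of that area, so
 *
 *	&ti->sysenter_return
 *		= %esp + 16 + 8 - THREAD_SIZE + TI_sysenter_return
 *
 * which is exactly the displacement used by the pushl.
 */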
	CFI_ADJUST_CFA_OFFSET 4
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.long 1b,syscall_fault

	GET_THREAD_INFO(%ebp)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	cmpl $(nr_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
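/*
 * The dispatch above, as a C sketch (illustrative only; the real table
 * is generated from syscall_table_32.S):
 *
 *	if (nr < nr_syscalls)
 *		regs->ax = sys_call_table[nr](bx, cx, dx, si, di, bp);
 *	else
 *		regs->ax = -ENOSYS;
 *
 * "call *sys_call_table(,%eax,4)" indexes a table of 4-byte function
 * pointers with the syscall number in %eax.
 */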
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx

	/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT
1:	mov PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */

	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work

	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx			/* second arg, syscall return value */
	cmpl $0,%eax			/* is it < 0? */
	setl %al			/* 1 if so, 0 if not */
	movzbl %al,%eax			/* zero-extend that */
	inc %eax			/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
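	/*
	 * Worked example of the mapping above (assuming the
	 * conventional AUDITSC_SUCCESS=1 / AUDITSC_FAILURE=2 encoding):
	 *
	 *	%eax = 3 (>= 0):           setl -> 0, inc -> 1 = SUCCESS
	 *	%eax = -EFAULT (negative): setl -> 1, inc -> 2 = FAILURE
	 *
	 * so the call above is audit_syscall_exit(AUDITSC_*, retval).
	 */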
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax		/* reload syscall return value */

.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
.section __ex_table,"a"
ENDPROC(ia32_sysenter_target)
	# pv sysenter call handler stub
ENTRY(ia32pv_sysenter_target)
	movl $__USER_DS,16(%esp)
	movl $__USER_CS,4(%esp)
	CFI_ADJUST_CFA_OFFSET -4
	/* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
.section __ex_table,"a"
	.long 1b,syscall_fault
ENDPROC(ia32pv_sysenter_target)
/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	GET_THREAD_INFO(%ebp)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value

	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss			# returning to user-space with LDT SS
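	# Worked example (selector values assumed for illustration): with
	# a user LDT stack selector SS = 0x0f (TI=1, RPL=3) and user
	# CS = 0x73, %ah = 0x0f and %al = 0x73; after the andl,
	# %eax = (SS.TI << 8) | CS.RPL = 0x0403, which matches
	# ((SEGMENT_LDT << 8) | USER_RPL) and takes the ldt_ss branch.
	# A GDT stack segment (SS.TI = 0) yields 0x0003 and falls through.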
	movl PT_EFLAGS(%esp), %eax
	testl $(X86_EFLAGS_VM|NMI_MASK), %eax
	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
	andb evtchn_upcall_mask(%esi),%al
	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
	jnz restore_all_enable_events	#        != 0 => enable event delivery

	RESTORE_REGS 4			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0			# no error code
.section __ex_table,"a"
	.long irq_return,iret_exc

	larl PT_OLDSS(%esp), %eax
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make DOSemu and Wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
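/*
 * Worked example with made-up numbers: kernel %esp = 0xc1574abc and a
 * saved user ESP image of 0x1234f00c:
 *
 *	%edx = 0xc1574abc			kernel esp
 *	%eax = 0x1234f00c -> 0x12344abc		low word replaced by %dx
 *	%edx -= %eax  ->  0xaf230000		offset, low word zero
 *
 * 0xaf23 is patched into the ESPFIX descriptor base (bits 16..31), so
 * after the lss below, %ss:%esp = ESPFIX:0x12344abc resolves to linear
 * 0xc1574abc - the same kernel stack - while ESP's high word already
 * holds the user's 0x1234, which iret will leave untouched.
 */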
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	PER_CPU(gdt_page, %ebx)
	shr $16, %edx			/* high word of offset into %dx */
	mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
	mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
	CFI_ADJUST_CFA_OFFSET 4
	push %eax			/* new kernel esp */
	CFI_ADJUST_CFA_OFFSET 4
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
restore_all_enable_events:
scrit:	/**** START OF CRITICAL REGION ****/
	jnz 14f				# process more events if necessary...
.section __ex_table,"a"
14:	__DISABLE_INTERRUPTS
ecrit:	/**** END OF CRITICAL REGION ****/
	andl $~NMI_MASK, PT_EFLAGS(%esp)
	jmp hypercall_page + (__HYPERVISOR_iret * 32)
	# perform work that needs to be done immediately before resumption
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
	testb $_TIF_NEED_RESCHED, %cl

	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	testb $_TIF_NEED_RESCHED, %cl

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	jne work_notifysig_v86		# returning to kernel-space or
	call do_notify_resume
	jmp resume_userspace_sig

	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	CFI_ADJUST_CFA_OFFSET -4
	call do_notify_resume
	jmp resume_userspace_sig
	# perform syscall entry tracing
	movl $-ENOSYS,PT_EAX(%esp)
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(nr_syscalls), %eax
END(syscall_trace_entry)

	# perform syscall exit tracing
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
	call syscall_trace_leave
END(syscall_exit_work)

	RING0_INT_FRAME			# can't unwind into user space anyway
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)

	movl $-ENOSYS,PT_EAX(%esp)

/*
 * End of kprobes section
 */
/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \

#define PTREGSCALL1(name) \
	movl (PT_EBX+4)(%esp),%eax; \

#define PTREGSCALL2(name) \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \

#define PTREGSCALL3(name) \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \

PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
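/*
 * For reference, a full PTREGSCALL1 expansion looks roughly like this
 * (a sketch reconstructed around the elided lines above; the 32-bit
 * kernel is built with -mregparm=3, so arg1 travels in %eax and the
 * pt_regs pointer in %edx):
 *
 *	ENTRY(ptregs_##name)
 *		leal 4(%esp),%edx		// pt_regs sits just above
 *						// the return address
 *		movl (PT_EBX+4)(%esp),%eax	// arg1 from saved %ebx
 *		jmp sys_##name
 *	ENDPROC(ptregs_##name)
 */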
/* Clone is an oddball.  The 4th arg is in %edi */
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
	/* fixup the stack */
	PER_CPU(gdt_page, %ebx)
	mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
	mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
	shl $16, %eax			/* base bits into the high word */
	addl %esp, %eax			/* the adjusted stack pointer */
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8

.macro UNWIND_ESPFIX_STACK
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	movl $__KERNEL_DS, %eax
	/* switch to normal stack */
/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
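/*
 * Arithmetic for the packing (approximate; the exact encoding may
 * differ): the first six stubs in a chunk are "pushl $imm8; jmp 2f" at
 * 4 bytes each, and the seventh ends with the shared
 * "2: jmp common_interrupt", so a chunk stays within 32 bytes.  The
 * .rept below therefore needs (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
 * chunks - the +6 rounds up.
 */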
.section .init.rodata,"a"

	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.if vector < NR_VECTORS
	.if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
1:	pushl $(~vector+0x80)		/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
	.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
2:	jmp common_interrupt
END(irq_entries_start)
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl $-0x80,(%esp)		/* Adjust vector into the [-256,-1] range */
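	/*
	 * Worked example of the vector encoding (illustrative): for
	 * vector 0x20 the stub pushed ~0x20+0x80 = 0x5f, which still
	 * encodes as a sign-extended 8-bit immediate.  The addl above
	 * turns it into 0x5f-0x80 = -33 = ~0x20, so the handler can
	 * recover the vector as ~(value) while every stub's push stayed
	 * two bytes long.
	 */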
ENDPROC(common_interrupt)
/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
	CFI_ADJUST_CFA_OFFSET 4;	\

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

#define UNWIND_ESPFIX_STACK
	.pushsection .kprobes.text, "ax"
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
#
# The sysexit critical region is slightly different. sysexit
# atomically removes the entire stack frame. If we interrupt in the
# critical region we know that the entire frame is present and correct
# so we can simply throw away the new one.
ENTRY(hypervisor_callback)
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_CS(%esp),%ecx
	movl PT_EIP(%esp),%eax
	andl $SEGMENT_RPL_MASK,%ecx
	jb critical_region_fixup
#ifdef CONFIG_XEN_SUPERVISOR_MODE_KERNEL
	cmpl $sysexit_scrit,%eax
	cmpl $sysexit_ecrit,%eax
	addl $PT_OLDESP,%esp		# Remove eflags...ebx from stack frame.
	CFI_ADJUST_CFA_OFFSET 4
	call evtchn_do_upcall
	CFI_ADJUST_CFA_OFFSET -4
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of stack slots (4-byte words) which have already
# been popped from the interrupted stack frame.
critical_region_fixup:
	movsbl critical_fixup_table-scrit(%eax),%ecx # %ecx contains num slots popped
	leal (%esp,%ecx,4),%esi		# %esi points at end of src region
	leal PT_OLDESP(%esp),%edi	# %edi points at end of dst region
	jle 17f				# skip loop if nothing to copy
16:	subl $4,%esi			# pre-decrementing copy loop
17:	movl %edi,%esp			# final %edi is top of merged stack
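# Sketch of the fixup in C-like pseudocode (illustrative only):
#
#	slots = critical_fixup_table[eip - scrit];
#	src = esp + 4*slots;		# end of the live part of new frame
#	dst = esp + PT_OLDESP;		# end of the interrupted frame
#	while (src > esp) { src -= 4; dst -= 4; *dst = *src; }
#	esp = dst;			# frames merged; restart handler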
.section .rodata,"a"
critical_fixup_table:
	.rept __SIZEOF_TEST_PENDING
	.byte -1,-1			# jnz 14f
#ifndef CONFIG_X86_32_LAZY_GS
	.byte 10,10			# pop %gs
	.byte 11,11,11			# add $4,%esp
	.byte 10,10,10			# add $8,%esp
	.rept __SIZEOF_DISABLE_INTERRUPTS
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(failsafe_callback)
	addl $16,%esp		# EAX != 0 => Category 2 (Bad IRET)
5:	addl $16,%esp		# EAX == 0 => Category 1 (Bad segment)
	jmp ret_from_exception
.section .fixup,"ax";	\
6:	xorl %eax,%eax;		\
	movl %eax,4(%esp);	\
7:	xorl %eax,%eax;		\
	movl %eax,8(%esp);	\
8:	xorl %eax,%eax;		\
	movl %eax,12(%esp);	\
9:	xorl %eax,%eax;		\
	movl %eax,16(%esp);	\
.section __ex_table,"a";	\
ENTRY(coprocessor_error)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
END(device_not_available)

#ifdef CONFIG_PARAVIRT
.section __ex_table,"a"
	.long native_iret, iret_exc

ENTRY(native_irq_enable_sysexit)
END(native_irq_enable_sysexit)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(coprocessor_segment_overrun)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
END(coprocessor_segment_overrun)

	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(segment_not_present)
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
END(segment_not_present)

ENTRY(stack_segment)
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(alignment_check)
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
END(alignment_check)

	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4

ENTRY(spurious_interrupt_bug)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
#endif /* !CONFIG_XEN */

ENTRY(fixup_4gb_segment)
	pushl $do_fixup_4gb_segment
	CFI_ADJUST_CFA_OFFSET 4
END(fixup_4gb_segment)
/*
 * End of kprobes section
 */

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	movl %ebx, PT_EBX(%edx)
	movl %ebx, PT_ECX(%edx)
	movl %ebx, PT_EDX(%edx)
	movl %esi, PT_ESI(%edx)
	movl %edi, PT_EDI(%edx)
	movl %ebp, PT_EBP(%edx)
	movl %ebx, PT_EAX(%edx)
	movl $__USER_DS, PT_DS(%edx)
	movl $__USER_DS, PT_ES(%edx)
	movl $__KERNEL_PERCPU, PT_FS(%edx)
	movl $__KERNEL_STACK_CANARY, PT_GS(%edx)
	movl %eax, PT_OLDESP(%edx)
	movl %ebx, PT_ORIG_EAX(%edx)
	movl %ecx, PT_EIP(%edx)
	movl $__KERNEL_CS, PT_CS(%edx)
	movl %ebx, PT_EFLAGS(%edx)
	movl PT_EBX(%edx), %ebx
	movl $__KERNEL_DS, PT_OLDSS(%edx)
ENDPROC(arch_unwind_init_running)

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	ud2			# padding for call trace
ENDPROC(kernel_thread_helper)
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax
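	/*
	 * Layout note (illustrative): with the three caller-saved
	 * registers pushed by this stub still on the stack, 0xc(%esp)
	 * is mcount's own return address, i.e. the address right after
	 * the call to mcount in the traced function, and 0x4(%ebp) is
	 * the traced function's return address (its caller's ip).
	 * Subtracting MCOUNT_INSN_SIZE turns the former into the
	 * address of the call instruction itself, which is what
	 * ftrace records.
	 */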
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call

#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpl $0, function_trace_stop
	cmpl $ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller
	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller

	/* taken from glibc */
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax
	call *ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	movl 0xc(%esp), %edx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
	call ftrace_return_to_handler
#include <asm/alternative-asm.h>

	# pv syscall handler stub
ENTRY(ia32pv_cstar_target)
	movl $__USER_DS,16(%esp)
	movl $__USER_CS,4(%esp)
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-4,%ebp
.section __ex_table,"a"
	.long 1b,cstar_fault

	GET_THREAD_INFO(%ebp)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz cstar_trace_entry
	cmpl $nr_syscalls,%eax
	btl %eax,cstar_special
	call *cstar_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value

	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place

	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place

	movl $cstar_clear_tif,(%esp)	# replace return address
	orl $_TIF_CSTAR,TI_flags(%ebp)
	jmp *sys_call_table(,%eax,4)

	movl %eax,PT_EAX(%esp)		# store the return value
	andl $~_TIF_CSTAR,TI_flags(%ebp)
	movl $-ENOSYS,PT_EAX(%esp)
	cmpl $nr_syscalls,%eax
	btl %eax,cstar_special
	jc .Lcstar_trace_special
	orl $_TIF_CSTAR,TI_flags(%ebp)
	call syscall_trace_enter
	andl $~_TIF_CSTAR,TI_flags(%ebp)
	/* What it returned is what we'll actually use. */
	cmpl $nr_syscalls,%eax
.Lcstar_trace_special:
	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $nr_syscalls,%eax

	movl $-ENOSYS,PT_EAX(%esp)
	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	jmp resume_userspace

	GET_THREAD_INFO(%ebp)
ENDPROC(ia32pv_cstar_target)
ENTRY(cstar_ret_from_fork)
	movl PT_ECX(%esp),%ecx
	GET_THREAD_INFO(%ebp)
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	andl $~_TIF_CSTAR,TI_flags(%ebp)

.section .rodata,"a"
#include "syscall_table_32.S"
syscall_table_size=(.-sys_call_table)

#include <asm/unistd.h>
	.rept nr_syscalls+31
	.irp n, __NR_sigreturn, __NR_rt_sigreturn
	mask = mask | (1 << (\n & 31))
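/*
 * Sketch of what the .rept/.irp above builds (illustrative): a bitmap
 * with one bit per syscall number, where only the bits for
 * __NR_sigreturn and __NR_rt_sigreturn are set.  "btl %eax,cstar_special"
 * then sets CF exactly for those special syscalls, which need the
 * user's real EBP (carried in ECX by this convention) restored before
 * the handler is called.
 */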
#define sys_call_table cstar_call_table
#define sys_fork cstar_set_tif
#define sys_clone cstar_set_tif
#define sys_vfork cstar_set_tif
#include "syscall_table_32.S"
#undef sys_call_table
/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4

	/* the function address is in %gs's slot on the stack */
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__KERNEL_PERCPU), %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	movl $(__USER_DS), %ecx
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
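/*
 * Arithmetic note (illustrative): on entry the handler's %esp points
 * at the 3 words (eflags, cs, eip) it pushed, i.e. 3*4 = 12 bytes
 * below where TSS_sysenter_sp0 sat when the trap hit; hence
 * "TSS_sysenter_sp0 + \offset(%esp)" with \offset = 12 from the debug
 * path (and 24 from the deeper NMI path below) locates sp0 again.
 */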
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
#endif /* CONFIG_XEN */

	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
#endif /* !CONFIG_XEN */
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
	CFI_ADJUST_CFA_OFFSET 4
	cmpw $__ESPFIX_SS, %ax
	CFI_ADJUST_CFA_OFFSET -4
	cmpl $ia32_sysenter_target,(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check

	/* We have a RING0_INT_FRAME here */
	CFI_ADJUST_CFA_OFFSET 4
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	jmp restore_all_notrace

	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct
	/*
	 * We have a RING0_INT_FRAME here.
	 * Build the far pointer that lss will use to switch back.
	 */
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	/* copy the iret frame of 12 bytes */
	CFI_ADJUST_CFA_OFFSET 4
	CFI_ADJUST_CFA_OFFSET 4
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24

	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	orl $NMI_MASK, PT_EFLAGS(%esp)

	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception

ENTRY(general_protection)
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
END(general_protection)

/*
 * End of kprobes section
 */