/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <xen/interface/xen.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka "iret")
 *   GET_CR0_INTO_EAX (aka "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
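/*
 * Illustrative sketch (assumed, not part of this file): under
 * CONFIG_PARAVIRT, DISABLE_INTERRUPTS(CLBR_EAX) expands to an indirect
 * call through pv_irq_ops that may later be patched inline; natively it
 * is just "cli". Roughly:
 *
 *	push %ecx; push %edx			# preserve all but the clobber set
 *	call *pv_irq_ops+PV_IRQ_irq_disable
 *	pop %edx; pop %ecx
 *
 * The wider the clobber list, the fewer save/restore instructions are
 * needed, so the replacement is more likely to fit into the patch site.
 */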
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel uses it only for the stack
 * canary, which gcc requires to live at %gs:20. Read the comment at
 * the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
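/*
 * Illustrative sketch (assumed, not part of this file): with
 * -fstack-protector, gcc emits code of roughly this shape, which is
 * why the canary must sit at the fixed address %gs:20:
 *
 *	movl %gs:20, %eax
 *	movl %eax, -4(%ebp)	# stash canary in the frame
 *	...
 *	movl -4(%ebp), %eax
 *	xorl %gs:20, %eax	# recheck on exit
 *	je 1f
 *	call __stack_chk_fail	# mismatch: stack smashed
 * 1:
 */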
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-ops */
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)

 /* all the rest are no-ops */
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

	/*CFI_REL_OFFSET gs, 0*/
	CFI_ADJUST_CFA_OFFSET -\pop
	.pushsection .fixup, "ax"
	.section __ex_table, "a"
98:	mov PT_GS(%esp), %gs
	.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	.section __ex_table, "a"
	/*CFI_REGISTER gs, \reg*/
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

	/*CFI_REL_OFFSET fs, 0;*/
	/*CFI_REL_OFFSET es, 0;*/
	/*CFI_REL_OFFSET ds, 0;*/
	CFI_REL_OFFSET eax, 0
	CFI_REL_OFFSET ebp, 0
	CFI_REL_OFFSET edi, 0
	CFI_REL_OFFSET esi, 0
	CFI_REL_OFFSET edx, 0
	CFI_REL_OFFSET ecx, 0
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl $(__KERNEL_PERCPU), %edx

.macro RESTORE_INT_REGS
.macro RESTORE_REGS pop=0
	.pushsection .fixup, "ax"
	.section __ex_table, "a"

.macro RING0_INT_FRAME
	/*CFI_OFFSET cs, -2*4;*/
.macro RING0_EC_FRAME
	/*CFI_OFFSET cs, -2*4;*/
.macro RING0_PTREGS_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP

	GET_THREAD_INFO(%ebp)
	pushl_cfi $0x0202		# Reset kernel eflags

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */
	# userspace resumption stub bypassing syscall exit tracing
	preempt_stop(CLBR_ANY)
	GET_THREAD_INFO(%ebp)
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	call preempt_schedule_irq
#endif

/*
 * End of kprobes section
 */
/*
 * SYSENTER_RETURN points to after the "sysenter" instruction in
 * the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.
 */
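/*
 * For reference, the userspace side (a sketch from memory, assumed
 * rather than quoted from vsyscall-sysenter.S) looks roughly like:
 *
 *	__kernel_vsyscall:
 *		push %ecx
 *		push %edx
 *		push %ebp
 *		movl %esp, %ebp		# kernel finds the user esp in %ebp
 *		sysenter
 *	SYSENTER_RETURN:		# sysexit lands here
 *		pop %ebp
 *		pop %edx
 *		pop %ecx
 *		ret
 */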
	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_REGISTER esp, ebp
	movl SYSENTER_stack_sp0(%esp),%esp
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
	 * callable - and we immediately enable interrupts at that point
	 * anyway.
	 */
	/*CFI_REL_OFFSET ss, 0*/
	CFI_REL_OFFSET esp, 0
	orl $X86_EFLAGS_IF, (%esp)
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0
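	/*
	 * Worked out (assuming the standard layout): esp0 points at the
	 * top of the thread stack minus 8 (copy_thread reserves 2 words),
	 * and we have since pushed ss, esp, eflags and cs = 4*4 bytes.
	 * thread_info sits at the base of the THREAD_SIZE-aligned stack,
	 * so:
	 *
	 *	&ti->sysenter_return
	 *		= %esp + 4*4 + 8 - THREAD_SIZE + TI_sysenter_return
	 *
	 * which is exactly the addressing expression above.
	 */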
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	movl %ebp,PT_EBP(%esp)
	.section __ex_table,"a"
	.long 1b,syscall_fault
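/*
 * Why __PAGE_OFFSET-3: the 4-byte load at (%ebp) must lie entirely
 * below the kernel boundary. The last valid start address is
 * __PAGE_OFFSET-4, so anything >= __PAGE_OFFSET-3 must be rejected.
 * The __ex_table entry then catches the case where the address is in
 * range but unmapped, diverting to syscall_fault instead of oopsing.
 */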
	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	cmpl $(NR_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
#ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT
1:	mov PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call __audit_syscall_entry
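	/*
	 * Note (an assumption stated for readability): the kernel is
	 * built with -mregparm=3, so __audit_syscall_entry() takes its
	 * first three arguments in %eax/%edx/%ecx and the rest on the
	 * stack - which is why only args 1-3 are loaded explicitly above
	 * while args 4-6 already sit in 0/4/8(%esp) from earlier pushes.
	 */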
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx			/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax		/* is it an error? */
	setbe %al			/* 1 if so, 0 if not */
	movzbl %al,%eax			/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax		/* reload syscall return value */

	.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	.section __ex_table,"a"
ENDPROC(ia32_sysenter_target)

	# pv sysenter call handler stub
ENTRY(ia32pv_sysenter_target)
	movl $__USER_DS,16(%esp)
	movl $__USER_CS,4(%esp)
	CFI_ADJUST_CFA_OFFSET -4
	/* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
	pushl_cfi (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	.section __ex_table,"a"
	.long 1b,syscall_fault
ENDPROC(ia32pv_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(NR_syscalls), %eax
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss			# returning to user-space with LDT SS
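	# Worked example of the check above: after the two byte moves,
	# %eax holds EFLAGS with bits 8-15 replaced by SS's low byte and
	# bits 0-7 by CS's low byte; EFLAGS.VM survives in bit 17. The
	# mask keeps only VM, SS.TI (bit 2, shifted to bit 10 here) and
	# CS.RPL, so equality with ((SEGMENT_LDT << 8) | USER_RPL) means:
	# VM clear, stack selector from the LDT, returning to ring 3 -
	# exactly the case the ESPFIX path below must handle.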
	movl PT_EFLAGS(%esp), %eax
	testl $(X86_EFLAGS_VM|NMI_MASK), %eax
	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
	andb evtchn_upcall_mask(%esi),%al
	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
	jnz restore_all_enable_events	#     != 0 => enable event delivery
	RESTORE_REGS 4			# skip orig_eax/error_code

	pushl $0			# no error code
	.section __ex_table,"a"
	.long irq_return,iret_exc
	larl PT_OLDSS(%esp), %eax
	testl $0x00400000, %eax		# returning to 32-bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
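/*
 * Worked example (illustrative values): suppose the kernel %esp is
 * 0xc1234f00 and the user %esp is 0x0005fffc. The code below builds
 * %eax = 0x00054f00 (user high word, kernel low word) and
 * %edx = 0xc1234f00 - 0x00054f00 = 0xc11e0000 (low word zero), then
 * plants %edx's top bytes into the ESPFIX descriptor base. After
 * "lss (%esp),%esp" the linear stack address is base + %esp =
 * 0xc11e0000 + 0x00054f00 = the original kernel stack, while the
 * architectural ESP already carries the user's high word 0x0005, so
 * iret dropping the high word no longer matters.
 */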
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret.
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8

restore_all_enable_events:
scrit:	/**** START OF CRITICAL REGION ****/
	jnz 14f				# process more events if necessary...
	.section __ex_table,"a"
14:	__DISABLE_INTERRUPTS
ecrit:  /**** END OF CRITICAL REGION ****/
	andl $~NMI_MASK, PT_EFLAGS(%esp)
	jmp hypercall_page + (__HYPERVISOR_iret * 32)

	# perform work that needs to be done immediately before resumption
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
	testb $_TIF_NEED_RESCHED, %cl
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	testb $_TIF_NEED_RESCHED, %cl

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	ENABLE_INTERRUPTS(CLBR_NONE)
	call do_notify_resume
	jmp resume_userspace_sig

	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	ENABLE_INTERRUPTS(CLBR_NONE)
	call do_notify_resume
	jmp resume_userspace_sig
	# perform syscall entry tracing
	movl $-ENOSYS,PT_EAX(%esp)
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(NR_syscalls), %eax
END(syscall_trace_entry)

	# perform syscall exit tracing
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	call syscall_trace_leave
END(syscall_exit_work)
	RING0_INT_FRAME			# can't unwind into user space anyway
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)

	movl $-ENOSYS,PT_EAX(%esp)

/*
 * End of kprobes section
 */

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
ENTRY(ptregs_##name) ; \
ENDPROC(ptregs_##name)

#define PTREGSCALL1(name) \
ENTRY(ptregs_##name) ; \
	movl (PT_EBX+4)(%esp),%eax; \
ENDPROC(ptregs_##name)

#define PTREGSCALL2(name) \
ENTRY(ptregs_##name) ; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
ENDPROC(ptregs_##name)

#define PTREGSCALL3(name) \
ENTRY(ptregs_##name) ; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
ENDPROC(ptregs_##name)

PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
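/*
 * A note on the pattern above (the elided macro lines follow it too):
 * each stub picks the syscall's register arguments back out of the
 * saved pt_regs, with the +4 in (PT_EBX+4)(%esp) etc. compensating
 * for the return address that the call into the stub pushed on top of
 * the frame. With the kernel's regparm(3) calling convention the
 * values land in %eax/%edx/%ecx for the C sys_* handler.
 */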
/* Clone is an oddball.  The 4th arg is in %edi */
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	CFI_ADJUST_CFA_OFFSET -8
ENDPROC(ptregs_clone)
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.macro UNWIND_ESPFIX_STACK
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	movl $__KERNEL_DS, %eax
	/* switch to normal stack */

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
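/*
 * The size arithmetic, spelled out: each stub is a 2-byte "push imm8"
 * (the vector is stored as ~vector+0x80 exactly so that it fits in a
 * signed byte) followed by a 2-byte short jump to the chunk's shared
 * "jmp common_interrupt" tail; six 4-byte stubs plus the seventh's
 * push and the roughly 5-byte near jump come to about 31 bytes,
 * inside the 32-byte/one-cache-line budget.
 */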
.section .init.rodata,"a"
.section .entry.text, "ax"

	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.if vector < NR_VECTORS
	.if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
	.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	.section .entry.text, "ax"
2:	jmp common_interrupt
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
ENDPROC(common_interrupt)

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

#define UNWIND_ESPFIX_STACK
	.pushsection .kprobes.text, "ax"

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
#
# The sysexit critical region is slightly different. sysexit
# atomically removes the entire stack frame. If we interrupt in the
# critical region we know that the entire frame is present and correct
# so we can simply throw away the new one.
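# Sketch of the detection that follows (descriptive, matching the code
# below): with the interrupted EIP in %eax and the CS RPL in %ecx,
#
#	if (RPL != 0)				# came from user mode
#		deliver the event normally;	# nesting impossible
#	else if (scrit <= eip < ecrit)		# inside critical region
#		goto critical_region_fixup;	# merge the two frames
#
# i.e. only a kernel-mode interrupt landing between scrit and ecrit
# needs the fixup path.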
ENTRY(hypervisor_callback)
	movl PT_CS(%esp),%ecx
	movl PT_EIP(%esp),%eax
	andl $SEGMENT_RPL_MASK,%ecx
	jb critical_region_fixup

#ifdef CONFIG_XEN_SUPERVISOR_MODE_KERNEL
	cmpl $sysexit_scrit,%eax
	cmpl $sysexit_ecrit,%eax
	addl $PT_OLDESP,%esp		# Remove eflags...ebx from stack frame.

	call evtchn_do_upcall
	CFI_ADJUST_CFA_OFFSET -4
# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of 4-byte stack slots which have already been popped
# from the interrupted stack frame.
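# Worked example (illustrative): suppose the interrupt arrived just
# after the critical code had popped two 4-byte slots of the old frame.
# The table entry for that EIP offset yields %ecx = 2, so
# %esi = %esp + 2*4 points past the already-consumed words of the
# interrupted frame, and the pre-decrementing copy loop below slides
# the remaining words down, leaving one coherent frame before the
# handler is restarted.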
critical_region_fixup:
	movsbl critical_fixup_table-scrit(%eax),%ecx # %ecx contains num slots popped
	leal (%esp,%ecx,4),%esi		# %esi points at end of src region
	leal PT_OLDESP(%esp),%edi	# %edi points at end of dst region
	jle 17f				# skip loop if nothing to copy
16:	subl $4,%esi			# pre-decrementing copy loop
17:	movl %edi,%esp			# final %edi is top of merged stack

.section .rodata,"a"
critical_fixup_table:
	.rept __SIZEOF_TEST_PENDING
	.byte -1,-1			# jnz 14f
#ifndef CONFIG_X86_32_LAZY_GS
	.byte 10,10			# pop %gs
	.byte 11,11,11			# add $4,%esp
#else
	.byte 10,10,10			# add $8,%esp
#endif
	.rept __SIZEOF_DISABLE_INTERRUPTS
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
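# Sketch of the EAX protocol (reconstructed from the fragments here,
# so treat it as descriptive): EAX is set non-zero on entry; if one of
# the segment reloads faults, its fixup (6:-9: below) zeroes EAX and
# the offending saved selector and resumes, so EAX == 0 afterwards
# means "a segment load failed and was patched up" (category 1). If
# all reloads succeed EAX is still non-zero, so the original fault can
# only have been the IRET itself (category 2) and we take do_iret_error.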
ENTRY(failsafe_callback)
	addl $16,%esp		# EAX != 0 => Category 2 (Bad IRET)
5:	addl $16,%esp		# EAX == 0 => Category 1 (Bad segment)
	jmp ret_from_exception

.section .fixup,"ax";		\
6:	xorl %eax,%eax;		\
	movl %eax,4(%esp);	\
7:	xorl %eax,%eax;		\
	movl %eax,8(%esp);	\
8:	xorl %eax,%eax;		\
	movl %eax,12(%esp);	\
9:	xorl %eax,%eax;		\
	movl %eax,16(%esp);	\
.section __ex_table,"a";	\

ENTRY(coprocessor_error)
	pushl_cfi $do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
END(simd_coprocessor_error)

ENTRY(device_not_available)
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
END(device_not_available)

#ifdef CONFIG_PARAVIRT
	.section __ex_table,"a"
	.long native_iret, iret_exc

ENTRY(native_irq_enable_sysexit)
END(native_irq_enable_sysexit)
#endif

	pushl_cfi $do_overflow
	pushl_cfi $do_bounds
	pushl_cfi $do_invalid_op

ENTRY(coprocessor_segment_overrun)
	pushl_cfi $do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	pushl_cfi $do_invalid_TSS

ENTRY(segment_not_present)
	pushl_cfi $do_segment_not_present
END(segment_not_present)

ENTRY(stack_segment)
	pushl_cfi $do_stack_segment

ENTRY(alignment_check)
	pushl_cfi $do_alignment_check
END(alignment_check)

	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	pushl_cfi machine_check_vector

ENTRY(spurious_interrupt_bug)
	pushl_cfi $do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#endif /* !CONFIG_XEN */

ENTRY(fixup_4gb_segment)
	pushl_cfi $do_fixup_4gb_segment
END(fixup_4gb_segment)

/*
 * End of kprobes section
 */

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	movl %ebx, PT_EBX(%edx)
	movl %ebx, PT_ECX(%edx)
	movl %ebx, PT_EDX(%edx)
	movl %esi, PT_ESI(%edx)
	movl %edi, PT_EDI(%edx)
	movl %ebp, PT_EBP(%edx)
	movl %ebx, PT_EAX(%edx)
	movl $__USER_DS, PT_DS(%edx)
	movl $__USER_DS, PT_ES(%edx)
	movl $__KERNEL_PERCPU, PT_FS(%edx)
	movl $__KERNEL_STACK_CANARY, PT_GS(%edx)
	movl %eax, PT_OLDESP(%edx)
	movl %ebx, PT_ORIG_EAX(%edx)
	movl %ecx, PT_EIP(%edx)
	movl $__KERNEL_CS, PT_CS(%edx)
	movl %ebx, PT_EFLAGS(%edx)
	movl PT_EBX(%edx), %ebx
	movl $__KERNEL_DS, PT_OLDSS(%edx)
ENDPROC(arch_unwind_init_running)
#endif

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	ud2			# padding for call trace
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call

#else /* ! CONFIG_DYNAMIC_FTRACE */

	cmpl $0, function_trace_stop
	cmpl $ftrace_stub, ftrace_trace_function
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller
	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller

	/* taken from glibc */
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax
	call *ftrace_trace_function
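	/*
	 * How the loads above line up (assuming the usual mcount call
	 * ABI): at this point three registers have been pushed, so
	 * 0xc(%esp) is mcount's own return address, i.e. the address
	 * just past the "call mcount" in the traced function;
	 * subtracting MCOUNT_INSN_SIZE turns it into the traced
	 * function's ip. 0x4(%ebp) reaches through the traced
	 * function's frame pointer to its return address - the parent
	 * ip. The pair is then handed to the tracer as (ip, parent_ip).
	 */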
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	movl 0xc(%esp), %edx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
END(ftrace_graph_caller)

.globl return_to_handler
	call ftrace_return_to_handler

	# pv syscall call handler stub
ENTRY(ia32pv_cstar_target)
	movl $__USER_DS,16(%esp)
	movl $__USER_CS,4(%esp)
	pushl_cfi %eax			# save orig_eax
/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-4,%ebp
	.section __ex_table,"a"
	.long 1b,cstar_fault
	GET_THREAD_INFO(%ebp)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz cstar_trace_entry
	cmpl $NR_syscalls,%eax
	btl %eax,cstar_special
	call *cstar_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
GLOBAL(cstar_set_tif)
	movl $cstar_clear_tif,(%esp)	# replace return address
	orl $_TIF_CSTAR,TI_flags(%ebp)
	jmp *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
	andl $~_TIF_CSTAR,TI_flags(%ebp)

	movl $-ENOSYS,PT_EAX(%esp)
	cmpl $NR_syscalls,%eax
	btl %eax,cstar_special
	jc .Lcstar_trace_special
	orl $_TIF_CSTAR,TI_flags(%ebp)
	call syscall_trace_enter
	andl $~_TIF_CSTAR,TI_flags(%ebp)
	/* What it returned is what we'll actually use. */
	cmpl $NR_syscalls,%eax
.Lcstar_trace_special:
	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $NR_syscalls,%eax
	movl $-ENOSYS,PT_EAX(%esp)
	movl PT_ECX(%esp),%ecx
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	jmp resume_userspace

	GET_THREAD_INFO(%ebp)
ENDPROC(ia32pv_cstar_target)

ENTRY(cstar_ret_from_fork)
	movl PT_ECX(%esp),%ecx
	GET_THREAD_INFO(%ebp)
	movl %ecx,PT_EBP(%esp)		# put user EBP back in place
	andl $~_TIF_CSTAR,TI_flags(%ebp)
END(cstar_ret_from_fork)

#include <asm/unistd.h>
	.pushsection .rodata,"a"
.rept NR_syscalls+31
.irp n, __NR_sigreturn, __NR_rt_sigreturn
mask = mask | (1 << (\n & 31))
#endif /* TIF_CSTAR */

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	pushl_cfi $do_page_fault

	/* the function address is in %gs's slot on the stack */
	/*CFI_REL_OFFSET fs, 0*/
	/*CFI_REL_OFFSET es, 0*/
	/*CFI_REL_OFFSET ds, 0*/
	CFI_REL_OFFSET eax, 0
	CFI_REL_OFFSET ebp, 0
	CFI_REL_OFFSET edi, 0
	CFI_REL_OFFSET esi, 0
	CFI_REL_OFFSET edx, 0
	CFI_REL_OFFSET ecx, 0
	CFI_REL_OFFSET ebx, 0
	movl $(__KERNEL_PERCPU), %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	movl $(__USER_DS), %ecx
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
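/*
 * Worked through (assuming the layout in the comment above): if the
 * NMI fires on the sysenter insn itself, the CPU has pushed eflags,
 * cs and eip onto the current, not-yet-switched stack, so the sp0
 * word is found 12 bytes further out - hence the "+12" passed to
 * FIX_STACK from the debug/NMI checks below. After loading the real
 * esp0 stack, the three saved words are re-pushed by hand, with eip
 * replaced by sysenter_past_esp so that resuming skips the stack
 * setup the interrupted sysenter never got to finish.
 */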
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
#endif /* CONFIG_XEN */

	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
#endif /* !CONFIG_XEN */
	pushl_cfi $-1			# mark this as an int
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, and after that we check
 * whether we got an NMI on the debug path, where the debug
 * fault itself happened on the sysenter path.
 */
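/*
 * The checks below, in order (a descriptive summary):
 *  1. on the espfix (16-bit) stack?	-> nmi_espfix_stack
 *  2. EIP at the sysenter entry point?	-> fix the stack first (+12)
 *  3. already on a sane kernel stack?	-> nmi_stack_correct
 *  4. on the debug handler's partial frame, with its saved EIP inside
 *     the sysenter setup window?	-> fix with the deeper offset (+24)
 */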
	cmpw $__ESPFIX_SS, %ax
	cmpl $ia32_sysenter_target,(%esp)
	/*
	 * Do not access memory above the end of our stack page;
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check

	/* We have a RING0_INT_FRAME here */
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	jmp restore_all_notrace

	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	/* copy the iret frame of 12 bytes */
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24

	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	orl $NMI_MASK, PT_EFLAGS(%esp)

	pushl_cfi $-1			# mark this as an int
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	jmp ret_from_exception

ENTRY(general_protection)
	pushl_cfi $do_general_protection
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	pushl_cfi $do_async_page_fault
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */