/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>

extern void die(const char *, struct pt_regs *, long);
static inline int notify_page_fault(struct pt_regs *regs)
{
        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode_vm(regs)) {
                if (kprobe_running() && kprobe_fault_handler(regs, 14))

static inline int notify_page_fault(struct pt_regs *regs)
/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
                                            unsigned long *eip_limit)
{
        unsigned long eip = regs->eip;
        unsigned seg = regs->xcs & 0xffff;
        u32 seg_ar, seg_limit, base, *desc;
        /* Unlikely, but must come before segment checks. */
        if (unlikely(regs->eflags & VM_MASK)) {
                base = seg << 4;
                *eip_limit = base + 0xffff;
                return base + (eip & 0xffff);
        }
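        /*
         * Worked example (added for clarity): in VM86 mode the CS base is
         * simply the 16-bit selector shifted left by four, so a selector of
         * 0xA000 gives base 0xA0000 and a valid EIP window of
         * 0xA0000..0xAFFFF (base + 0xffff).
         */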
        /* The standard kernel/user address space limit. */
        *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

        /* By far the most common cases. */
        if (likely(SEGMENT_IS_FLAT_CODE(seg)))
                return eip;
        /* Check the segment exists, is within the current LDT/GDT size,
           that kernel/user (ring 0..3) has the appropriate privilege,
           that it's a code segment, and get the limit. */
        __asm__ ("larl %3,%0; lsll %3,%1"
                 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
        if ((~seg_ar & 0x9800) || eip > seg_limit) {
                *eip_limit = 0;
                return 1;	 /* So that returned eip > *eip_limit. */
        }
        /* Get the GDT/LDT descriptor base.
           When you look for races in this code remember that
           LDT and other horrors are only used in user space. */
        if (seg & (1 << 2)) {
                /* Must lock the LDT while reading it. */
                mutex_lock(&current->mm->context.lock);
                desc = current->mm->context.ldt;
                desc = (void *)desc + (seg & ~7);
        } else {
                /* Must disable preemption while reading the GDT. */
                desc = (u32 *)get_cpu_gdt_table(get_cpu());
                desc = (void *)desc + (seg & ~7);
        }

        /* Decode the code segment base from the descriptor */
        base = get_desc_base((unsigned long *)desc);

        if (seg & (1 << 2))
                mutex_unlock(&current->mm->context.lock);
        else
                put_cpu();
        /* Adjust EIP and segment limit, and clamp at the kernel limit.
           It's legitimate for segments to wrap at 0xffffffff. */
        seg_limit += base;
        if (seg_limit < *eip_limit && seg_limit >= base)
                *eip_limit = seg_limit;
        return eip + base;
}
/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
        unsigned long limit;
        unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
        int scan_more = 1;
        int prefetch = 0;
        int i;

        for (i = 0; scan_more && i < 15; i++) {
                unsigned char opcode;
                unsigned char instr_hi;
                unsigned char instr_lo;

                if (instr > (unsigned char *)limit)
                        break;
                if (probe_kernel_address(instr, opcode))
                        break;

                instr_hi = opcode & 0xf0;
                instr_lo = opcode & 0x0f;
                instr++;

                switch (instr_hi) {
                case 0x20:
                case 0x30:
                        /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
                        scan_more = ((instr_lo & 7) == 0x6);
                        break;
                case 0x60:
                        /* 0x64 thru 0x67 are valid prefixes in all modes. */
                        scan_more = (instr_lo & 0xC) == 0x4;
                        break;
                case 0xF0:
                        /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
                        scan_more = !instr_lo || (instr_lo>>1) == 1;
                        break;
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
                        if (instr > (unsigned char *)limit)
                                break;
                        if (probe_kernel_address(instr, opcode))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
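                        /*
                         * For reference (note added, not in the original
                         * source): 0x0F 0x18 /r encodes the SSE PREFETCHh
                         * family (prefetchnta/t0/t1/t2) and 0x0F 0x0D is
                         * AMD's 3DNow! PREFETCH/PREFETCHW, which is why only
                         * these two second bytes mark the fault as a
                         * harmless prefetch.
                         */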
static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                              unsigned long error_code)
{
        if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
                     boot_cpu_data.x86 >= 6)) {
                /* Catch an obscure case of prefetch inside an NX page. */
                if (nx_enabled && (error_code & 16))
                        return 0;
                return __is_prefetch(regs, addr);
        }
        return 0;
}
static noinline void force_sig_info_fault(int si_signo, int si_code,
        unsigned long address, struct task_struct *tsk)
{
        siginfo_t info;

        info.si_signo = si_signo;
        info.si_errno = 0;
        info.si_code = si_code;
        info.si_addr = (void __user *)address;
        force_sig_info(si_signo, &info, tsk);
}
fastcall void do_invalid_op(struct pt_regs *, unsigned long);
#ifdef CONFIG_X86_PAE
static void dump_fault_path(unsigned long address)
{
        unsigned long *p, page;
        unsigned long mfn;

        page = read_cr3();
        p = (unsigned long *)__va(page);
        p += (address >> 30) * 2;
        printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
        if (p[0] & _PAGE_PRESENT) {
                mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
                page = mfn_to_pfn(mfn) << PAGE_SHIFT;
                p = (unsigned long *)__va(page);
                address &= 0x3fffffff;
                p += (address >> 21) * 2;
                printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
                       page, p[1], p[0]);
                mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
#ifdef CONFIG_HIGHPTE
                if (mfn_to_pfn(mfn) >= highstart_pfn)
                        return;
#endif
                if ((p[0] & _PAGE_PRESENT) && !(p[0] & _PAGE_PSE)) {
                        page = mfn_to_pfn(mfn) << PAGE_SHIFT;
                        p = (unsigned long *) __va(page);
                        address &= 0x001fffff;
                        p += (address >> 12) * 2;
                        printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
                               page, p[1], p[0]);
                }
        }
}
#else
static void dump_fault_path(unsigned long address)
{
        unsigned long page;

        page = read_cr3();
        page = ((unsigned long *) __va(page))[address >> PGDIR_SHIFT];
        printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
               machine_to_phys(page));
        /*
         * We must not directly access the pte in the highpte
         * case if the page table is located in highmem.
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already.
         */
        if ((machine_to_phys(page) >> PAGE_SHIFT) < max_low_pfn
            && (page & _PAGE_PRESENT)
            && !(page & _PAGE_PSE)) {
                page = machine_to_phys(page & PAGE_MASK);
                page = ((unsigned long *) __va(page))[(address >> PAGE_SHIFT)
                                                      & (PTRS_PER_PTE - 1)];
                printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
                       machine_to_phys(page));
        }
}
#endif
static int spurious_fault(struct pt_regs *regs,
                          unsigned long address,
                          unsigned long error_code)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /* Reserved-bit violation or user access to kernel space? */
        if (error_code & 0x0c)
                return 0;
        pgd = init_mm.pgd + pgd_index(address);
        if (!pgd_present(*pgd))
                return 0;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return 0;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return 0;
        pte = pte_offset_kernel(pmd, address);
        if (!pte_present(*pte))
                return 0;
        if ((error_code & 0x02) && !pte_write(*pte))
                return 0;
#ifdef CONFIG_X86_PAE
        if ((error_code & 0x10) && (__pte_val(*pte) & _PAGE_NX))
                return 0;
#endif
        return 1;
}
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;
        /*
         * set_pgd(pgd, *pgd_k); here would be useless on PAE
         * and redundant with the set_pmd() on non-PAE. As would
         * set_pud.
         */
        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;
        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;
        if (!pmd_present(*pmd)) {
#if CONFIG_XEN_COMPAT > 0x030002
                set_pmd(pmd, *pmd_k);
#else
                /*
                 * When running on older Xen we must launder *pmd_k through
                 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
                 */
                set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
#endif
                arch_flush_lazy_mmu_mode();
        } else
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
        return pmd_k;
}
/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
        unsigned long pgd_paddr;
        pmd_t *pmd_k;
        pte_t *pte_k;
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_paddr = read_cr3();
        pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
        if (!pmd_k)
                return -1;
        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;
        return 0;
}
int show_unhandled_signals = 1;
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
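/*
 * Worked example (added for clarity): error_code == 6 (bits 1 and 2 set)
 * is a user-mode write to a not-present page, the common demand-paging and
 * copy-on-write case, while error_code == 0x11 (bits 0 and 4 set) would be
 * an instruction fetch that hit a present but NX-protected page.
 */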
fastcall void __kprobes do_page_fault(struct pt_regs *regs,
                                      unsigned long error_code)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long address;
        int write, si_code;
        int fault;
        /*
         * We can fault from pretty much anywhere, with unknown IRQ state.
         */
        trace_hardirqs_fixup();

        /* get the address */
        address = read_cr2();
        /* Set the "privileged fault" bit to something sane. */
        error_code &= ~4;
        error_code |= (regs->xcs & 2) << 1;
        if (regs->eflags & X86_EFLAGS_VM)
                error_code |= 4;
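        /*
         * Note on the adjustment above (added for clarity): regs->xcs & 2
         * picks up bit 1 of the CS RPL, which is only set for ring-3 (user)
         * code; under a Xen paravirtualized i386 kernel the guest kernel
         * runs in ring 1, so this test, shifted into bit 2, recreates the
         * "user mode" bit of error_code, and VM86 tasks are forced to count
         * as user mode as well.
         */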
        tsk = current;

        si_code = SEGV_MAPERR;
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 9) == 0.
         */
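        /*
         * For clarity (added note): the 0x0d mask used below combines bit 0
         * (protection violation), bit 2 (user mode) and bit 3 (reserved
         * bit), so only a kernel-mode access to a genuinely not-present
         * page is handed to vmalloc_fault().
         */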
        if (unlikely(address >= TASK_SIZE)) {
                /* Faults in hypervisor area can never be patched up. */
                if (address >= hypervisor_virt_start)
                        goto bad_area_nosemaphore;

                if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
                        return;
                /* Can take a spurious fault if mapping changes R/O -> R/W. */
                if (spurious_fault(regs, address, error_code))
                        return;
                if (notify_page_fault(regs))
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
                goto bad_area_nosemaphore;
        }
        if (notify_page_fault(regs))
                return;

        /* It's safe to allow irq's after cr2 has been saved and the vmalloc
           fault has been handled. */
        if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
                local_irq_enable();

        mm = tsk->mm;
        /*
         * If we're in an interrupt, have no user context or are running in an
         * atomic region then we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((error_code & 4) == 0 &&
                    !search_exception_tables(regs->eip))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & 4) {
                /*
                 * Accessing the stack below %esp is always a bug.
                 * The large cushion allows instructions like enter
                 * and pusha to work. ("enter $65535,$31" pushes
                 * 32 pointers and then decrements %esp by 65535.)
                 */
                if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;
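        /*
         * Arithmetic note on the stack cushion checked above (added for
         * clarity): on i386, 65536 + 32 * sizeof(unsigned long) is
         * 65536 + 128 = 65664 bytes below %esp, covering the 32 pushed
         * frame pointers plus the 64 KiB decrement of "enter $65535,$31".
         */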
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & 3) {
        default:	/* 3: write, present */
        case 2:		/* write, not present */
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                write++;
                break;
        case 1:		/* read, present */
                goto bad_area;
        case 0:		/* read, not present */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }
survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
        /*
         * Did it hit the DOS screen memory VA from vm86 mode?
         */
        if (regs->eflags & VM_MASK) {
                unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
                if (bit < 32)
                        tsk->thread.screen_bitmap |= 1 << bit;
        }
        up_read(&mm->mmap_sem);
        return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & 4) {
                /*
                 * It's possible to have interrupts off here.
                 */
                local_irq_enable();

                /*
                 * Valid to do another page fault here because this one came
                 * from user space.
                 */
                if (is_prefetch(regs, address, error_code))
                        return;

                if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                    printk_ratelimit()) {
                        printk("%s%s[%d]: segfault at %08lx eip %08lx "
                            "esp %08lx error %lx\n",
                            task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                            tsk->comm, task_pid_nr(tsk), address, regs->eip,
                            regs->esp, error_code);
                }
                tsk->thread.cr2 = address;
                /* Kernel addresses are always protection faults */
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                force_sig_info_fault(SIGSEGV, si_code, address, tsk);
                return;
        }
#ifdef CONFIG_X86_F00F_BUG
        /*
         * Pentium F0 0F C7 C8 bug workaround.
         */
        if (boot_cpu_data.f00f_bug) {
                unsigned long nr;
                nr = (address - idt_descr.address) >> 3;
                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return;
                }
        }
#endif
no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Valid to do another page fault here, because if this fault
         * had been triggered by is_prefetch fixup_exception would have
         * handled it.
         */
        if (is_prefetch(regs, address, error_code))
                return;
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        if (oops_may_print()) {
#ifdef CONFIG_X86_PAE
                if (error_code & 16) {
                        pte_t *pte = lookup_address(address);

                        if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
                                printk(KERN_CRIT "kernel tried to execute "
                                        "NX-protected page - exploit attempt? "
                                        "(uid: %d)\n", current->uid);
                }
#endif
                if (address < PAGE_SIZE)
                        printk(KERN_ALERT "BUG: unable to handle kernel NULL "
                                        "pointer dereference");
                else
                        printk(KERN_ALERT "BUG: unable to handle kernel paging"
                                        " request");
                printk(" at virtual address %08lx\n", address);
                printk(KERN_ALERT "printing eip: %08lx\n", regs->eip);
                dump_fault_path(address);
        }
        tsk->thread.cr2 = address;
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
        die("Oops", regs, error_code);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(tsk)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (error_code & 4)
                do_group_exit(SIGKILL);
        goto no_context;
do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (!(error_code & 4))
                goto no_context;

        /* User space => ok to do another page fault */
        if (is_prefetch(regs, address, error_code))
                return;

        tsk->thread.cr2 = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
void vmalloc_sync_all(void)
{
        /*
         * Note that races in the updates of insync and start aren't
         * problematic: insync can only get set bits added, and updates to
         * start are only improving performance (without affecting correctness
         * of course).
         * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
         * This change works just fine with 2-level paging too.
         */
#define sync_index(a) ((a) >> PMD_SHIFT)
        static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
        static unsigned long start = TASK_SIZE;
        unsigned long address;
        if (SHARED_KERNEL_PMD)
                return;

        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
        for (address = start;
             address >= TASK_SIZE && address < hypervisor_virt_start;
             address += 1UL << PMD_SHIFT) {
                if (!test_bit(sync_index(address), insync)) {
                        unsigned long flags;
                        struct page *page;
                        spin_lock_irqsave(&pgd_lock, flags);
                        /* XEN: failure path assumes non-empty pgd_list. */
                        if (unlikely(!pgd_list)) {
                                spin_unlock_irqrestore(&pgd_lock, flags);
                                return;
                        }
                        for (page = pgd_list; page; page =
                                        (struct page *)page->index)
                                if (!vmalloc_sync_one(page_address(page),
                                                      address)) {
                                        BUG_ON(page != pgd_list);
                                        break;
                                }
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        if (!page)
                                set_bit(sync_index(address), insync);
                }
                if (address == start && test_bit(sync_index(address), insync))
                        start = address + (1UL << PMD_SHIFT);
        }
}