#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# ifndef CONFIG_XEN
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
# else /* CONFIG_XEN */
/*
 * The hypervisor interface implicitly requires that all entries (except
 * for possibly the final one) are arranged in matching PA_/VA_ pairs.
 */
# define VA_PGD			3
# define PA_SWAP_PAGE		4
# define PAGES_NR		5
# endif /* CONFIG_XEN */
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# ifndef CONFIG_XEN
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
# else /* CONFIG_XEN, see comment above
# define VA_TABLE_PAGE		3 */
# define PA_SWAP_PAGE		4
# define PAGES_NR		5
# endif /* CONFIG_XEN */
#endif
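
/*
 * Illustrative sketch, not part of this header: machine_kexec() fills
 * a page_list[PAGES_NR] array indexed by the constants above, roughly
 *
 *	page_list[PA_CONTROL_PAGE] = __pa(control_page);
 *	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
 *
 * Under Xen each PA_ entry is followed by its matching VA_ entry so the
 * hypervisor can locate both addresses of a page, which is why the Xen
 * variants renumber PA_SWAP_PAGE and grow PAGES_NR.
 */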

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048
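
/*
 * Sketch of an assumed safety net: the relocate_kernel() code copied
 * into the control page must fit within this budget, which a linker
 * script can check with something like
 *
 *	. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
 *		   "kexec control code size is too big");
 */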

#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return.
 * I.e. the highest page that is mapped directly into kernel memory,
 * so that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE 4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from a 64-bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (0xFFFFFFFFFFUL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (0xFFFFFFFFFFUL)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (0xFFFFFFFFFFUL)

/* Allocate one page for the pdp and a second for the code */
# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
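
/*
 * The three 64-bit limits above follow from the 40-bit note earlier:
 * (1UL << 40) - 1 == 0xFFFFFFFFFFUL is the highest address expressible
 * with 40 physical address bits.
 */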

/*
 * The CPU does not save ss and sp on the stack if execution was already
 * in kernel mode when the NMI occurred. This code fixes that up.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	newregs->sp = (unsigned long)&(oldregs->sp);
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     : "=a"(newregs->ss));
#endif
}
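
/*
 * Why &oldregs->sp works above (32-bit): for an exception taken in
 * kernel mode the CPU pushes only flags/cs/ip, not ss/sp, so the
 * hardware frame begins exactly at the pre-exception stack pointer.
 * In struct pt_regs the sp field sits just above the pushed flags, so
 * its address is the value %esp had before the exception.
 */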

/*
 * This function is responsible for capturing register states if coming
 * via panic, otherwise just fix up the ss and sp if coming via a kernel
 * mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
		crash_fixup_ss_esp(newregs, oldregs);
	} else {
		/* No exception frame: snapshot the live registers. */
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
		newregs->ip = (unsigned long)current_text_addr();
	}
}
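
/*
 * Usage sketch, assuming the usual crash path (the caller is not
 * declared in this header):
 *
 *	struct pt_regs fixed_regs;
 *	crash_setup_regs(&fixed_regs, oldregs);
 *
 * oldregs is the exception frame, or NULL when coming from panic(), in
 * which case the live register values are snapshotted inline above.
 */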

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context);
#endif
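
/*
 * Illustrative 64-bit call site; a sketch of how machine_kexec() is
 * assumed to hand over control (the real call lives in
 * machine_kexec_64.c, not in this header):
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context);
 */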

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif
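
/*
 * These per-image pointers are assumed to hold zeroed pages that the
 * kexec preparation code allocates (e.g. via get_zeroed_page()) to
 * build the identity mapping in effect while relocate_kernel() runs.
 */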

/* Under Xen we need to work with machine addresses. These macros give the
 * machine address of a certain page to the generic kexec code instead of
 * the pseudo-physical address which would be given by the default macros.
 */

#ifdef CONFIG_XEN
#define KEXEC_ARCH_HAS_PAGE_MACROS
#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
#define kexec_virt_to_phys(addr) virt_to_machine(addr)
#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
#endif /* CONFIG_XEN */
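
/*
 * Illustrative effect, assuming a Xen paravirtual environment: where
 * generic kexec code would compute a pseudo-physical frame via
 *
 *	unsigned long pfn = kexec_page_to_pfn(page);
 *
 * it now receives the machine frame number (mfn) instead, so the image
 * handed to the hypervisor is described entirely in machine addresses.
 */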

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */