2 * kexec.c - kexec system call
3 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
9 #include <linux/capability.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <generated/utsrelease.h>
25 #include <linux/utsname.h>
26 #include <linux/numa.h>
27 #include <linux/suspend.h>
28 #include <linux/device.h>
29 #include <linux/freezer.h>
31 #include <linux/cpu.h>
32 #include <linux/console.h>
33 #include <linux/vmalloc.h>
34 #include <linux/swap.h>
35 #include <linux/syscore_ops.h>
38 #include <asm/uaccess.h>
40 #include <asm/sections.h>
43 /* Per cpu memory for storing cpu states in case of system crash. */
44 note_buf_t __percpu *crash_notes;
47 /* vmcoreinfo stuff */
48 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
50 #if defined(CONFIG_XEN) && defined(CONFIG_X86)
53 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
54 size_t vmcoreinfo_size;
55 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
57 /* Location of the reserved area for the crash kernel */
58 struct resource crashk_res = {
59 .name = "Crash kernel",
62 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
65 int kexec_should_crash(struct task_struct *p)
67 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
73 * When kexec transitions to the new kernel there is a one-to-one
74 * mapping between physical and virtual addresses. On processors
75 * where you can disable the MMU this is trivial and easy. For
76 * others it is still a simple, predictable page table to set up.
78 * In that environment kexec copies the new kernel to its final
79 * resting place. This means I can only support memory whose
80 * physical address can fit in an unsigned long. In particular
81 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
82 * If the assembly stub has more restrictive requirements
83 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DESTINATION_MEMORY_LIMIT can be
84 * defined more restrictively in <asm/kexec.h>.
86 * The code for the transition from the current kernel to
87 * the new kernel is placed in the control_code_buffer, whose size
88 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
89 * page of memory is necessary, but some architectures require more.
90 * Because this memory must be identity mapped in the transition from
91 * virtual to physical addresses it must live in the range
92 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily available.
95 * The assembly stub in the control code buffer is passed a linked list
96 * of descriptor pages detailing the source pages of the new kernel,
97 * and the destination addresses of those source pages. As this data
98 * structure is not used in the context of the current OS, it must be self-contained.
101 * The code has been made to work with highmem pages and will use a
102 * destination page in its final resting place (if it happens
103 * to allocate it). The end product of this is that most of the
104 * physical address space, and most of RAM can be used.
106 * Future directions include:
107 * - allocating a page table with the control code buffer identity
108 * mapped, to simplify machine_kexec and make kexec_on_panic more reliable.
113 * KIMAGE_NO_DEST is an impossible destination address, used for
114 * allocating pages whose destination address we do not care about.
116 #define KIMAGE_NO_DEST (-1UL)
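/*
 * As a rough sketch (addresses made up), the descriptor list built by the
 * loading code below and later walked by the assembly stub is a sequence of
 * kimage_entry_t values, each a physical address tagged with a low flag bit:
 *
 *	0x01000000 | IND_DESTINATION	set the current destination address
 *	0x37aa2000 | IND_SOURCE		copy this page to the destination, then
 *					advance the destination by PAGE_SIZE
 *	0x37aa3000 | IND_SOURCE
 *	0x37ab0000 | IND_INDIRECTION	continue reading entries from this page
 *	...
 *	IND_DONE			end of the list
 */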
118 static int kimage_is_destination_range(struct kimage *image,
119 unsigned long start, unsigned long end);
120 static struct page *kimage_alloc_page(struct kimage *image,
124 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
125 unsigned long nr_segments,
126 struct kexec_segment __user *segments)
128 size_t segment_bytes;
129 struct kimage *image;
133 /* Allocate a controlling structure */
135 image = kzalloc(sizeof(*image), GFP_KERNEL);
140 image->entry = &image->head;
141 image->last_entry = &image->head;
142 image->control_page = ~0; /* By default this does not apply */
143 image->start = entry;
144 image->type = KEXEC_TYPE_DEFAULT;
146 /* Initialize the list of control pages */
147 INIT_LIST_HEAD(&image->control_pages);
149 /* Initialize the list of destination pages */
150 INIT_LIST_HEAD(&image->dest_pages);
152 /* Initialize the list of unusable pages */
153 INIT_LIST_HEAD(&image->unuseable_pages);
155 /* Read in the segments */
156 image->nr_segments = nr_segments;
157 segment_bytes = nr_segments * sizeof(*segments);
158 result = copy_from_user(image->segment, segments, segment_bytes);
165 * Verify we have good destination addresses. The caller is
166 * responsible for making certain we don't attempt to load
167 * the new image into invalid or reserved areas of RAM. This
168 * just verifies it is an address we can use.
170 * Since the kernel does everything in page size chunks ensure
171 * the destination addresses are page aligned. Too many
172 * special cases crop up when we don't do this. The most
173 * insidious is getting overlapping destination addresses
174 * simply because addresses are changed to page size granularity.
177 result = -EADDRNOTAVAIL;
178 for (i = 0; i < nr_segments; i++) {
179 unsigned long mstart, mend;
181 mstart = image->segment[i].mem;
182 mend = mstart + image->segment[i].memsz;
183 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
185 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
189 /* Verify our destination addresses do not overlap.
190 * If we allowed overlapping destination addresses
191 * through, very weird things can happen with no
192 * easy explanation as one segment stops on another.
195 for (i = 0; i < nr_segments; i++) {
196 unsigned long mstart, mend;
199 mstart = image->segment[i].mem;
200 mend = mstart + image->segment[i].memsz;
201 for (j = 0; j < i; j++) {
202 unsigned long pstart, pend;
203 pstart = image->segment[j].mem;
204 pend = pstart + image->segment[j].memsz;
205 /* Do the segments overlap ? */
206 if ((mend > pstart) && (mstart < pend))
211 /* Ensure our buffer sizes are not larger than
212 * our memory sizes. This should always be the case,
213 * and it is easier to check up front than to be surprised later on.
217 for (i = 0; i < nr_segments; i++) {
218 if (image->segment[i].bufsz > image->segment[i].memsz)
233 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
234 unsigned long nr_segments,
235 struct kexec_segment __user *segments)
238 struct kimage *image;
240 /* Allocate and initialize a controlling structure */
242 result = do_kimage_alloc(&image, entry, nr_segments, segments);
249 * Find a location for the control code buffer, and add it to
250 * the vector of segments so that its pages will also be
251 * counted as destination pages.
254 image->control_code_page = kimage_alloc_control_pages(image,
255 get_order(KEXEC_CONTROL_PAGE_SIZE));
256 if (!image->control_code_page) {
257 printk(KERN_ERR "Could not allocate control_code_buffer\n");
261 image->swap_page = kimage_alloc_control_pages(image, 0);
262 if (!image->swap_page) {
263 printk(KERN_ERR "Could not allocate swap buffer\n");
277 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
278 unsigned long nr_segments,
279 struct kexec_segment __user *segments)
282 struct kimage *image;
286 /* Verify we have a valid entry point */
287 if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
288 result = -EADDRNOTAVAIL;
292 /* Allocate and initialize a controlling structure */
293 result = do_kimage_alloc(&image, entry, nr_segments, segments);
297 /* Enable the special crash kernel control page allocation policy. */
300 image->control_page = crashk_res.start;
301 image->type = KEXEC_TYPE_CRASH;
304 * Verify we have good destination addresses. Normally
305 * the caller is responsible for making certain we don't
306 * attempt to load the new image into invalid or reserved
307 * areas of RAM. But crash kernels are preloaded into a
308 * reserved area of ram. We must ensure the addresses
309 * are in the reserved area otherwise preloading the
310 * kernel could corrupt things.
312 result = -EADDRNOTAVAIL;
313 for (i = 0; i < nr_segments; i++) {
314 unsigned long mstart, mend;
316 mstart = image->segment[i].mem;
317 mend = mstart + image->segment[i].memsz - 1;
318 /* Ensure we are within the crash kernel limits */
319 if ((mstart < crashk_res.start) || (mend > crashk_res.end))
324 * Find a location for the control code buffer, and add it to
325 * the vector of segments so that its pages will also be
326 * counted as destination pages.
329 image->control_code_page = kimage_alloc_control_pages(image,
330 get_order(KEXEC_CONTROL_PAGE_SIZE));
331 if (!image->control_code_page) {
332 printk(KERN_ERR "Could not allocate control_code_buffer\n");
346 static int kimage_is_destination_range(struct kimage *image,
352 for (i = 0; i < image->nr_segments; i++) {
353 unsigned long mstart, mend;
355 mstart = image->segment[i].mem;
356 mend = mstart + image->segment[i].memsz;
357 if ((end > mstart) && (start < mend))
364 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
368 pages = alloc_pages(gfp_mask, order);
370 unsigned int count, i;
375 address_bits = BITS_PER_LONG;
377 address_bits = ilog2(limit);
379 if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
380 __free_pages(pages, order);
384 pages->mapping = NULL;
385 set_page_private(pages, order);
387 for (i = 0; i < count; i++)
388 SetPageReserved(pages + i);
394 static void kimage_free_pages(struct page *page)
396 unsigned int order, count, i;
398 order = page_private(page);
400 for (i = 0; i < count; i++)
401 ClearPageReserved(page + i);
402 __free_pages(page, order);
405 static void kimage_free_page_list(struct list_head *list)
407 struct list_head *pos, *next;
409 list_for_each_safe(pos, next, list) {
412 page = list_entry(pos, struct page, lru);
413 list_del(&page->lru);
414 kimage_free_pages(page);
418 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
421 /* Control pages are special, they are the intermediaries
422 * that are needed while we copy the rest of the pages
423 * to their final resting place. As such they must
424 * not conflict with either the destination addresses
425 * or memory the kernel is already using.
427 * The only case where we really need more than one of
428 * these is for architectures where we cannot disable
429 * the MMU and must instead generate an identity mapped
430 * page table for all of the memory.
432 * At worst this runs in O(N) of the image size.
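 *
 * Pages that happen to land inside a destination range are parked on a
 * local extra_pages list so the allocator cannot hand them out again,
 * and they are all freed in one batch once a usable page is found.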
434 struct list_head extra_pages;
439 INIT_LIST_HEAD(&extra_pages);
441 /* Loop while I can allocate a page and the page allocated
442 * is a destination page.
445 unsigned long pfn, epfn, addr, eaddr;
447 pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
450 pfn = kexec_page_to_pfn(pages);
452 addr = pfn << PAGE_SHIFT;
453 eaddr = epfn << PAGE_SHIFT;
454 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
455 kimage_is_destination_range(image, addr, eaddr)) {
456 list_add(&pages->lru, &extra_pages);
462 /* Remember the allocated page... */
463 list_add(&pages->lru, &image->control_pages);
465 /* Because the page is already in its destination
466 * location we will never allocate another page at
467 * that address. Therefore kimage_alloc_pages
468 * will not return it (again) and we don't need
469 * to give it an entry in image->segment[].
472 /* Deal with the destination pages I have inadvertently allocated.
474 * Ideally I would convert multi-page allocations into single
475 * page allocations, and add everything to image->dest_pages.
477 * For now it is simpler to just free the pages.
479 kimage_free_page_list(&extra_pages);
485 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
488 /* Control pages are special, they are the intermediaries
489 * that are needed while we copy the rest of the pages
490 * to their final resting place. As such they must
491 * not conflict with either the destination addresses
492 * or memory the kernel is already using.
494 * Control pages are also the only pages we must allocate
495 * when loading a crash kernel. All of the other pages
496 * are specified by the segments and we just memcpy
497 * into them directly.
499 * The only case where we really need more than one of
500 * these is for architectures where we cannot disable
501 * the MMU and must instead generate an identity mapped
502 * page table for all of the memory.
504 * Given the low demand this implements a very simple
505 * allocator that finds the first hole of the appropriate
506 * size in the reserved memory region, and allocates all
507 * of the memory up to and including the hole.
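 *
 * For example (made-up numbers): with order 1 the hole size is two pages
 * (0x2000); a current control_page of 0x01000800 is rounded up so that
 * hole_start = 0x01002000 and hole_end = 0x01003fff, and the window is
 * then slid past any segment it overlaps until a free hole is found.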
509 unsigned long hole_start, hole_end, size;
513 size = (1 << order) << PAGE_SHIFT;
514 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
515 hole_end = hole_start + size - 1;
516 while (hole_end <= crashk_res.end) {
519 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
521 if (hole_end > crashk_res.end)
523 /* See if I overlap any of the segments */
524 for (i = 0; i < image->nr_segments; i++) {
525 unsigned long mstart, mend;
527 mstart = image->segment[i].mem;
528 mend = mstart + image->segment[i].memsz - 1;
529 if ((hole_end >= mstart) && (hole_start <= mend)) {
530 /* Advance the hole to the end of the segment */
531 hole_start = (mend + (size - 1)) & ~(size - 1);
532 hole_end = hole_start + size - 1;
536 /* If I don't overlap any segments I have found my hole! */
537 if (i == image->nr_segments) {
538 pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
543 image->control_page = hole_end;
549 struct page *kimage_alloc_control_pages(struct kimage *image,
552 struct page *pages = NULL;
554 switch (image->type) {
555 case KEXEC_TYPE_DEFAULT:
556 pages = kimage_alloc_normal_control_pages(image, order);
558 case KEXEC_TYPE_CRASH:
559 pages = kimage_alloc_crash_control_pages(image, order);
565 #else /* !CONFIG_XEN */
566 struct page *kimage_alloc_control_pages(struct kimage *image,
569 return kimage_alloc_normal_control_pages(image, order);
573 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
575 if (*image->entry != 0)
578 if (image->entry == image->last_entry) {
579 kimage_entry_t *ind_page;
582 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
586 ind_page = page_address(page);
587 *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
588 image->entry = ind_page;
589 image->last_entry = ind_page +
590 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
592 *image->entry = entry;
599 static int kimage_set_destination(struct kimage *image,
600 unsigned long destination)
604 destination &= PAGE_MASK;
605 result = kimage_add_entry(image, destination | IND_DESTINATION);
607 image->destination = destination;
613 static int kimage_add_page(struct kimage *image, unsigned long page)
618 result = kimage_add_entry(image, page | IND_SOURCE);
620 image->destination += PAGE_SIZE;
626 static void kimage_free_extra_pages(struct kimage *image)
628 /* Walk through and free any extra destination pages I may have */
629 kimage_free_page_list(&image->dest_pages);
631 /* Walk through and free any unusable pages I have cached */
632 kimage_free_page_list(&image->unuseable_pages);
635 static void kimage_terminate(struct kimage *image)
637 if (*image->entry != 0)
640 *image->entry = IND_DONE;
643 #define for_each_kimage_entry(image, ptr, entry) \
644 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
645 ptr = (entry & IND_INDIRECTION)? \
646 kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
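/*
 * A minimal usage sketch of the iterator above (hypothetical helper, not
 * part of this file): counting the source pages of a loaded image.
 *
 *	static unsigned long kimage_count_source_pages(struct kimage *image)
 *	{
 *		kimage_entry_t *ptr, entry;
 *		unsigned long nr = 0;
 *
 *		for_each_kimage_entry(image, ptr, entry)
 *			if (entry & IND_SOURCE)
 *				nr++;
 *		return nr;
 *	}
 */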
648 static void kimage_free_entry(kimage_entry_t entry)
652 page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
653 kimage_free_pages(page);
656 static void kimage_free(struct kimage *image)
658 kimage_entry_t *ptr, entry;
659 kimage_entry_t ind = 0;
665 xen_machine_kexec_unload(image);
668 kimage_free_extra_pages(image);
669 for_each_kimage_entry(image, ptr, entry) {
670 if (entry & IND_INDIRECTION) {
671 /* Free the previous indirection page */
672 if (ind & IND_INDIRECTION)
673 kimage_free_entry(ind);
674 /* Save this indirection page until we are done with it. */
679 else if (entry & IND_SOURCE)
680 kimage_free_entry(entry);
682 /* Free the final indirection page */
683 if (ind & IND_INDIRECTION)
684 kimage_free_entry(ind);
686 /* Handle any machine specific cleanup */
687 machine_kexec_cleanup(image);
689 /* Free the kexec control pages... */
690 kimage_free_page_list(&image->control_pages);
694 static kimage_entry_t *kimage_dst_used(struct kimage *image,
697 kimage_entry_t *ptr, entry;
698 unsigned long destination = 0;
700 for_each_kimage_entry(image, ptr, entry) {
701 if (entry & IND_DESTINATION)
702 destination = entry & PAGE_MASK;
703 else if (entry & IND_SOURCE) {
704 if (page == destination)
706 destination += PAGE_SIZE;
713 static struct page *kimage_alloc_page(struct kimage *image,
715 unsigned long destination)
718 * Here we implement safeguards to ensure that a source page
719 * is not copied to its destination page before the data on
720 * the destination page is no longer useful.
722 * To do this we maintain the invariant that a source page is
723 * either its own destination page, or it is not a
724 * destination page at all.
726 * That is slightly stronger than required, but the proof
727 * that no problems will occur is trivial, and the
728 * implementation is simply to verify.
730 * When allocating all pages normally this algorithm will run
731 * in O(N) time, but in the worst case it will run in O(N^2)
732 * time. If the runtime is a problem the data structures can be fixed.
739 * Walk through the list of destination pages, and see if I have a match.
742 list_for_each_entry(page, &image->dest_pages, lru) {
743 addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
744 if (addr == destination) {
745 list_del(&page->lru);
753 /* Allocate a page, if we run out of memory give up */
754 page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
757 /* If the page cannot be used, file it away */
758 if (kexec_page_to_pfn(page) >
759 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
760 list_add(&page->lru, &image->unuseable_pages);
763 addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
765 /* If it is the destination page we want, use it */
766 if (addr == destination)
769 /* If the page is not a destination page use it */
770 if (!kimage_is_destination_range(image, addr,
775 * I know that the page is someone's destination page.
776 * See if there is already a source page for this
777 * destination page. And if so swap the source pages.
779 old = kimage_dst_used(image, addr);
782 unsigned long old_addr;
783 struct page *old_page;
785 old_addr = *old & PAGE_MASK;
786 old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
787 copy_highpage(page, old_page);
788 *old = addr | (*old & ~PAGE_MASK);
790 /* The old page I have found cannot be a
791 * destination page, so return it if its
792 * gfp_flags honor the ones passed in.
794 if (!(gfp_mask & __GFP_HIGHMEM) &&
795 PageHighMem(old_page)) {
796 kimage_free_pages(old_page);
804 /* Place the page on the destination list; I will use it later. */
807 list_add(&page->lru, &image->dest_pages);
814 static int kimage_load_normal_segment(struct kimage *image,
815 struct kexec_segment *segment)
818 unsigned long ubytes, mbytes;
820 unsigned char __user *buf;
824 ubytes = segment->bufsz;
825 mbytes = segment->memsz;
826 maddr = segment->mem;
828 result = kimage_set_destination(image, maddr);
835 size_t uchunk, mchunk;
837 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
842 result = kimage_add_page(image, kexec_page_to_pfn(page)
848 /* Start with a clear page */
850 ptr += maddr & ~PAGE_MASK;
851 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
859 result = copy_from_user(ptr, buf, uchunk);
875 static int kimage_load_crash_segment(struct kimage *image,
876 struct kexec_segment *segment)
878 /* For crash dump kernels we simply copy the data from
879 * user space to its destination.
880 * We do things a page at a time for the sake of kmap.
883 unsigned long ubytes, mbytes;
885 unsigned char __user *buf;
889 ubytes = segment->bufsz;
890 mbytes = segment->memsz;
891 maddr = segment->mem;
895 size_t uchunk, mchunk;
897 page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
903 ptr += maddr & ~PAGE_MASK;
904 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
909 if (uchunk > ubytes) {
911 /* Zero the trailing part of the page */
912 memset(ptr + uchunk, 0, mchunk - uchunk);
914 result = copy_from_user(ptr, buf, uchunk);
915 kexec_flush_icache_page(page);
930 static int kimage_load_segment(struct kimage *image,
931 struct kexec_segment *segment)
933 int result = -ENOMEM;
935 switch (image->type) {
936 case KEXEC_TYPE_DEFAULT:
937 result = kimage_load_normal_segment(image, segment);
939 case KEXEC_TYPE_CRASH:
940 result = kimage_load_crash_segment(image, segment);
946 #else /* CONFIG_XEN */
947 static int kimage_load_segment(struct kimage *image,
948 struct kexec_segment *segment)
950 return kimage_load_normal_segment(image, segment);
955 * Exec Kernel system call: for obvious reasons only root may call it.
957 * This call breaks up into three pieces.
958 * - A generic part which loads the new kernel from the current
959 * address space, and very carefully places the data in the allocated pages.
962 * - A generic part that interacts with the kernel and tells all of
963 * the devices to shut down, preventing on-going DMAs, and placing
964 * the devices in a consistent state so a later kernel can reinitialize them.
967 * - A machine specific part that includes the syscall number
968 * and then copies the image to its final destination, and
969 * jumps into the image at entry.
971 * kexec does not sync, or unmount filesystems so if you need
972 * that to happen you need to do that yourself.
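 *
 * A minimal sketch of what a caller passes in (hypothetical names and
 * values; the real loader, kexec-tools, builds several segments and a
 * proper entry point). Each segment gives a user-space source buffer and
 * a page-aligned physical destination:
 *
 *	struct kexec_segment seg = {
 *		.buf   = kernel_buf,		// image bytes in the caller's memory
 *		.bufsz = kernel_len,
 *		.mem   = 0x100000,		// physical destination address
 *		.memsz = PAGE_ALIGN(kernel_len),
 *	};
 *	syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);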
974 struct kimage *kexec_image;
975 struct kimage *kexec_crash_image;
977 static DEFINE_MUTEX(kexec_mutex);
979 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
980 struct kexec_segment __user *, segments, unsigned long, flags)
982 struct kimage **dest_image, *image;
985 /* We only trust the superuser with rebooting the system. */
986 if (!capable(CAP_SYS_BOOT))
990 * Verify we have a legal set of flags
991 * This leaves us room for future extensions.
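 *
 * Everything outside the KEXEC_ARCH_MASK bits must be a flag this kernel
 * knows about (KEXEC_FLAGS), which is what the comparison below checks.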
993 if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
996 /* Verify we are on the appropriate architecture */
997 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
998 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
1001 /* Put an artificial cap on the number
1002 * of segments passed to kexec_load.
1004 if (nr_segments > KEXEC_SEGMENT_MAX)
1010 /* Because we write directly to the reserved memory
1011 * region when loading crash kernels we need a mutex here to
1012 * prevent multiple crash kernels from attempting to load
1013 * simultaneously, and to prevent a crash kernel from loading
1014 * over the top of an in-use crash kernel.
1016 * KISS: always take the mutex.
1018 if (!mutex_trylock(&kexec_mutex))
1021 dest_image = &kexec_image;
1022 if (flags & KEXEC_ON_CRASH)
1023 dest_image = &kexec_crash_image;
1024 if (nr_segments > 0) {
1027 /* Loading another kernel to reboot into */
1028 if ((flags & KEXEC_ON_CRASH) == 0)
1029 result = kimage_normal_alloc(&image, entry,
1030 nr_segments, segments);
1031 /* Loading another kernel to switch to if this one crashes */
1032 else if (flags & KEXEC_ON_CRASH) {
1033 /* Free any current crash dump kernel before we corrupt it. */
1036 kimage_free(xchg(&kexec_crash_image, NULL));
1037 result = kimage_crash_alloc(&image, entry,
1038 nr_segments, segments);
1039 crash_map_reserved_pages();
1044 if (flags & KEXEC_PRESERVE_CONTEXT)
1045 image->preserve_context = 1;
1046 result = machine_kexec_prepare(image);
1050 for (i = 0; i < nr_segments; i++) {
1051 result = kimage_load_segment(image, &image->segment[i]);
1055 kimage_terminate(image);
1056 if (flags & KEXEC_ON_CRASH)
1057 crash_unmap_reserved_pages();
1061 result = xen_machine_kexec_load(image);
1066 /* Install the new kernel and uninstall the old */
1067 image = xchg(dest_image, image);
1070 mutex_unlock(&kexec_mutex);
1077 * Add and remove page tables for crashkernel memory
1079 * Provide an empty default implementation here -- architecture
1080 * code may override this
1082 void __weak crash_map_reserved_pages(void)
1085 void __weak crash_unmap_reserved_pages(void)
1088 #ifdef CONFIG_COMPAT
1089 asmlinkage long compat_sys_kexec_load(unsigned long entry,
1090 unsigned long nr_segments,
1091 struct compat_kexec_segment __user *segments,
1092 unsigned long flags)
1094 struct compat_kexec_segment in;
1095 struct kexec_segment out, __user *ksegments;
1096 unsigned long i, result;
1098 /* Don't allow clients that don't understand the native
1099 * architecture to do anything.
1101 if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1104 if (nr_segments > KEXEC_SEGMENT_MAX)
1107 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1108 for (i=0; i < nr_segments; i++) {
1109 result = copy_from_user(&in, &segments[i], sizeof(in));
1113 out.buf = compat_ptr(in.buf);
1114 out.bufsz = in.bufsz;
1116 out.memsz = in.memsz;
1118 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1123 return sys_kexec_load(entry, nr_segments, ksegments, flags);
1127 void crash_kexec(struct pt_regs *regs)
1129 /* Take the kexec_mutex here to prevent sys_kexec_load
1130 * running on one cpu from replacing the crash kernel
1131 * we are using after a panic on a different cpu.
1133 * If the crash kernel was not located in a fixed area
1134 * of memory the xchg(&kexec_crash_image) would be
1135 * sufficient. But since I reuse the memory...
1137 if (mutex_trylock(&kexec_mutex)) {
1138 if (kexec_crash_image) {
1139 struct pt_regs fixed_regs;
1141 crash_setup_regs(&fixed_regs, regs);
1142 crash_save_vmcoreinfo();
1143 machine_crash_shutdown(&fixed_regs);
1144 machine_kexec(kexec_crash_image);
1146 mutex_unlock(&kexec_mutex);
1150 size_t crash_get_memory_size(void)
1153 mutex_lock(&kexec_mutex);
1154 if (crashk_res.end != crashk_res.start)
1155 size = resource_size(&crashk_res);
1156 mutex_unlock(&kexec_mutex);
1161 void __weak crash_free_reserved_phys_range(unsigned long begin,
1166 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1167 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1168 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1169 free_page((unsigned long)__va(addr));
1174 int crash_shrink_memory(unsigned long new_size)
1177 unsigned long start, end;
1178 unsigned long old_size;
1179 struct resource *ram_res;
1181 mutex_lock(&kexec_mutex);
1183 if (kexec_crash_image) {
1187 start = crashk_res.start;
1188 end = crashk_res.end;
1189 old_size = (end == 0) ? 0 : end - start + 1;
1190 if (new_size >= old_size) {
1191 ret = (new_size == old_size) ? 0 : -EINVAL;
1195 ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1201 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1202 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1204 crash_map_reserved_pages();
1205 crash_free_reserved_phys_range(end, crashk_res.end);
1207 if ((start == end) && (crashk_res.parent != NULL))
1208 release_resource(&crashk_res);
1210 ram_res->start = end;
1211 ram_res->end = crashk_res.end;
1212 ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1213 ram_res->name = "System RAM";
1215 crashk_res.end = end - 1;
1217 insert_resource(&iomem_resource, ram_res);
1218 crash_unmap_reserved_pages();
1221 mutex_unlock(&kexec_mutex);
1224 #endif /* !CONFIG_XEN */
1226 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1229 struct elf_note note;
1231 note.n_namesz = strlen(name) + 1;
1232 note.n_descsz = data_len;
1234 memcpy(buf, &note, sizeof(note));
1235 buf += (sizeof(note) + 3)/4;
1236 memcpy(buf, name, note.n_namesz);
1237 buf += (note.n_namesz + 3)/4;
1238 memcpy(buf, data, note.n_descsz);
1239 buf += (note.n_descsz + 3)/4;
1244 static void final_note(u32 *buf)
1246 struct elf_note note;
1251 memcpy(buf, &note, sizeof(note));
1255 void crash_save_cpu(struct pt_regs *regs, int cpu)
1257 struct elf_prstatus prstatus;
1260 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1263 /* Using ELF notes here is opportunistic.
1264 * I need a well defined structure format
1265 * for the data I pass, and I need tags
1266 * on the data to indicate what information I have
1267 * squirrelled away. ELF notes happen to provide
1268 * all of that, so there is no need to invent something new.
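 *
 * The resulting buffer uses the standard ELF note layout, with the name
 * and descriptor each padded to a 4-byte boundary:
 *
 *	| n_namesz | n_descsz | n_type | name (padded) | desc (padded) |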
1270 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1273 memset(&prstatus, 0, sizeof(prstatus));
1274 prstatus.pr_pid = current->pid;
1275 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1276 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1277 &prstatus, sizeof(prstatus));
1282 static int __init crash_notes_memory_init(void)
1285 /* Allocate memory for saving cpu registers. */
1286 crash_notes = alloc_percpu(note_buf_t);
1288 printk("Kexec: Memory allocation for saving cpu register"
1289 " states failed\n");
1295 module_init(crash_notes_memory_init)
1300 * parsing the "crashkernel" commandline
1302 * this code is intended to be called from architecture specific code
1307 * This function parses command lines in the format
1309 * crashkernel=ramsize-range:size[,...][@offset]
1311 * The function returns 0 on success and -EINVAL on failure.
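 *
 * For example, a (hypothetical) command line of
 *
 *	crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, and 128M
 * when it has 2G or more.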
1313 static int __init parse_crashkernel_mem(char *cmdline,
1314 unsigned long long system_ram,
1315 unsigned long long *crash_size,
1316 unsigned long long *crash_base)
1318 char *cur = cmdline, *tmp;
1320 /* for each entry of the comma-separated list */
1322 unsigned long long start, end = ULLONG_MAX, size;
1324 /* get the start of the range */
1325 start = memparse(cur, &tmp);
1327 pr_warning("crashkernel: Memory value expected\n");
1332 pr_warning("crashkernel: '-' expected\n");
1337 /* if no ':' is here, then we read the end */
1339 end = memparse(cur, &tmp);
1341 pr_warning("crashkernel: Memory "
1342 "value expected\n");
1347 pr_warning("crashkernel: end <= start\n");
1353 pr_warning("crashkernel: ':' expected\n");
1358 size = memparse(cur, &tmp);
1360 pr_warning("Memory value expected\n");
1364 if (size >= system_ram) {
1365 pr_warning("crashkernel: invalid size\n");
1370 if (system_ram >= start && system_ram < end) {
1374 } while (*cur++ == ',');
1376 if (*crash_size > 0) {
1377 while (*cur && *cur != ' ' && *cur != '@')
1381 *crash_base = memparse(cur, &tmp);
1383 pr_warning("Memory value expected "
1394 * This function parses "simple" (old) crashkernel command lines like
1396 * crashkernel=size[@offset]
1398 * It returns 0 on success and -EINVAL on failure.
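 *
 * For example, a (hypothetical) "crashkernel=128M@16M" reserves 128M for
 * the crash kernel, starting at physical address 16M.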
1400 static int __init parse_crashkernel_simple(char *cmdline,
1401 unsigned long long *crash_size,
1402 unsigned long long *crash_base)
1404 char *cur = cmdline;
1406 *crash_size = memparse(cmdline, &cur);
1407 if (cmdline == cur) {
1408 pr_warning("crashkernel: memory value expected\n");
1413 *crash_base = memparse(cur+1, &cur);
1414 else if (*cur != ' ' && *cur != '\0') {
1415 pr_warning("crashkernel: unrecognized char\n");
1423 * This function is the entry point for command line parsing and should be
1424 * called from the arch-specific code.
1426 int __init parse_crashkernel(char *cmdline,
1427 unsigned long long system_ram,
1428 unsigned long long *crash_size,
1429 unsigned long long *crash_base)
1431 char *p = cmdline, *ck_cmdline = NULL;
1432 char *first_colon, *first_space;
1434 BUG_ON(!crash_size || !crash_base);
1438 /* find crashkernel and use the last one if there are more */
1439 p = strstr(p, "crashkernel=");
1442 p = strstr(p+1, "crashkernel=");
1448 ck_cmdline += 12; /* strlen("crashkernel=") */
1451 * if the commandline contains a ':', then that's the extended
1452 * syntax -- if not, it must be the classic syntax
1454 first_colon = strchr(ck_cmdline, ':');
1455 first_space = strchr(ck_cmdline, ' ');
1456 if (first_colon && (!first_space || first_colon < first_space))
1457 return parse_crashkernel_mem(ck_cmdline, system_ram,
1458 crash_size, crash_base);
1460 return parse_crashkernel_simple(ck_cmdline, crash_size,
1467 static void update_vmcoreinfo_note(void)
1469 u32 *buf = vmcoreinfo_note;
1471 if (!vmcoreinfo_size)
1473 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1478 void crash_save_vmcoreinfo(void)
1480 vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1481 update_vmcoreinfo_note();
1484 void vmcoreinfo_append_str(const char *fmt, ...)
1490 va_start(args, fmt);
1491 r = vsnprintf(buf, sizeof(buf), fmt, args);
1494 if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1495 r = vmcoreinfo_max_size - vmcoreinfo_size;
1497 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1499 vmcoreinfo_size += r;
1503 * provide an empty default implementation here -- architecture
1504 * code may override this
1506 void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1509 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1511 return __pa((unsigned long)(char *)&vmcoreinfo_note);
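/*
 * Each VMCOREINFO_* macro below appends one "KEY=value" line of text to
 * vmcoreinfo_data; a sketch of the result (values made up):
 *
 *	OSRELEASE=3.0.0-default
 *	PAGESIZE=4096
 *	SYMBOL(init_uts_ns)=ffffffff81a00260
 *	OFFSET(page.flags)=0
 *	SIZE(list_head)=16
 *	LENGTH(zone.free_area)=11
 */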
1514 static int __init crash_save_vmcoreinfo_init(void)
1516 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1517 VMCOREINFO_PAGESIZE(PAGE_SIZE);
1519 VMCOREINFO_SYMBOL(init_uts_ns);
1520 VMCOREINFO_SYMBOL(node_online_map);
1522 # ifndef CONFIG_X86_XEN
1523 VMCOREINFO_SYMBOL(swapper_pg_dir);
1526 * Since for x86-32 Xen swapper_pg_dir is a pointer rather than an array,
1527 * make the value stored consistent with native (i.e. the base address of
1528 * the page directory).
1530 # define swapper_pg_dir *swapper_pg_dir
1531 VMCOREINFO_SYMBOL(swapper_pg_dir);
1532 # undef swapper_pg_dir
1535 VMCOREINFO_SYMBOL(_stext);
1536 VMCOREINFO_SYMBOL(vmlist);
1538 #ifndef CONFIG_NEED_MULTIPLE_NODES
1539 VMCOREINFO_SYMBOL(mem_map);
1540 VMCOREINFO_SYMBOL(contig_page_data);
1542 #ifdef CONFIG_SPARSEMEM
1543 VMCOREINFO_SYMBOL(mem_section);
1544 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1545 VMCOREINFO_STRUCT_SIZE(mem_section);
1546 VMCOREINFO_OFFSET(mem_section, section_mem_map);
1548 VMCOREINFO_STRUCT_SIZE(page);
1549 VMCOREINFO_STRUCT_SIZE(pglist_data);
1550 VMCOREINFO_STRUCT_SIZE(zone);
1551 VMCOREINFO_STRUCT_SIZE(free_area);
1552 VMCOREINFO_STRUCT_SIZE(list_head);
1553 VMCOREINFO_SIZE(nodemask_t);
1554 VMCOREINFO_OFFSET(page, flags);
1555 VMCOREINFO_OFFSET(page, _count);
1556 VMCOREINFO_OFFSET(page, mapping);
1557 VMCOREINFO_OFFSET(page, lru);
1558 VMCOREINFO_OFFSET(pglist_data, node_zones);
1559 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1560 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1561 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1563 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1564 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1565 VMCOREINFO_OFFSET(pglist_data, node_id);
1566 VMCOREINFO_OFFSET(zone, free_area);
1567 VMCOREINFO_OFFSET(zone, vm_stat);
1568 VMCOREINFO_OFFSET(zone, spanned_pages);
1569 VMCOREINFO_OFFSET(free_area, free_list);
1570 VMCOREINFO_OFFSET(list_head, next);
1571 VMCOREINFO_OFFSET(list_head, prev);
1572 VMCOREINFO_OFFSET(vm_struct, addr);
1573 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1574 log_buf_kexec_setup();
1575 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1576 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1577 VMCOREINFO_NUMBER(PG_lru);
1578 VMCOREINFO_NUMBER(PG_private);
1579 VMCOREINFO_NUMBER(PG_swapcache);
1581 arch_crash_save_vmcoreinfo();
1582 update_vmcoreinfo_note();
1587 module_init(crash_save_vmcoreinfo_init)
1590 * Move into place and start executing a preloaded standalone
1591 * executable. If nothing was preloaded return an error.
1593 int kernel_kexec(void)
1597 if (!mutex_trylock(&kexec_mutex))
1604 #ifdef CONFIG_KEXEC_JUMP
1605 if (kexec_image->preserve_context) {
1606 lock_system_sleep();
1607 pm_prepare_console();
1608 error = freeze_processes();
1611 goto Restore_console;
1614 error = dpm_suspend_start(PMSG_FREEZE);
1616 goto Resume_console;
1617 /* At this point, dpm_suspend_start() has been called,
1618 * but *not* dpm_suspend_end(). We *must* call
1619 * dpm_suspend_end() now. Otherwise, drivers for
1620 * some devices (e.g. interrupt controllers) become
1621 * desynchronized with the actual state of the
1622 * hardware at resume time, and evil weirdness ensues.
1624 error = dpm_suspend_end(PMSG_FREEZE);
1626 goto Resume_devices;
1627 error = disable_nonboot_cpus();
1630 local_irq_disable();
1631 error = syscore_suspend();
1637 kernel_restart_prepare(NULL);
1638 printk(KERN_EMERG "Starting new kernel\n");
1642 machine_kexec(kexec_image);
1644 #ifdef CONFIG_KEXEC_JUMP
1645 if (kexec_image->preserve_context) {
1650 enable_nonboot_cpus();
1651 dpm_resume_start(PMSG_RESTORE);
1653 dpm_resume_end(PMSG_RESTORE);
1658 pm_restore_console();
1659 unlock_system_sleep();
1664 mutex_unlock(&kexec_mutex);