Update to 3.4-final.
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 8a6d7b0..dc1c426 100644
@@ -21,7 +21,7 @@
 #include <linux/hardirq.h>
 #include <linux/elf.h>
 #include <linux/elfcore.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/numa.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
 #include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
-#include <asm/system.h>
 #include <asm/sections.h>
 
+#ifndef CONFIG_XEN
 /* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
+#endif
 
 /* vmcoreinfo stuff */
-unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
-u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32
+#if defined(CONFIG_XEN) && defined(CONFIG_X86)
+__page_aligned_bss
+#endif
+vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 size_t vmcoreinfo_size;
 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
 
@@ -142,15 +149,17 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);
 
-       /* Initialize the list of unuseable pages */
+       /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);
 
        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
-       if (result)
+       if (result) {
+               result = -EFAULT;
                goto out;
+       }
 
        /*
         * Verify we have good destination addresses.  The caller is
@@ -159,7 +168,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
-        * the destination addreses are page aligned.  Too many
+        * the destination addresses are page aligned.  Too many
         * special cases crop of when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
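
Note: the checks this comment describes sit a few lines below in
do_kimage_alloc() and are untouched by this patch. A paraphrased sketch,
for readers without the full file at hand:

    result = -EADDRNOTAVAIL;
    for (i = 0; i < nr_segments; i++) {
            unsigned long mstart, mend;

            mstart = image->segment[i].mem;
            mend   = mstart + image->segment[i].memsz;
            if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                    goto out;               /* not page aligned */
            if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                    goto out;               /* not addressable */
    }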
@@ -352,13 +361,26 @@ static int kimage_is_destination_range(struct kimage *image,
        return 0;
 }
 
-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
 {
        struct page *pages;
 
        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;
+#ifdef CONFIG_XEN
+               int address_bits;
+
+               if (limit == ~0UL)
+                       address_bits = BITS_PER_LONG;
+               else
+                       address_bits = ilog2(limit);
+
+               if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
+                       __free_pages(pages, order);
+                       return NULL;
+               }
+#endif
                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
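
Note: under Xen, alloc_pages() hands back pseudo-physical pages whose
backing machine frames can live anywhere, so the fork adds a call to
xen_limit_pages_to_max_mfn(), a helper specific to this Xen tree rather
than an upstream API. Presumably it exchanges the backing frames for ones
below the requested address width, since a kexec image must stay
reachable at machine addresses the incoming kernel can use.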
@@ -422,10 +444,10 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        do {
                unsigned long pfn, epfn, addr, eaddr;
 
-               pages = kimage_alloc_pages(GFP_KERNEL, order);
+               pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
                if (!pages)
                        break;
-               pfn   = page_to_pfn(pages);
+               pfn   = kexec_page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
@@ -450,7 +472,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
-        * page allocations, and add everyting to image->dest_pages.
+        * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
@@ -459,6 +481,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        return pages;
 }
 
+#ifndef CONFIG_XEN
 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                      unsigned int order)
 {
@@ -493,7 +516,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
        while (hole_end <= crashk_res.end) {
                unsigned long i;
 
-               if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
+               if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                if (hole_end > crashk_res.end)
                        break;
@@ -512,7 +535,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
-                       pages = pfn_to_page(hole_start >> PAGE_SHIFT);
+                       pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
@@ -539,6 +562,13 @@ struct page *kimage_alloc_control_pages(struct kimage *image,
 
        return pages;
 }
+#else /* !CONFIG_XEN */
+struct page *kimage_alloc_control_pages(struct kimage *image,
+                                        unsigned int order)
+{
+       return kimage_alloc_normal_control_pages(image, order);
+}
+#endif
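
Note: with CONFIG_XEN the crash-allocator variant above is compiled out
entirely, so control pages always come from the normal allocator;
judging from this tree's call sites (not upstream documentation),
placement of the crash image is instead left to the hypervisor via
xen_machine_kexec_load() further down.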
 
 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 {
@@ -554,7 +584,7 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
                        return -ENOMEM;
 
                ind_page = page_address(page);
-               *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+               *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -598,7 +628,7 @@ static void kimage_free_extra_pages(struct kimage *image)
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);
 
-       /* Walk through and free any unuseable pages I have cached */
+       /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
 
 }
@@ -613,13 +643,13 @@ static void kimage_terminate(struct kimage *image)
 #define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION)? \
-                       phys_to_virt((entry & PAGE_MASK)): ptr +1)
+                       kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
 
 static void kimage_free_entry(kimage_entry_t entry)
 {
        struct page *page;
 
-       page = pfn_to_page(entry >> PAGE_SHIFT);
+       page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
 }
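
Note: the walker above and kimage_free_entry() rely on the kimage entry
encoding: each kimage_entry_t is a page-aligned physical address with
flag bits packed into its low bits. For reference, from <linux/kexec.h>
(unchanged by this patch):

    #define IND_DESTINATION  0x1   /* entry sets the next destination address */
    #define IND_INDIRECTION  0x2   /* entry points at the next page of entries */
    #define IND_DONE         0x4   /* end of the entry list */
    #define IND_SOURCE       0x8   /* entry is a source page to be copied */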
 
@@ -631,6 +661,10 @@ static void kimage_free(struct kimage *image)
        if (!image)
                return;
 
+#ifdef CONFIG_XEN
+       xen_machine_kexec_unload(image);
+#endif
+
        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
@@ -706,7 +740,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
@@ -717,16 +751,16 @@ static struct page *kimage_alloc_page(struct kimage *image,
                kimage_entry_t *old;
 
                /* Allocate a page, if we run out of memory give up */
-               page = kimage_alloc_pages(gfp_mask, 0);
+               page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
-               if (page_to_pfn(page) >
+               if (kexec_page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
 
                /* If it is the destination page we want use it */
                if (addr == destination)
@@ -749,7 +783,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
                        struct page *old_page;
 
                        old_addr = *old & PAGE_MASK;
-                       old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+                       old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);
 
@@ -805,14 +839,14 @@ static int kimage_load_normal_segment(struct kimage *image,
                        result  = -ENOMEM;
                        goto out;
                }
-               result = kimage_add_page(image, page_to_pfn(page)
+               result = kimage_add_page(image, kexec_page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;
 
                ptr = kmap(page);
                /* Start with a clear page */
-               memset(ptr, 0, PAGE_SIZE);
+               clear_page(ptr);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (mchunk > mbytes)
@@ -825,7 +859,7 @@ static int kimage_load_normal_segment(struct kimage *image,
                result = copy_from_user(ptr, buf, uchunk);
                kunmap(page);
                if (result) {
-                       result = (result < 0) ? result : -EIO;
+                       result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
@@ -837,6 +871,7 @@ out:
        return result;
 }
 
+#ifndef CONFIG_XEN
 static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
 {
@@ -859,7 +894,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                char *ptr;
                size_t uchunk, mchunk;
 
-               page = pfn_to_page(maddr >> PAGE_SHIFT);
+               page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
@@ -880,7 +915,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                kexec_flush_icache_page(page);
                kunmap(page);
                if (result) {
-                       result = (result < 0) ? result : -EIO;
+                       result = -EFAULT;
                        goto out;
                }
                ubytes -= uchunk;
@@ -908,6 +943,13 @@ static int kimage_load_segment(struct kimage *image,
 
        return result;
 }
+#else /* CONFIG_XEN */
+static int kimage_load_segment(struct kimage *image,
+                               struct kexec_segment *segment)
+{
+       return kimage_load_normal_segment(image, segment);
+}
+#endif
 
 /*
  * Exec Kernel system call: for obvious reasons only root may call it.
@@ -994,6 +1036,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                     nr_segments, segments);
+                       crash_map_reserved_pages();
                }
                if (result)
                        goto out;
@@ -1010,7 +1053,16 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                                goto out;
                }
                kimage_terminate(image);
+               if (flags & KEXEC_ON_CRASH)
+                       crash_unmap_reserved_pages();
+       }
+#ifdef CONFIG_XEN
+       if (image) {
+               result = xen_machine_kexec_load(image);
+               if (result)
+                       goto out;
        }
+#endif
        /* Install the new kernel, and  Uninstall the old */
        image = xchg(dest_image, image);
 
@@ -1021,6 +1073,18 @@ out:
        return result;
 }
 
+/*
+ * Add and remove page tables for crashkernel memory
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __weak crash_map_reserved_pages(void)
+{}
+
+void __weak crash_unmap_reserved_pages(void)
+{}
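
Note: the __weak linkage means an architecture that must map the
crashkernel region before writing to it can supply strong definitions
instead. A hypothetical override would have this shape (illustrative
only, no specific architecture implied):

    /* arch/<arch>/kernel/machine_kexec.c */
    void crash_map_reserved_pages(void)
    {
            /* establish a temporary kernel mapping for crashk_res */
    }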
+
 #ifdef CONFIG_COMPAT
 asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                unsigned long nr_segments,
@@ -1073,6 +1137,7 @@ void crash_kexec(struct pt_regs *regs)
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;
+
                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
@@ -1082,6 +1147,82 @@ void crash_kexec(struct pt_regs *regs)
        }
 }
 
+size_t crash_get_memory_size(void)
+{
+       size_t size = 0;
+       mutex_lock(&kexec_mutex);
+       if (crashk_res.end != crashk_res.start)
+               size = resource_size(&crashk_res);
+       mutex_unlock(&kexec_mutex);
+       return size;
+}
+
+#ifndef CONFIG_XEN
+void __weak crash_free_reserved_phys_range(unsigned long begin,
+                                          unsigned long end)
+{
+       unsigned long addr;
+
+       for (addr = begin; addr < end; addr += PAGE_SIZE) {
+               ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+               init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+               free_page((unsigned long)__va(addr));
+               totalram_pages++;
+       }
+}
+
+int crash_shrink_memory(unsigned long new_size)
+{
+       int ret = 0;
+       unsigned long start, end;
+       unsigned long old_size;
+       struct resource *ram_res;
+
+       mutex_lock(&kexec_mutex);
+
+       if (kexec_crash_image) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+       start = crashk_res.start;
+       end = crashk_res.end;
+       old_size = (end == 0) ? 0 : end - start + 1;
+       if (new_size >= old_size) {
+               ret = (new_size == old_size) ? 0 : -EINVAL;
+               goto unlock;
+       }
+
+       ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
+       if (!ram_res) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
+       end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
+
+       crash_map_reserved_pages();
+       crash_free_reserved_phys_range(end, crashk_res.end);
+
+       if ((start == end) && (crashk_res.parent != NULL))
+               release_resource(&crashk_res);
+
+       ram_res->start = end;
+       ram_res->end = crashk_res.end;
+       ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+       ram_res->name = "System RAM";
+
+       crashk_res.end = end - 1;
+
+       insert_resource(&iomem_resource, ram_res);
+       crash_unmap_reserved_pages();
+
+unlock:
+       mutex_unlock(&kexec_mutex);
+       return ret;
+}
+#endif /* !CONFIG_XEN */
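
Note: these two helpers back the /sys/kernel/kexec_crash_size attribute,
so the crashkernel reservation can be trimmed at run time; for instance,
echo 0 > /sys/kernel/kexec_crash_size releases it entirely (provided no
crash kernel is currently loaded). The store handler in kernel/ksysfs.c
is roughly (paraphrased sketch):

    static ssize_t kexec_crash_size_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count)
    {
            unsigned long cnt;
            int ret;

            if (kstrtoul(buf, 0, &cnt))
                    return -EINVAL;

            ret = crash_shrink_memory(cnt);
            return ret < 0 ? ret : count;
    }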
+
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
 {
@@ -1110,6 +1251,7 @@ static void final_note(u32 *buf)
        memcpy(buf, &note, sizeof(note));
 }
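
Note: each entry that append_elf_note() emits is a struct elf_note
header followed by the name and payload, each padded to 4 bytes;
final_note() terminates the buffer with an all-zero header. For
reference, from <linux/elf.h>:

    struct elf_note {
            Elf32_Word n_namesz;   /* name length, including the NUL */
            Elf32_Word n_descsz;   /* payload length */
            Elf32_Word n_type;     /* note type, e.g. NT_PRSTATUS */
    };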
 
+#ifndef CONFIG_XEN
 void crash_save_cpu(struct pt_regs *regs, int cpu)
 {
        struct elf_prstatus prstatus;
@@ -1130,14 +1272,16 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
-       elf_core_copy_regs(&prstatus.pr_reg, regs);
+       elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
 }
+#endif
 
 static int __init crash_notes_memory_init(void)
 {
+#ifndef CONFIG_XEN
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
@@ -1145,11 +1289,13 @@ static int __init crash_notes_memory_init(void)
                " states failed\n");
                return -ENOMEM;
        }
+#endif
        return 0;
 }
 module_init(crash_notes_memory_init)
 
 
+#ifndef CONFIG_XEN
 /*
  * parsing the "crashkernel" commandline
  *
@@ -1228,7 +1374,7 @@ static int __init parse_crashkernel_mem(char                      *cmdline,
        } while (*cur++ == ',');
 
        if (*crash_size > 0) {
-               while (*cur != ' ' && *cur != '@')
+               while (*cur && *cur != ' ' && *cur != '@')
                        cur++;
                if (*cur == '@') {
                        cur++;
@@ -1265,6 +1411,10 @@ static int __init parse_crashkernel_simple(char          *cmdline,
 
        if (*cur == '@')
                *crash_base = memparse(cur+1, &cur);
+       else if (*cur != ' ' && *cur != '\0') {
+               pr_warning("crashkernel: unrecognized char\n");
+               return -EINVAL;
+       }
 
        return 0;
 }
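
Note: between them, parse_crashkernel_mem() and parse_crashkernel_simple()
cover the documented crashkernel= forms, e.g. (illustrative values):

    crashkernel=128M                   simple form, fixed size
    crashkernel=64M@16M                simple form with an explicit base
    crashkernel=512M-2G:64M,2G-:128M   range form, size scales with RAM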
@@ -1312,26 +1462,25 @@ int __init parse_crashkernel(char                *cmdline,
 
        return 0;
 }
+#endif
 
-
-
-void crash_save_vmcoreinfo(void)
+static void update_vmcoreinfo_note(void)
 {
-       u32 *buf;
+       u32 *buf = vmcoreinfo_note;
 
        if (!vmcoreinfo_size)
                return;
-
-       vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
-
-       buf = (u32 *)vmcoreinfo_note;
-
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
                              vmcoreinfo_size);
-
        final_note(buf);
 }
 
+void crash_save_vmcoreinfo(void)
+{
+       vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+       update_vmcoreinfo_note();
+}
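
Note: splitting update_vmcoreinfo_note() out of crash_save_vmcoreinfo()
lets crash_save_vmcoreinfo_init() below write the note at boot as well,
so a usable note exists even if the kernel crashes before
crash_save_vmcoreinfo() gets to run; the crash path then merely appends
CRASHTIME and regenerates the note.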
+
 void vmcoreinfo_append_str(const char *fmt, ...)
 {
        va_list args;
@@ -1369,7 +1518,20 @@ static int __init crash_save_vmcoreinfo_init(void)
 
        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
+#ifdef CONFIG_MMU
+# ifndef CONFIG_X86_XEN
        VMCOREINFO_SYMBOL(swapper_pg_dir);
+# else
+/*
+ * Since for x86-32 Xen swapper_pg_dir is a pointer rather than an array,
+ * make the value stored consistent with native (i.e. the base address of
+ * the page directory).
+ */
+#  define swapper_pg_dir *swapper_pg_dir
+       VMCOREINFO_SYMBOL(swapper_pg_dir);
+#  undef swapper_pg_dir
+# endif
+#endif
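
Note: the temporary redefinition works because of how the macro expands;
from <linux/kexec.h>, roughly:

    #define VMCOREINFO_SYMBOL(name) \
            vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)

The stringized #name is not macro-expanded, so the key stays
"SYMBOL(swapper_pg_dir)", while &name becomes &*swapper_pg_dir, i.e. the
page directory itself rather than the Xen pointer variable.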
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmlist);
 
@@ -1409,6 +1571,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vm_struct, addr);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+       log_buf_kexec_setup();
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
@@ -1416,6 +1579,7 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_NUMBER(PG_swapcache);
 
        arch_crash_save_vmcoreinfo();
+       update_vmcoreinfo_note();
 
        return 0;
 }
@@ -1439,7 +1603,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
-               mutex_lock(&pm_mutex);
+               lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
@@ -1447,22 +1611,24 @@ int kernel_kexec(void)
                        goto Restore_console;
                }
                suspend_console();
-               error = device_suspend(PMSG_FREEZE);
+               error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_console;
-               error = disable_nonboot_cpus();
-               if (error)
-                       goto Resume_devices;
-               device_pm_lock();
-               local_irq_disable();
-               /* At this point, device_suspend() has been called,
-                * but *not* device_power_down(). We *must*
-                * device_power_down() now.  Otherwise, drivers for
+               /* At this point, dpm_suspend_start() has been called,
+                * but *not* dpm_suspend_end(). We *must* call
+                * dpm_suspend_end() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
-               error = device_power_down(PMSG_FREEZE);
+               error = dpm_suspend_end(PMSG_FREEZE);
+               if (error)
+                       goto Resume_devices;
+               error = disable_nonboot_cpus();
+               if (error)
+                       goto Enable_cpus;
+               local_irq_disable();
+               error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
@@ -1477,19 +1643,20 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
-               device_power_up(PMSG_RESTORE);
+               syscore_resume();
  Enable_irqs:
                local_irq_enable();
-               device_pm_unlock();
+ Enable_cpus:
                enable_nonboot_cpus();
+               dpm_resume_start(PMSG_RESTORE);
  Resume_devices:
-               device_resume(PMSG_RESTORE);
+               dpm_resume_end(PMSG_RESTORE);
  Resume_console:
                resume_console();
                thaw_processes();
  Restore_console:
                pm_restore_console();
-               mutex_unlock(&pm_mutex);
+               unlock_system_sleep();
        }
 #endif
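
Note: the net effect of the two preserve_context hunks is the 3.4
ordering dpm_suspend_start() -> dpm_suspend_end() ->
disable_nonboot_cpus() -> local_irq_disable() -> syscore_suspend(), with
syscore_suspend() standing in for the old device_power_down(PMSG_FREEZE)
as the final step; the resume labels unwind the same steps in reverse.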