- Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] / kernel / kexec.c
index 7b08867..b19e3af 100644
 #include <asm/system.h>
 #include <asm/sections.h>
 
+#ifndef CONFIG_XEN
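+/*
+ * Under Xen the per-cpu crash notes are expected to be allocated and
+ * maintained by the hypervisor, so no kernel-side buffer is needed.
+ */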
 /* Per cpu memory for storing cpu states in case of system crash. */
 note_buf_t __percpu *crash_notes;
+#endif
 
 /* vmcoreinfo stuff */
 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
-u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
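+/*
+ * For Xen on x86 the note buffer is page aligned -- presumably so the
+ * machine frame backing it can be handed out to the crash tools as a
+ * whole page.
+ */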
+u32
+#if defined(CONFIG_XEN) && defined(CONFIG_X86)
+__page_aligned_bss
+#endif
+vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 size_t vmcoreinfo_size;
 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
 
@@ -356,13 +362,26 @@ static int kimage_is_destination_range(struct kimage *image,
        return 0;
 }
 
-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
 {
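+       /*
+        * 'limit' is the highest machine address these pages may occupy;
+        * ~0UL means unrestricted.  Only the Xen path below consumes it.
+        */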
        struct page *pages;
 
        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;
+#ifdef CONFIG_XEN
+               int address_bits;
+
+               if (limit == ~0UL)
+                       address_bits = BITS_PER_LONG;
+               else
+                       address_bits = ilog2(limit);
+
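+               /*
+                * A guest's pages may be backed by arbitrary machine
+                * frames; ask Xen to exchange them for frames below the
+                * limit, since kexec operates on machine addresses.
+                */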
+               if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
+                       __free_pages(pages, order);
+                       return NULL;
+               }
+#endif
                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
@@ -426,10 +445,10 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        do {
                unsigned long pfn, epfn, addr, eaddr;
 
-               pages = kimage_alloc_pages(GFP_KERNEL, order);
+               pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
                if (!pages)
                        break;
-               pfn   = page_to_pfn(pages);
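+               /*
+                * kexec deals in machine addresses here, so the Xen
+                * build is expected to map kexec_page_to_pfn() to the
+                * machine frame number (plain page_to_pfn() natively).
+                */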
+               pfn   = kexec_page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
@@ -463,6 +482,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        return pages;
 }
 
+#ifndef CONFIG_XEN
 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                      unsigned int order)
 {
@@ -516,7 +536,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
-                       pages = pfn_to_page(hole_start >> PAGE_SHIFT);
+                       pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
@@ -543,6 +563,13 @@ struct page *kimage_alloc_control_pages(struct kimage *image,
 
        return pages;
 }
+#else /* CONFIG_XEN */
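+/*
+ * Under Xen the crash region is owned by the hypervisor rather than the
+ * kernel, so crash images need no special control-page allocator here.
+ */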
+struct page *kimage_alloc_control_pages(struct kimage *image,
+                                        unsigned int order)
+{
+       return kimage_alloc_normal_control_pages(image, order);
+}
+#endif
 
 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 {
@@ -558,7 +585,7 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
                        return -ENOMEM;
 
                ind_page = page_address(page);
-               *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+               *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -617,13 +644,13 @@ static void kimage_terminate(struct kimage *image)
 #define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION)? \
-                       phys_to_virt((entry & PAGE_MASK)): ptr +1)
+                       kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
 
 static void kimage_free_entry(kimage_entry_t entry)
 {
        struct page *page;
 
-       page = pfn_to_page(entry >> PAGE_SHIFT);
+       page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
 }
 
@@ -635,6 +662,10 @@ static void kimage_free(struct kimage *image)
        if (!image)
                return;
 
+#ifdef CONFIG_XEN
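+       /*
+        * Have the hypervisor release the image (undoing
+        * xen_machine_kexec_load()) before its pages are freed.
+        */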
+       xen_machine_kexec_unload(image);
+#endif
+
        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
@@ -710,7 +741,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
@@ -721,16 +752,16 @@ static struct page *kimage_alloc_page(struct kimage *image,
                kimage_entry_t *old;
 
                /* Allocate a page, if we run out of memory give up */
-               page = kimage_alloc_pages(gfp_mask, 0);
+               page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
-               if (page_to_pfn(page) >
+               if (kexec_page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
 
                /* If it is the destination page we want use it */
                if (addr == destination)
@@ -753,7 +784,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
                        struct page *old_page;
 
                        old_addr = *old & PAGE_MASK;
-                       old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+                       old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);
 
@@ -809,7 +840,7 @@ static int kimage_load_normal_segment(struct kimage *image,
                        result  = -ENOMEM;
                        goto out;
                }
-               result = kimage_add_page(image, page_to_pfn(page)
+               result = kimage_add_page(image, kexec_page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;
@@ -841,6 +872,7 @@ out:
        return result;
 }
 
+#ifndef CONFIG_XEN
 static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
 {
@@ -863,7 +895,7 @@ static int kimage_load_crash_segment(struct kimage *image,
                char *ptr;
                size_t uchunk, mchunk;
 
-               page = pfn_to_page(maddr >> PAGE_SHIFT);
+               page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result  = -ENOMEM;
                        goto out;
@@ -912,6 +944,13 @@ static int kimage_load_segment(struct kimage *image,
 
        return result;
 }
+#else /* CONFIG_XEN */
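+/*
+ * With Xen the loaded image is handed over wholesale by
+ * xen_machine_kexec_load(), so crash segments need no special casing
+ * and every segment is loaded as a normal one.
+ */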
+static int kimage_load_segment(struct kimage *image,
+                               struct kexec_segment *segment)
+{
+       return kimage_load_normal_segment(image, segment);
+}
+#endif
 
 /*
  * Exec Kernel system call: for obvious reasons only root may call it.
@@ -1018,6 +1057,13 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                if (flags & KEXEC_ON_CRASH)
                        crash_unmap_reserved_pages();
        }
+#ifdef CONFIG_XEN
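+       /*
+        * Hand the prepared image to the hypervisor.  On failure the
+        * common exit path frees it via kimage_free().
+        */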
+       if (image) {
+               result = xen_machine_kexec_load(image);
+               if (result)
+                       goto out;
+       }
+#endif
 /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);
 
@@ -1112,6 +1158,7 @@ size_t crash_get_memory_size(void)
        return size;
 }
 
+#ifndef CONFIG_XEN
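+/*
+ * Shrinking the crash region only makes sense while the kernel owns it;
+ * under Xen that memory is managed by the hypervisor.
+ */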
 void __weak crash_free_reserved_phys_range(unsigned long begin,
                                           unsigned long end)
 {
@@ -1175,6 +1222,7 @@ unlock:
        mutex_unlock(&kexec_mutex);
        return ret;
 }
+#endif /* !CONFIG_XEN */
 
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
                            size_t data_len)
@@ -1204,6 +1252,7 @@ static void final_note(u32 *buf)
        memcpy(buf, &note, sizeof(note));
 }
 
+#ifndef CONFIG_XEN
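+/* crash_save_cpu() writes into crash_notes, which does not exist under Xen. */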
 void crash_save_cpu(struct pt_regs *regs, int cpu)
 {
        struct elf_prstatus prstatus;
@@ -1229,9 +1278,11 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
                              &prstatus, sizeof(prstatus));
        final_note(buf);
 }
+#endif
 
 static int __init crash_notes_memory_init(void)
 {
+#ifndef CONFIG_XEN
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
        if (!crash_notes) {
@@ -1239,11 +1290,13 @@ static int __init crash_notes_memory_init(void)
                " states failed\n");
                return -ENOMEM;
        }
+#endif
        return 0;
 }
 module_init(crash_notes_memory_init)
 
 
+#ifndef CONFIG_XEN
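+/* Xen is assumed to handle "crashkernel=" itself, so dom0 skips parsing. */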
 /*
  * parsing the "crashkernel" commandline
  *
@@ -1406,7 +1459,7 @@ int __init parse_crashkernel(char                  *cmdline,
 
        return 0;
 }
-
+#endif
 
 static void update_vmcoreinfo_note(void)
 {
@@ -1462,7 +1515,18 @@ static int __init crash_save_vmcoreinfo_init(void)
 
        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
+#ifndef CONFIG_X86_XEN
        VMCOREINFO_SYMBOL(swapper_pg_dir);
+#else
+/*
+ * On x86-32 Xen, swapper_pg_dir is a pointer rather than an array, so
+ * make the stored value consistent with native kernels (i.e. the base
+ * address of the page directory).
+ */
+# define swapper_pg_dir *swapper_pg_dir
+       VMCOREINFO_SYMBOL(swapper_pg_dir);
+# undef swapper_pg_dir
+#endif
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmlist);