- Update to 3.4-rc7.
[linux-flexiantxendom0-3.2.10.git] mm/page_alloc.c
index 918330f..e5a3966 100644
@@ -692,6 +692,13 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        int i;
        int bad = 0;
 
+#ifdef CONFIG_XEN
+       if (PageForeign(page)) {
+               PageForeignDestructor(page, order);
+               return false;
+       }
+#endif
+
        trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);
 
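A note on the hunk above: under CONFIG_XEN, pages shared with another
domain are marked "foreign" and must not fall into the buddy
allocator's normal free path. free_pages_prepare() therefore hands them
to their registered destructor and returns false, so every caller skips
the regular freeing work. PageForeign() and PageForeignDestructor()
come from the Xen patchset; the sketch below is only a minimal
userspace illustration of that destructor-hook pattern, with toy
stand-ins for struct page and the destructor (none of these names are
the kernel's):

        #include <stdio.h>
        #include <stdbool.h>

        /* Toy stand-in: a "foreign" page carries a destructor that
         * reclaims it instead of the normal free path. */
        struct page {
                bool foreign;
                void (*foreign_destructor)(struct page *page, unsigned int order);
        };

        /* Illustrative destructor: the real patchset would hand the
         * page back to the grant-table machinery, not the allocator. */
        static void toy_return_to_hypervisor(struct page *page, unsigned int order)
        {
                printf("returning foreign page (order %u)\n", order);
        }

        /* Mirrors the hunk: divert foreign pages, tell the caller to stop. */
        static bool toy_free_pages_prepare(struct page *page, unsigned int order)
        {
                if (page->foreign) {
                        page->foreign_destructor(page, order);
                        return false;   /* do not free to the buddy lists */
                }
                return true;            /* proceed with the normal path */
        }

        int main(void)
        {
                struct page p = { true, toy_return_to_hypervisor };
                if (!toy_free_pages_prepare(&p, 0))
                        printf("normal free skipped\n");
                return 0;
        }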
@@ -718,6 +725,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);
 
+#ifdef CONFIG_XEN
+       WARN_ON(PageForeign(page) && wasMlocked);
+#endif
        if (!free_pages_prepare(page, order))
                return;
 
@@ -1252,6 +1262,9 @@ void free_hot_cold_page(struct page *page, int cold)
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);
 
+#ifdef CONFIG_XEN
+       WARN_ON(PageForeign(page) && wasMlocked);
+#endif
        if (!free_pages_prepare(page, 0))
                return;
 
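Both WARN_ON hunks above assert the same invariant, once in
__free_pages_ok() and once in the per-CPU path free_hot_cold_page(): a
page still marked foreign is presumably never expected to have been
mlocked as well, so the warning firing points at page-state corruption
just before free_pages_prepare() diverts the page. A tiny userspace
stand-in for that report-and-continue check (WARN_ON here is a toy
macro, not the kernel's, which also dumps a backtrace):

        #include <stdio.h>

        /* Toy WARN_ON(): report the failed condition and keep going. */
        #define WARN_ON(cond) \
                ((cond) ? fprintf(stderr, "WARN_ON(%s) at %s:%d\n", \
                                  #cond, __FILE__, __LINE__) : 0)

        int main(void)
        {
                int page_foreign = 1, was_mlocked = 1;
                WARN_ON(page_foreign && was_mlocked);   /* fires */
                WARN_ON(page_foreign && !was_mlocked);  /* silent */
                return 0;
        }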
@@ -1910,7 +1923,13 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
                va_end(args);
        }
 
-       pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+       if (!(gfp_mask & __GFP_WAIT)) {
+               pr_info("The following is only a harmless informational message.\n");
+               pr_info("Unless you get a _continuous_flood_ of these messages, it means\n");
+               pr_info("everything is working fine. Allocations from irqs cannot be\n");
+               pr_info("perfectly reliable and the kernel is designed to handle that.\n");
+       }
+       pr_info("%s: page allocation failure: order:%d, mode:0x%x\n",
                current->comm, order, gfp_mask);
 
        dump_stack();
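The warn_alloc_failed() hunk softens failure reporting for atomic
callers: masks without __GFP_WAIT (GFP_ATOMIC being the common case,
e.g. allocations from interrupt context) cannot sleep or reclaim, so
occasional failures are expected and the caller is supposed to cope.
The patch prepends a reassurance for that case and demotes the message
from pr_warn() to pr_info(). A small sketch of the flag test, with
illustrative bit values rather than the real gfp.h constants:

        #include <stdio.h>

        #define __GFP_WAIT   0x10u   /* illustrative bit, not gfp.h's value */
        #define GFP_ATOMIC   0x20u   /* illustrative: __GFP_WAIT not set    */

        static void report_failure(unsigned int gfp_mask, int order)
        {
                if (!(gfp_mask & __GFP_WAIT))
                        printf("harmless: atomic allocation failed; caller must cope\n");
                printf("page allocation failure: order:%d, mode:0x%x\n",
                       order, gfp_mask);
        }

        int main(void)
        {
                report_failure(GFP_ATOMIC, 0);    /* prints the reassurance  */
                report_failure(__GFP_WAIT, 2);    /* sleeping alloc: no note */
                return 0;
        }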
@@ -5034,6 +5053,22 @@ void setup_per_zone_wmarks(void)
                spin_unlock_irqrestore(&zone->lock, flags);
        }
 
+#ifdef CONFIG_XEN
+       for_each_populated_zone(zone) {
+               unsigned int cpu;
+
+               for_each_online_cpu(cpu) {
+                       unsigned long high;
+
+                       high = percpu_pagelist_fraction
+                              ? zone->present_pages / percpu_pagelist_fraction
+                              : 5 * zone_batchsize(zone);
+                       setup_pagelist_highmark(
+                               per_cpu_ptr(zone->pageset, cpu), high);
+               }
+       }
+#endif
+
        /* update totalreserve_pages */
        calculate_totalreserve_pages();
 }
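The last hunk makes Xen builds refresh every zone's per-CPU pagelist
"high" mark whenever the watermarks are recomputed, reusing the rule
the percpu_pagelist_fraction sysctl applies: a fraction of the zone's
present pages when the sysctl is non-zero, otherwise five times the
zone's batch size. The arithmetic in isolation, with illustrative
numbers (not taken from any real zone):

        #include <stdio.h>

        /* The high-mark rule from the hunk above: a sysctl-controlled
         * fraction of the zone's pages, or 5 batches if the fraction is 0. */
        static unsigned long pcp_high(unsigned long present_pages,
                                      unsigned long fraction,
                                      unsigned long batch)
        {
                return fraction ? present_pages / fraction : 5 * batch;
        }

        int main(void)
        {
                /* Illustrative zone of 1,048,576 pages (4 GiB of 4 KiB pages). */
                printf("fraction 8: high = %lu pages\n", pcp_high(1048576, 8, 186));
                printf("fraction 0: high = %lu pages\n", pcp_high(1048576, 0, 186));
                return 0;
        }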