- Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] mm/page_alloc.c
index b9a81db..dae6d3d 100644
@@ -692,6 +692,13 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
        int i;
        int bad = 0;
 
+#ifdef CONFIG_XEN
+       if (PageForeign(page)) {
+               PageForeignDestructor(page, order);
+               return false;
+       }
+#endif
+
        trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);
 
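
PageForeign() and PageForeignDestructor() are not mainline helpers; they come from the companion Xen page-flags patch in this series. From memory, that patch defines them roughly as sketched below: a PG_foreign page flag plus a destructor callback stashed in the otherwise unused page->index of a foreign (granted) page. This is a sketch for orientation, not the exact hunk:

        /* Sketch of the companion Xen page-flags helpers (not part of this
         * file): PG_foreign marks a page that really belongs to another
         * domain, and page->index holds its teardown callback. */
        #define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)

        #define SetPageForeign(page, dtor) do {				\
        	set_bit(PG_foreign, &(page)->flags);			\
        	(page)->index = (long)(dtor);				\
        } while (0)

        #define ClearPageForeign(page) do {				\
        	clear_bit(PG_foreign, &(page)->flags);			\
        	(page)->index = 0;					\
        } while (0)

        /* Invoke the stored destructor; the hunk above then returns false
         * so the page never reaches the buddy free path. */
        #define PageForeignDestructor(page, order)			\
        	((void (*)(struct page *, unsigned int))(page)->index)(page, order)
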
@@ -718,6 +725,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);
 
+#ifdef CONFIG_XEN
+       WARN_ON(PageForeign(page) && wasMlocked);
+#endif
        if (!free_pages_prepare(page, order))
                return;
 
@@ -1216,6 +1226,9 @@ void free_hot_cold_page(struct page *page, int cold)
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);
 
+#ifdef CONFIG_XEN
+       WARN_ON(PageForeign(page) && wasMlocked);
+#endif
        if (!free_pages_prepare(page, 0))
                return;
 
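
Both WARN_ON()s are sanity checks only: a foreign page belongs to another domain, so it should never have been mlocked by this kernel. For orientation, a backend would hook into this path roughly as follows; the function names are made up for illustration and do not correspond to a real driver:

        /* Hypothetical backend usage (illustrative names only): mark a
         * granted page as foreign and register a destructor.  When the last
         * reference is dropped, free_pages_prepare() above calls the
         * destructor and returns false instead of freeing the page. */
        static void my_backend_page_release(struct page *page, unsigned int order)
        {
        	/* tear down the grant mapping and recycle the page in the
        	 * backend's own pool; never hand it to the buddy allocator */
        }

        static void my_backend_grant_page(struct page *page)
        {
        	SetPageForeign(page, my_backend_page_release);
        }
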
@@ -4996,6 +5009,22 @@ void setup_per_zone_wmarks(void)
                spin_unlock_irqrestore(&zone->lock, flags);
        }
 
+#ifdef CONFIG_XEN
+       for_each_populated_zone(zone) {
+               unsigned int cpu;
+
+               for_each_online_cpu(cpu) {
+                       unsigned long high;
+
+                       high = percpu_pagelist_fraction
+                              ? zone->present_pages / percpu_pagelist_fraction
+                              : 5 * zone_batchsize(zone);
+                       setup_pagelist_highmark(
+                               per_cpu_ptr(zone->pageset, cpu), high);
+               }
+       }
+#endif
+
        /* update totalreserve_pages */
        calculate_totalreserve_pages();
 }
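
The CONFIG_XEN block re-derives each populated zone's per-CPU pagelist high watermark whenever the zone watermarks are recomputed, using the same formula as the percpu_pagelist_fraction sysctl handler (zone->present_pages / percpu_pagelist_fraction when the sysctl is set, otherwise 5 * zone_batchsize(zone)). setup_pagelist_highmark() is existing code elsewhere in mm/page_alloc.c and is unchanged by this patch; quoted roughly from the 3.3 source for reference:

        /* Existing helper in mm/page_alloc.c (not part of this patch):
         * set the pcp high watermark and derive the batch size from it. */
        static void setup_pagelist_highmark(struct per_cpu_pageset *p,
        				    unsigned long high)
        {
        	struct per_cpu_pages *pcp;

        	pcp = &p->pcp;
        	pcp->high = high;
        	pcp->batch = max(1UL, high / 4);
        	if ((high / 4) > (PAGE_SHIFT * 8))
        		pcp->batch = PAGE_SHIFT * 8;
        }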