 	int i;
 	int bad = 0;
+#ifdef CONFIG_XEN
+	/* A foreign page is owned by another domain: hand it back
+	 * through its registered destructor instead of returning it
+	 * to the buddy allocator. */
+	if (PageForeign(page)) {
+		PageForeignDestructor(page, order);
+		return false;
+	}
+#endif
+
 	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);
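
Background for the hunk above: PageForeign() and PageForeignDestructor() come from the out-of-tree Xen patches, not from mainline. A foreign page was granted to this domain by another domain, so when its last reference is dropped it must be handed back through a callback supplied by its owner rather than freed into the buddy allocator. Below is a minimal sketch of how such a hook pair could look, assuming (as the classic Xen trees did) that the destructor pointer is stashed in the otherwise-unused page->index field; the names mirror the patch, but the bodies are illustrative, not the actual implementation.

	/* Illustrative sketch only -- modeled on the out-of-tree Xen patches. */
	typedef void (*foreign_dtor_t)(struct page *page, unsigned int order);

	static inline void SetPageForeignDestructor(struct page *page,
						    foreign_dtor_t dtor)
	{
		/* Assumption: page->index is unused while the page is foreign. */
		page->index = (pgoff_t)(unsigned long)dtor;
	}

	static inline void PageForeignDestructor(struct page *page,
						 unsigned int order)
	{
		/* Dispatch to the owner's callback; the page never reaches
		 * the buddy allocator. */
		((foreign_dtor_t)(unsigned long)page->index)(page, order);
	}
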
 	unsigned long flags;
 	int wasMlocked = __TestClearPageMlocked(page);
+#ifdef CONFIG_XEN
+	/* Foreign pages should never have been mlocked in this domain. */
+	WARN_ON(PageForeign(page) && wasMlocked);
+#endif
 	if (!free_pages_prepare(page, order))
 		return;
 	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
+#ifdef CONFIG_XEN
+	WARN_ON(PageForeign(page) && wasMlocked);
+#endif
 	if (!free_pages_prepare(page, 0))
 		return;
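
In both hunks above, __TestClearPageMlocked() atomically clears the page's Mlocked flag and returns its previous state, so wasMlocked is nonzero only if somebody had mlocked the page. A foreign page belongs to another domain and should never have been mapped, let alone mlocked, in this one, which is the invariant the WARN_ON() checks. A simplified sketch of the test-and-clear semantics, using a plain flag word instead of the real page-flags machinery (illustrative only, not the kernel code):

	/* Sketch of test-and-clear: return the old bit, clear it in place.
	 * Plain C stand-in for __TestClearPageMlocked(). */
	static inline int test_and_clear_bit_simple(unsigned long *word, int bit)
	{
		int was_set = (*word >> bit) & 1;

		*word &= ~(1UL << bit);
		return was_set;
	}
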
 		va_end(args);
 	}
-	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+	/* Atomic allocations are allowed to fail; say so before dumping
+	 * the details, so users do not mistake one-off failures for bugs. */
+	if (!(gfp_mask & __GFP_WAIT)) {
+		pr_info("The following is only a harmless informational message.\n");
+		pr_info("Unless you get a _continuous_flood_ of these messages it means\n");
+		pr_info("everything is working fine. Allocations from irqs cannot be\n");
+		pr_info("perfectly reliable and the kernel is designed to handle that.\n");
+	}
+	pr_info("%s: page allocation failure. order:%d, mode:0x%x\n",
 		current->comm, order, gfp_mask);
 	dump_stack();
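
The hunk above downgrades the allocation-failure report from pr_warn() to pr_info() and, for allocations that may not sleep (!(gfp_mask & __GFP_WAIT)), prefixes it with a note that isolated failures are expected: an atomic allocation cannot wait for reclaim, so its caller must already cope with getting NULL back. A sketch of such a caller, where struct rx_ring, RX_BUF_SIZE and rx_ring_push() are made up for the example; only kmalloc() and GFP_ATOMIC are real kernel interfaces:

	/* GFP_ATOMIC implies !__GFP_WAIT, so a failure here would trigger the
	 * informational message above and is simply retried on the next pass. */
	static int fill_rx_ring(struct rx_ring *ring)
	{
		void *buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);

		if (!buf)
			return -ENOMEM;	/* harmless: the ring is refilled later */
		rx_ring_push(ring, buf);
		return 0;
	}
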
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
+#ifdef CONFIG_XEN
+	/* Watermarks changed, so re-derive each zone's per-CPU pagelist
+	 * high mark from the same inputs the sysctl path uses. */
+	for_each_populated_zone(zone) {
+		unsigned int cpu;
+
+		for_each_online_cpu(cpu) {
+			unsigned long high;
+
+			high = percpu_pagelist_fraction
+			       ? zone->present_pages / percpu_pagelist_fraction
+			       : 5 * zone_batchsize(zone);
+			setup_pagelist_highmark(
+				per_cpu_ptr(zone->pageset, cpu), high);
+		}
+	}
+#endif
+
 	/* update totalreserve_pages */
 	calculate_totalreserve_pages();
 }
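
The CONFIG_XEN hunk above re-derives every zone's per-CPU pagelist high mark whenever the watermarks are recomputed: with the percpu_pagelist_fraction sysctl set, each CPU may cache up to present_pages / fraction pages of that zone; otherwise the default of five times the zone's batch size is used. A stand-alone restatement of that formula, with invented example values:

	/* Restatement of the high-mark computation above; not a kernel function.
	 * Example: present_pages = 262144, fraction = 8 -> high = 32768 pages;
	 * fraction = 0 (unset), zone_batchsize() = 31   -> high = 5 * 31 = 155. */
	static unsigned long pcp_high_mark(unsigned long present_pages,
					   int fraction, int batch)
	{
		return fraction ? present_pages / fraction : 5UL * batch;
	}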