int i;
int bad = 0;
+#ifdef CONFIG_XEN
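+ /*
+ * Foreign pages are not owned by the buddy allocator; let their
+ * registered destructor handle the free and skip the normal path.
+ */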
+ if (PageForeign(page)) {
+ PageForeignDestructor(page, order);
+ return false;
+ }
+#endif
+
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
unsigned long flags;
int wasMlocked = __TestClearPageMlocked(page);
+#ifdef CONFIG_XEN
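+ /* A foreign page should never have been mlocked. */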
+ WARN_ON(PageForeign(page) && wasMlocked);
+#endif
if (!free_pages_prepare(page, order))
return;
}
/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
+ *
+ * Note that this code is protected against sending an IPI to an offline
+ * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
+ * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
+ * nothing keeps CPUs from showing up after we populated the cpumask and
+ * before the call to on_each_cpu_mask().
*/
void drain_all_pages(void)
{
- on_each_cpu(drain_local_pages, NULL, 1);
+ int cpu;
+ struct per_cpu_pageset *pcp;
+ struct zone *zone;
+
+ /*
+ * Allocate in the BSS so we won't require an allocation in
+ * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
+ */
+ static cpumask_t cpus_with_pcps;
+
+ /*
+ * We don't care about racing with CPU hotplug events:
+ * the offline notification will cause the notified CPU
+ * to drain its own pcp lists, and on_each_cpu_mask()
+ * disables preemption as part of its processing.
+ */
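+ /*
+ * Only IPI CPUs that actually hold pages on their per-cpu lists,
+ * instead of interrupting every online CPU.
+ */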
+ for_each_online_cpu(cpu) {
+ bool has_pcps = false;
+ for_each_populated_zone(zone) {
+ pcp = per_cpu_ptr(zone->pageset, cpu);
+ if (pcp->pcp.count) {
+ has_pcps = true;
+ break;
+ }
+ }
+ if (has_pcps)
+ cpumask_set_cpu(cpu, &cpus_with_pcps);
+ else
+ cpumask_clear_cpu(cpu, &cpus_with_pcps);
+ }
+ on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
}
#ifdef CONFIG_HIBERNATION
int migratetype;
int wasMlocked = __TestClearPageMlocked(page);
+#ifdef CONFIG_XEN
+ WARN_ON(PageForeign(page) && wasMlocked);
+#endif
if (!free_pages_prepare(page, 0))
return;
va_end(args);
}
- pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
+ if (!(gfp_mask & __GFP_WAIT)) {
+ pr_info("The following is only an harmless informational message.\n");
+ pr_info("Unless you get a _continuous_flood_ of these messages it means\n");
+ pr_info("everything is working fine. Allocations from irqs cannot be\n");
+ pr_info("perfectly reliable and the kernel is designed to handle that.\n");
+ }
+ pr_info("%s: page allocation failure. order:%d, mode:0x%x\n",
current->comm, order, gfp_mask);
dump_stack();
spin_unlock_irqrestore(&zone->lock, flags);
}
+#ifdef CONFIG_XEN
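+ /*
+ * Keep each zone's per-cpu pagelist high mark in sync with the
+ * current zone size and percpu_pagelist_fraction setting.
+ */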
+ for_each_populated_zone(zone) {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ unsigned long high;
+
+ high = percpu_pagelist_fraction
+ ? zone->present_pages / percpu_pagelist_fraction
+ : 5 * zone_batchsize(zone);
+ setup_pagelist_highmark(
+ per_cpu_ptr(zone->pageset, cpu), high);
+ }
+ }
+#endif
+
/* update totalreserve_pages */
calculate_totalreserve_pages();
}
int ret;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
- if (!write || (ret == -EINVAL))
+ if (!write || (ret < 0))
return ret;
for_each_populated_zone(zone) {
for_each_possible_cpu(cpu) {