* Copyright (C) 1998,2000 Rik van Riel
* Thanks go out to Claus Fischer for some serious inspiration and
* for goading me into coding this file...
+ * Copyright (C) 2010 Google, Inc.
+ * Rewritten by David Rientjes
*
* The routines in this file are used to kill a process when
* we're seriously out of memory. This gets called from __alloc_pages()
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
+#include <linux/ptrace.h>
+#include <linux/freezer.h>
+#include <linux/ftrace.h>
+#include <linux/ratelimit.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);
-/* #define DEBUG */
+
+/*
+ * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
+ * @old_val: old oom_score_adj for compare
+ * @new_val: new oom_score_adj for swap
+ *
+ * Sets the oom_score_adj value for current to @new_val iff its present value is
+ * @old_val. Usually used to reinstate a previous value to prevent racing
+ * with userspace tuning the value in the interim.
+ */
+void compare_swap_oom_score_adj(int old_val, int new_val)
+{
+ struct sighand_struct *sighand = current->sighand;
+
+ spin_lock_irq(&sighand->siglock);
+ if (current->signal->oom_score_adj == old_val)
+ current->signal->oom_score_adj = new_val;
+ trace_oom_score_adj_update(current);
+ spin_unlock_irq(&sighand->siglock);
+}
+
+/**
+ * test_set_oom_score_adj() - set current's oom_score_adj and return old value
+ * @new_val: new oom_score_adj value
+ *
+ * Sets the oom_score_adj value for current to @new_val with proper
+ * synchronization and returns the old value. Usually used to temporarily
+ * set a value, save the old value in the caller, and then reinstate it later.
+ */
+int test_set_oom_score_adj(int new_val)
+{
+ struct sighand_struct *sighand = current->sighand;
+ int old_val;
+
+ spin_lock_irq(&sighand->siglock);
+ old_val = current->signal->oom_score_adj;
+ current->signal->oom_score_adj = new_val;
+ trace_oom_score_adj_update(current);
+ spin_unlock_irq(&sighand->siglock);
+
+ return old_val;
+}
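+
+/*
+ * A sketch of the intended save/restore pattern (the caller here is
+ * hypothetical): a task that wants to become the preferred oom victim
+ * for the duration of an operation raises its oom_score_adj, then
+ * reinstates the old value with the compare-and-swap helper so that a
+ * concurrent userspace write in the interim is not silently clobbered:
+ *
+ *	int old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
+ *	... allocation-heavy work that may invoke the oom killer ...
+ *	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old_adj);
+ */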
#ifdef CONFIG_NUMA
/**
if (cpuset_mems_allowed_intersects(current, tsk))
return true;
}
- tsk = next_thread(tsk);
- } while (tsk != start);
+ } while_each_thread(start, tsk);
+
return false;
}
#else
* pointer. Return p, or any of its subthreads with a valid ->mm, with
* task_lock() held.
*/
-static struct task_struct *find_lock_task_mm(struct task_struct *p)
+struct task_struct *find_lock_task_mm(struct task_struct *p)
{
struct task_struct *t = p;
}
/* return true if the task is not suitable as a candidate victim. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
- const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+ const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
return true;
/* When mem_cgroup_out_of_memory() is invoked and p is not a member of the group */
- if (mem && !task_in_mem_cgroup(p, mem))
+ if (memcg && !task_in_mem_cgroup(p, memcg))
return true;
/* p may not have freeable memory in nodemask */
}
/**
- * badness - calculate a numeric value for how bad this task has been
+ * oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of the task whose badness score we should calculate
- * @uptime: current uptime in seconds
+ * @totalpages: total present RAM allowed for page allocation
*
- * The formula used is relatively simple and documented inline in the
- * function. The main rationale is that we want to select a good task
- * to kill when we run out of memory.
- *
- * Good in this context means that:
- * 1) we lose the minimum amount of work done
- * 2) we recover a large amount of memory
- * 3) we don't kill anything innocent of eating tons of memory
- * 4) we want to kill the minimum amount of processes (one)
- * 5) we try to kill the process the user expects us to kill, this
- * algorithm has been meticulously tuned to meet the principle
- * of least surprise ... (be careful when you change it)
+ * The heuristic for determining which task to kill is made to be as simple and
+ * predictable as possible. The goal is to return the highest value for the
+ * task consuming the most memory to avoid subsequent oom failures.
*/
-unsigned long badness(struct task_struct *p, struct mem_cgroup *mem,
- const nodemask_t *nodemask, unsigned long uptime)
+unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+ const nodemask_t *nodemask, unsigned long totalpages)
{
- unsigned long points, cpu_time, run_time;
- struct task_struct *child;
- struct task_struct *c, *t;
- int oom_adj = p->signal->oom_adj;
- struct task_cputime task_time;
- unsigned long utime;
- unsigned long stime;
+ long points;
- if (oom_unkillable_task(p, mem, nodemask))
- return 0;
- if (oom_adj == OOM_DISABLE)
+ if (oom_unkillable_task(p, memcg, nodemask))
return 0;
p = find_lock_task_mm(p);
if (!p)
return 0;
- /*
- * The memory size of the process is the basis for the badness.
- */
- points = p->mm->total_vm;
- task_unlock(p);
+ if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+ task_unlock(p);
+ return 0;
+ }
/*
- * swapoff can easily use up all memory, so kill those first.
+ * The memory controller may have a limit of 0 bytes, so avoid a divide
+ * by zero if necessary.
*/
- if (p->flags & PF_OOM_ORIGIN)
- return ULONG_MAX;
+ if (!totalpages)
+ totalpages = 1;
/*
- * Processes which fork a lot of child processes are likely
- * a good choice. We add half the vmsize of the children if they
- * have an own mm. This prevents forking servers to flood the
- * machine with an endless amount of children. In case a single
- * child is eating the vast majority of memory, adding only half
- * to the parents will make the child our kill candidate of choice.
+ * The baseline for the badness score is the proportion of RAM that each
+ * task's rss, pagetable and swap space use.
*/
- t = p;
- do {
- list_for_each_entry(c, &t->children, sibling) {
- child = find_lock_task_mm(c);
- if (child) {
- if (child->mm != p->mm)
- points += child->mm->total_vm/2 + 1;
- task_unlock(child);
- }
- }
- } while_each_thread(p, t);
+ points = get_mm_rss(p->mm) + p->mm->nr_ptes;
+ points += get_mm_counter(p->mm, MM_SWAPENTS);
- /*
- * CPU time is in tens of seconds and run time is in thousands
- * of seconds. There is no particular reason for this other than
- * that it turned out to work very well in practice.
- */
- thread_group_cputime(p, &task_time);
- utime = cputime_to_jiffies(task_time.utime);
- stime = cputime_to_jiffies(task_time.stime);
- cpu_time = (utime + stime) >> (SHIFT_HZ + 3);
-
-
- if (uptime >= p->start_time.tv_sec)
- run_time = (uptime - p->start_time.tv_sec) >> 10;
- else
- run_time = 0;
-
- if (cpu_time)
- points /= int_sqrt(cpu_time);
- if (run_time)
- points /= int_sqrt(int_sqrt(run_time));
-
- /*
- * Niced processes are most likely less important, so double
- * their badness points.
- */
- if (task_nice(p) > 0)
- points *= 2;
+ points *= 1000;
+ points /= totalpages;
+ task_unlock(p);
/*
- * Superuser processes are usually more important, so we make it
- * less likely that we kill those.
+ * Root processes get 3% bonus, just like the __vm_enough_memory()
+ * implementation used by LSMs.
*/
- if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
- has_capability_noaudit(p, CAP_SYS_RESOURCE))
- points /= 4;
+ if (has_capability_noaudit(p, CAP_SYS_ADMIN))
+ points -= 30;
/*
- * We don't want to kill a process with direct hardware access.
- * Not only could that mess up the hardware, but usually users
- * tend to only have this flag set on applications they think
- * of as important.
+ * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
+ * either completely disable oom killing or always prefer a certain
+ * task.
*/
- if (has_capability_noaudit(p, CAP_SYS_RAWIO))
- points /= 4;
+ points += p->signal->oom_score_adj;
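+ /*
+ * Since points was normalized to thousandths of totalpages above,
+ * oom_score_adj shares those units: an adjustment of +100, for example,
+ * handicaps the task by the equivalent of 10% of usable memory.
+ */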
/*
- * Adjust the score by oom_adj.
+ * Never return 0 for an eligible task that may be killed since it's
+ * possible that no single user task uses more than 0.1% of memory and
+ * no single admin task uses more than 3.0%.
*/
- if (oom_adj) {
- if (oom_adj > 0) {
- if (!points)
- points = 1;
- points <<= oom_adj;
- } else
- points >>= -(oom_adj);
- }
-
-#ifdef DEBUG
- printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
- p->pid, p->comm, points);
-#endif
- return points;
+ if (points <= 0)
+ return 1;
+ return (points < 1000) ? points : 1000;
}
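+
+/*
+ * A worked example of the heuristic above (numbers illustrative): a task
+ * whose rss, page tables and swap entries together span 1/4 of totalpages
+ * starts at 250 points; if it is root-owned (CAP_SYS_ADMIN), the 3% bonus
+ * brings it to 220; an oom_score_adj of +300 then yields 520. The result
+ * is always clamped to [1, 1000] for an eligible task.
+ */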
/*
*/
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
- gfp_t gfp_mask, nodemask_t *nodemask)
+ gfp_t gfp_mask, nodemask_t *nodemask,
+ unsigned long *totalpages)
{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ bool cpuset_limited = false;
+ int nid;
+
+ /* Default to all available memory */
+ *totalpages = totalram_pages + total_swap_pages;
+ if (!zonelist)
+ return CONSTRAINT_NONE;
/*
* Reach here only when __GFP_NOFAIL is used. So, we should avoid
* killing current; we have to kill a random task in this case.
return CONSTRAINT_NONE;
/*
- * The nodemask here is a nodemask passed to alloc_pages(). Now,
- * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
- * feature. mempolicy is an only user of nodemask here.
- * check mempolicy's nodemask contains all N_HIGH_MEMORY
+ * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
+ * the page allocator means a mempolicy is in effect. Cpuset policy
+ * is enforced in get_page_from_freelist().
*/
- if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
+ if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
+ *totalpages = total_swap_pages;
+ for_each_node_mask(nid, *nodemask)
+ *totalpages += node_spanned_pages(nid);
return CONSTRAINT_MEMORY_POLICY;
+ }
/* Check whether this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask)
if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
- return CONSTRAINT_CPUSET;
+ cpuset_limited = true;
+ if (cpuset_limited) {
+ *totalpages = total_swap_pages;
+ for_each_node_mask(nid, cpuset_current_mems_allowed)
+ *totalpages += node_spanned_pages(nid);
+ return CONSTRAINT_CPUSET;
+ }
return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
- gfp_t gfp_mask, nodemask_t *nodemask)
+ gfp_t gfp_mask, nodemask_t *nodemask,
+ unsigned long *totalpages)
{
+ *totalpages = totalram_pages + total_swap_pages;
return CONSTRAINT_NONE;
}
#endif
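+
+/*
+ * To summarize how *totalpages is derived above for the CONFIG_NUMA case:
+ *   CONSTRAINT_NONE:          totalram_pages + total_swap_pages
+ *   CONSTRAINT_MEMORY_POLICY: pages spanned by the mempolicy's nodes + swap
+ *   CONSTRAINT_CPUSET:        pages spanned by cpuset_current_mems_allowed + swap
+ * Without CONFIG_NUMA, all of RAM plus swap is always used as the base.
+ */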
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
-static struct task_struct *select_bad_process(unsigned long *ppoints,
- struct mem_cgroup *mem, const nodemask_t *nodemask)
+static struct task_struct *select_bad_process(unsigned int *ppoints,
+ unsigned long totalpages, struct mem_cgroup *memcg,
+ const nodemask_t *nodemask, bool force_kill)
{
- struct task_struct *p;
+ struct task_struct *g, *p;
struct task_struct *chosen = NULL;
- struct timespec uptime;
*ppoints = 0;
- do_posix_clock_monotonic_gettime(&uptime);
- for_each_process(p) {
- unsigned long points;
+ do_each_thread(g, p) {
+ unsigned int points;
- if (oom_unkillable_task(p, mem, nodemask))
+ if (p->exit_state)
+ continue;
+ if (oom_unkillable_task(p, memcg, nodemask))
continue;
/*
* blocked waiting for another task which itself is waiting
* for memory. Is there a better alternative?
*/
- if (test_tsk_thread_flag(p, TIF_MEMDIE))
- return ERR_PTR(-1UL);
-
- /*
- * This is in the process of releasing memory so wait for it
- * to finish before killing some other task by mistake.
- *
- * However, if p is the current task, we allow the 'kill' to
- * go ahead if it is exiting: this will simply set TIF_MEMDIE,
- * which will allow it to gain access to memory reserves in
- * the process of exiting and releasing its resources.
- * Otherwise we could get an easy OOM deadlock.
- */
- if ((p->flags & PF_EXITING) && p->mm) {
- if (p != current)
+ if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
+ if (unlikely(frozen(p)))
+ __thaw_task(p);
+ if (!force_kill)
return ERR_PTR(-1UL);
-
- chosen = p;
- *ppoints = ULONG_MAX;
}
-
- if (p->signal->oom_adj == OOM_DISABLE)
+ if (!p->mm)
continue;
- points = badness(p, mem, nodemask, uptime.tv_sec);
- if (points > *ppoints || !chosen) {
+ if (p->flags & PF_EXITING) {
+ /*
+ * If p is the current task and is in the process of
+ * releasing memory, we allow the "kill" to set
+ * TIF_MEMDIE, which will allow it to gain access to
+ * memory reserves. Otherwise, it may stall forever.
+ *
+ * The loop isn't broken here, however, in case other
+ * threads are found to have already been oom killed.
+ */
+ if (p == current) {
+ chosen = p;
+ *ppoints = 1000;
+ } else if (!force_kill) {
+ /*
+ * If this task is not being ptraced on exit,
+ * then wait for it to finish before killing
+ * some other task unnecessarily.
+ */
+ if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
+ return ERR_PTR(-1UL);
+ }
+ }
+
+ points = oom_badness(p, memcg, nodemask, totalpages);
+ if (points > *ppoints) {
chosen = p;
*ppoints = points;
}
- }
+ } while_each_thread(g, p);
return chosen;
}
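+
+/*
+ * The scan above has three possible outcomes, as interpreted by callers:
+ * NULL means no eligible task was found; ERR_PTR(-1UL) means a victim is
+ * already exiting or has TIF_MEMDIE set, so the caller should back off
+ * and let it die; any other pointer is the chosen victim to hand to
+ * oom_kill_process().
+ */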
/**
* dump_tasks - dump current memory state of all system tasks
- * @mem: current's memory controller, if constrained
+ * @memcg: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
*
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks. Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
* State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
- * score, and name.
- *
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
+ * value, oom_score_adj value, and name.
*
* Call with tasklist_lock read-locked.
*/
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
- printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj "
- "name\n");
+ pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
for_each_process(p) {
- if (p->flags & PF_KTHREAD)
- continue;
- if (mem && !task_in_mem_cgroup(p, mem))
+ if (oom_unkillable_task(p, memcg, nodemask))
continue;
task = find_lock_task_mm(p);
continue;
}
- printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3u %3d %s\n",
- task->pid, __task_cred(task)->uid, task->tgid,
- task->mm->total_vm, get_mm_rss(task->mm),
- task_cpu(task), task->signal->oom_adj, task->comm);
+ pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
+ task->pid, task_uid(task), task->tgid,
+ task->mm->total_vm, get_mm_rss(task->mm),
+ task_cpu(task), task->signal->oom_adj,
+ task->signal->oom_score_adj, task->comm);
task_unlock(task);
}
}
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
- struct mem_cgroup *mem)
+ struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
task_lock(current);
pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
- "oom_adj=%d\n",
- current->comm, gfp_mask, order, current->signal->oom_adj);
+ "oom_adj=%d, oom_score_adj=%d\n",
+ current->comm, gfp_mask, order, current->signal->oom_adj,
+ current->signal->oom_score_adj);
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
- mem_cgroup_print_oom_info(mem, p);
- show_mem();
+ mem_cgroup_print_oom_info(memcg, p);
+ show_mem(SHOW_MEM_FILTER_NODES);
if (sysctl_oom_dump_tasks)
- dump_tasks(mem);
+ dump_tasks(memcg, nodemask);
}
#define K(x) ((x) << (PAGE_SHIFT-10))
-static int oom_kill_task(struct task_struct *p)
-{
- p = find_lock_task_mm(p);
- if (!p || p->signal->oom_adj == OOM_DISABLE) {
- task_unlock(p);
- return 1;
- }
- pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
- task_pid_nr(p), p->comm, K(p->mm->total_vm),
- K(get_mm_counter(p->mm, MM_ANONPAGES)),
- K(get_mm_counter(p->mm, MM_FILEPAGES)));
- task_unlock(p);
-
- p->rt.time_slice = HZ;
- set_tsk_thread_flag(p, TIF_MEMDIE);
- force_sig(SIGKILL, p);
- return 0;
-}
-#undef K
-
-static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
- unsigned long points, struct mem_cgroup *mem,
- nodemask_t *nodemask, const char *message)
+static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+ struct mem_cgroup *memcg, nodemask_t *nodemask,
+ const char *message)
{
struct task_struct *victim = p;
struct task_struct *child;
struct task_struct *t = p;
- unsigned long victim_points = 0;
- struct timespec uptime;
-
- if (printk_ratelimit())
- dump_header(p, gfp_mask, order, mem);
+ struct mm_struct *mm;
+ unsigned int victim_points = 0;
+ static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
*/
if (p->flags & PF_EXITING) {
set_tsk_thread_flag(p, TIF_MEMDIE);
- return 0;
+ return;
}
+ if (__ratelimit(&oom_rs))
+ dump_header(p, gfp_mask, order, memcg, nodemask);
+
task_lock(p);
- pr_err("%s: Kill process %d (%s) score %lu or sacrifice child\n",
+ pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
message, task_pid_nr(p), p->comm, points);
task_unlock(p);
/*
* If any of p's children has a different mm and is eligible for kill,
- * the one with the highest badness() score is sacrificed for its
+ * the one with the highest oom_badness() score is sacrificed for its
* parent. This attempts to lose the minimal amount of work done while
* still freeing memory.
*/
- do_posix_clock_monotonic_gettime(&uptime);
do {
list_for_each_entry(child, &t->children, sibling) {
- unsigned long child_points;
+ unsigned int child_points;
if (child->mm == p->mm)
continue;
-
- /* badness() returns 0 if the thread is unkillable */
- child_points = badness(child, mem, nodemask,
- uptime.tv_sec);
+ /*
+ * oom_badness() returns 0 if the thread is unkillable
+ */
+ child_points = oom_badness(child, memcg, nodemask,
+ totalpages);
if (child_points > victim_points) {
victim = child;
victim_points = child_points;
}
} while_each_thread(p, t);
- return oom_kill_task(victim);
+ victim = find_lock_task_mm(victim);
+ if (!victim)
+ return;
+
+ /* mm cannot safely be dereferenced after task_unlock(victim) */
+ mm = victim->mm;
+ pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+ task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
+ K(get_mm_counter(victim->mm, MM_ANONPAGES)),
+ K(get_mm_counter(victim->mm, MM_FILEPAGES)));
+ task_unlock(victim);
+
+ /*
+ * Kill all user processes sharing victim->mm in other thread groups, if
+ * any. They don't get access to memory reserves, though, to avoid
+ * depletion of all memory. This prevents mm->mmap_sem livelock when an
+ * oom killed thread cannot exit because it requires the semaphore and
+ * it's contended by another thread trying to allocate memory itself.
+ * That thread will now get access to memory reserves since it has a
+ * pending fatal signal.
+ */
+ for_each_process(p)
+ if (p->mm == mm && !same_thread_group(p, victim) &&
+ !(p->flags & PF_KTHREAD)) {
+ if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ continue;
+
+ task_lock(p); /* Protect ->comm from prctl() */
+ pr_err("Kill process %d (%s) sharing same memory\n",
+ task_pid_nr(p), p->comm);
+ task_unlock(p);
+ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
+ }
+
+ set_tsk_thread_flag(victim, TIF_MEMDIE);
+ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
}
+#undef K
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order)
+ int order, const nodemask_t *nodemask)
{
if (likely(!sysctl_panic_on_oom))
return;
return;
}
read_lock(&tasklist_lock);
- dump_header(NULL, gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL, nodemask);
read_unlock(&tasklist_lock);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
+void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ int order)
{
- unsigned long points = 0;
+ unsigned long limit;
+ unsigned int points = 0;
struct task_struct *p;
- check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
- read_lock(&tasklist_lock);
-retry:
- p = select_bad_process(&points, mem, NULL);
- if (!p || PTR_ERR(p) == -1UL)
- goto out;
+ /*
+ * If current has a pending SIGKILL, then automatically select it. The
+ * goal is to allow it to allocate so that it may quickly exit and free
+ * its memory.
+ */
+ if (fatal_signal_pending(current)) {
+ set_thread_flag(TIF_MEMDIE);
+ return;
+ }
- if (oom_kill_process(p, gfp_mask, 0, points, mem, NULL,
- "Memory cgroup out of memory"))
- goto retry;
-out:
+ check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
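+ /*
+ * Convert the memcg byte limit to pages so it can serve as the
+ * totalpages base for oom_badness(): with 4KB pages, for example, a
+ * 512MB hard limit normalizes to 131072 pages, so a task consuming the
+ * whole limit scores close to 1000.
+ */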
+ limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
+ read_lock(&tasklist_lock);
+ p = select_bad_process(&points, limit, memcg, NULL, false);
+ if (p && PTR_ERR(p) != -1UL)
+ oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
+ "Memory cgroup out of memory");
read_unlock(&tasklist_lock);
}
#endif
* @gfp_mask: memory allocation flags
* @order: amount of memory being requested as a power of 2
* @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
* don't have to be perfect here, we just have to be good.
*/
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
- int order, nodemask_t *nodemask)
+ int order, nodemask_t *nodemask, bool force_kill)
{
+ const nodemask_t *mpol_mask;
struct task_struct *p;
+ unsigned long totalpages;
unsigned long freed = 0;
- unsigned long points;
+ unsigned int points;
enum oom_constraint constraint = CONSTRAINT_NONE;
+ int killed = 0;
blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
if (freed > 0)
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
- if (zonelist)
- constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
- check_panic_on_oom(constraint, gfp_mask, order);
+ constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
+ &totalpages);
+ mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+ check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
read_lock(&tasklist_lock);
if (sysctl_oom_kill_allocating_task &&
- !oom_unkillable_task(current, NULL, nodemask)) {
- /*
- * oom_kill_process() needs tasklist_lock held. If it returns
- * non-zero, current could not be killed so we must fallback to
- * the tasklist scan.
- */
- if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
- nodemask,
- "Out of memory (oom_kill_allocating_task)"))
- return;
+ !oom_unkillable_task(current, NULL, nodemask) &&
+ current->mm) {
+ oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
+ nodemask,
+ "Out of memory (oom_kill_allocating_task)");
+ goto out;
}
-retry:
- p = select_bad_process(&points, NULL,
- constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
- NULL);
- if (PTR_ERR(p) == -1UL)
- return;
-
+ p = select_bad_process(&points, totalpages, NULL, mpol_mask,
+ force_kill);
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
- dump_header(NULL, gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
read_unlock(&tasklist_lock);
panic("Out of memory and no killable processes...\n");
}
-
- if (oom_kill_process(p, gfp_mask, order, points, NULL, nodemask,
- "Out of memory"))
- goto retry;
+ if (PTR_ERR(p) != -1UL) {
+ oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
+ nodemask, "Out of memory");
+ killed = 1;
+ }
+out:
read_unlock(&tasklist_lock);
/*
* Give "p" a good chance of killing itself before we
* retry to allocate memory unless "p" is current
*/
- if (!test_thread_flag(TIF_MEMDIE))
+ if (killed && !test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1);
}
void pagefault_out_of_memory(void)
{
if (try_set_system_oom()) {
- out_of_memory(NULL, 0, 0, NULL);
+ out_of_memory(NULL, 0, 0, NULL, false);
clear_system_oom();
}
if (!test_thread_flag(TIF_MEMDIE))