/*
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token based thrashing protection, using the algorithm
 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved algorithm to pass token:
 * Each task has a priority which is incremented if it contended
 * for the token in an interval less than its previous attempt.
 * If the token is acquired, that task's priority is boosted to prevent
 * the token from bouncing around too often and to let the task make
 * some progress in its execution.
 */
20 #include <linux/jiffies.h>
22 #include <linux/sched.h>
23 #include <linux/swap.h>
24 #include <linux/memcontrol.h>
26 #include <trace/events/vmscan.h>
28 #define TOKEN_AGING_INTERVAL (0xFF)
30 static DEFINE_SPINLOCK(swap_token_lock);
31 struct mm_struct *swap_token_mm;
32 static struct mem_cgroup *swap_token_memcg;
34 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
35 static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
37 struct mem_cgroup *memcg;
39 memcg = try_get_mem_cgroup_from_mm(mm);
41 css_put(mem_cgroup_css(memcg));
46 static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
52 void grab_swap_token(struct mm_struct *mm)
55 unsigned int old_prio;
56 static unsigned int global_faults;
57 static unsigned int last_aging;
63 old_prio = mm->token_priority;
64 current_interval = global_faults - mm->faultstamp;
66 if (!spin_trylock(&swap_token_lock))
69 /* First come first served */
74 * Usually, we don't need priority aging because long interval faults
75 * makes priority decrease quickly. But there is one exception. If the
76 * token owner task is sleeping, it never make long interval faults.
77 * Thus, we need a priority aging mechanism instead. The requirements
78 * of priority aging are
79 * 1) An aging interval is reasonable enough long. Too short aging
80 * interval makes quick swap token lost and decrease performance.
81 * 2) The swap token owner task have to get priority aging even if
84 if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
85 swap_token_mm->token_priority /= 2;
86 last_aging = global_faults;
89 if (mm == swap_token_mm) {
90 mm->token_priority += 2;
94 if (current_interval < mm->last_interval)
97 if (likely(mm->token_priority > 0))
101 /* Check if we deserve the token */
102 if (mm->token_priority > swap_token_mm->token_priority)
106 trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
109 mm->faultstamp = global_faults;
110 mm->last_interval = current_interval;
111 spin_unlock(&swap_token_lock);
115 mm->token_priority += 2;
116 trace_replace_swap_token(swap_token_mm, mm);
118 swap_token_memcg = swap_token_memcg_from_mm(mm);
119 last_aging = global_faults;
123 /* Called on process exit. */
124 void __put_swap_token(struct mm_struct *mm)
126 spin_lock(&swap_token_lock);
127 if (likely(mm == swap_token_mm)) {
128 trace_put_swap_token(swap_token_mm);
129 swap_token_mm = NULL;
130 swap_token_memcg = NULL;
132 spin_unlock(&swap_token_lock);
135 static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
146 void disable_swap_token(struct mem_cgroup *memcg)
148 /* memcg reclaim don't disable unrelated mm token. */
149 if (match_memcg(memcg, swap_token_memcg)) {
150 spin_lock(&swap_token_lock);
151 if (match_memcg(memcg, swap_token_memcg)) {
152 trace_disable_swap_token(swap_token_mm);
153 swap_token_mm = NULL;
154 swap_token_memcg = NULL;
156 spin_unlock(&swap_token_lock);