#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm_inline.h>
-#include <linux/buffer_head.h> /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
+#include <linux/gfp.h>
#include "internal.h"
static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
/*
* This path almost never happens for VM activity - pages are normally
spin_lock_irqsave(&zone->lru_lock, flags);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
- del_page_from_lru(zone, page);
+ del_page_from_lru_list(zone, page, page_off_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
- free_hot_page(page);
}
-static void put_compound_page(struct page *page)
+static void __put_single_page(struct page *page)
+{
+ __page_cache_release(page);
+ free_hot_cold_page(page, 0);
+}
+
+static void __put_compound_page(struct page *page)
{
- page = compound_head(page);
- if (put_page_testzero(page)) {
- compound_page_dtor *dtor;
+ compound_page_dtor *dtor;
+
+ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
+ (*dtor)(page);
+}
- dtor = get_compound_page_dtor(page);
- (*dtor)(page);
+static void put_compound_page(struct page *page)
+{
+ if (unlikely(PageTail(page))) {
+ /* __split_huge_page_refcount can run under us */
+ struct page *page_head = compound_trans_head(page);
+
+ if (likely(page != page_head &&
+ get_page_unless_zero(page_head))) {
+ unsigned long flags;
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+ * we obtain the lock. That is ok as long as it
+ * can't be freed from under us.
+ */
+ flags = compound_lock_irqsave(page_head);
+ if (unlikely(!PageTail(page))) {
+ /* __split_huge_page_refcount run before us */
+ compound_unlock_irqrestore(page_head, flags);
+ VM_BUG_ON(PageHead(page_head));
+ if (put_page_testzero(page_head))
+ __put_single_page(page_head);
+ out_put_single:
+ if (put_page_testzero(page))
+ __put_single_page(page);
+ return;
+ }
+ VM_BUG_ON(page_head != page->first_page);
+ /*
+ * We can release the refcount taken by
+ * get_page_unless_zero() now that
+ * __split_huge_page_refcount() is blocked on
+ * the compound_lock.
+ */
+ if (put_page_testzero(page_head))
+ VM_BUG_ON(1);
+ /* __split_huge_page_refcount will wait now */
+ VM_BUG_ON(page_mapcount(page) <= 0);
+ atomic_dec(&page->_mapcount);
+ VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+ VM_BUG_ON(atomic_read(&page->_count) != 0);
+ compound_unlock_irqrestore(page_head, flags);
+ if (put_page_testzero(page_head)) {
+ if (PageHead(page_head))
+ __put_compound_page(page_head);
+ else
+ __put_single_page(page_head);
+ }
+ } else {
+ /* page_head is a dangling pointer */
+ VM_BUG_ON(PageTail(page));
+ goto out_put_single;
+ }
+ } else if (put_page_testzero(page)) {
+ if (PageHead(page))
+ __put_compound_page(page);
+ else
+ __put_single_page(page);
}
}
if (unlikely(PageCompound(page)))
put_compound_page(page);
else if (put_page_testzero(page))
- __page_cache_release(page);
+ __put_single_page(page);
}
EXPORT_SYMBOL(put_page);
+/*
+ * This function is exported but must not be called by anything other
+ * than get_page(). It implements the slow path of get_page().
+ */
+bool __get_page_tail(struct page *page)
+{
+ /*
+ * This takes care of get_page() if run on a tail page
+ * returned by one of the get_user_pages/follow_page variants.
+ * get_user_pages/follow_page itself doesn't need the compound
+ * lock because it runs __get_page_tail_foll() under the
+ * proper PT lock that already serializes against
+ * split_huge_page().
+ */
+ unsigned long flags;
+ bool got = false;
+ struct page *page_head = compound_trans_head(page);
+
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+ * we obtain the lock. That is ok as long as it
+ * can't be freed from under us.
+ */
+ flags = compound_lock_irqsave(page_head);
+ /* here __split_huge_page_refcount won't run anymore */
+ if (likely(PageTail(page))) {
+ __get_page_tail_foll(page, false);
+ got = true;
+ }
+ compound_unlock_irqrestore(page_head, flags);
+ if (unlikely(!got))
+ put_page(page_head);
+ }
+ return got;
+}
+EXPORT_SYMBOL(__get_page_tail);
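/*
 * For context (not part of this patch): a minimal sketch of the
 * get_page() fast path in include/linux/mm.h of this era, which falls
 * through to __get_page_tail() only for tail pages:
 */
static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}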
+
/**
* put_pages_list() - release a list of pages
* @pages: list of pages threaded on page->lru
}
EXPORT_SYMBOL(put_pages_list);
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
+static void pagevec_lru_move_fn(struct pagevec *pvec,
+ void (*move_fn)(struct page *page, void *arg),
+ void *arg)
{
int i;
- int pgmoved = 0;
struct zone *zone = NULL;
+ unsigned long flags = 0;
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
if (pagezone != zone) {
if (zone)
- spin_unlock(&zone->lru_lock);
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
zone = pagezone;
- spin_lock(&zone->lru_lock);
- }
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
- int lru = page_is_file_cache(page);
- list_move_tail(&page->lru, &zone->lru[lru].list);
- pgmoved++;
+ spin_lock_irqsave(&zone->lru_lock, flags);
}
+
+ (*move_fn)(page, arg);
}
if (zone)
- spin_unlock(&zone->lru_lock);
- __count_vm_events(PGROTATED, pgmoved);
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
release_pages(pvec->pages, pvec->nr, pvec->cold);
pagevec_reinit(pvec);
}
+static void pagevec_move_tail_fn(struct page *page, void *arg)
+{
+ int *pgmoved = arg;
+
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+ enum lru_list lru = page_lru_base_type(page);
+ struct lruvec *lruvec;
+
+ lruvec = mem_cgroup_lru_move_lists(page_zone(page),
+ page, lru, lru);
+ list_move_tail(&page->lru, &lruvec->lists[lru]);
+ (*pgmoved)++;
+ }
+}
+
+/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
+{
+ int pgmoved = 0;
+
+ pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+ __count_vm_events(PGROTATED, pgmoved);
+}
+
/*
* Writeback is about to end against a page which has been marked for immediate
* reclaim. If it still appears to be reclaimable, move it to the tail of the
* inactive list.
*/
void rotate_reclaimable_page(struct page *page)
{
if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
!PageUnevictable(page) && PageLRU(page)) {
}
}
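/*
 * For context, a sketch of the main caller of rotate_reclaimable_page(),
 * end_page_writeback() in mm/filemap.c (roughly as it reads in this era):
 * a page marked PG_reclaim while under writeback is rotated to the tail
 * of the inactive list as soon as its writeback completes.
 */
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}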
-/*
- * FIXME: speed this up?
- */
-void activate_page(struct page *page)
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+ int file, int rotated)
+{
+ struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+ struct zone_reclaim_stat *memcg_reclaim_stat;
+
+ memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+ reclaim_stat->recent_scanned[file]++;
+ if (rotated)
+ reclaim_stat->recent_rotated[file]++;
+
+ if (!memcg_reclaim_stat)
+ return;
+
+ memcg_reclaim_stat->recent_scanned[file]++;
+ if (rotated)
+ memcg_reclaim_stat->recent_rotated[file]++;
+}
+
+static void __activate_page(struct page *page, void *arg)
{
struct zone *zone = page_zone(page);
- spin_lock_irq(&zone->lru_lock);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int file = page_is_file_cache(page);
- int lru = LRU_BASE + file;
+ int lru = page_lru_base_type(page);
del_page_from_lru_list(zone, page, lru);
SetPageActive(page);
lru += LRU_ACTIVE;
add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE);
- mem_cgroup_move_lists(page, lru);
- zone->recent_rotated[!!file]++;
- zone->recent_scanned[!!file]++;
+ update_page_reclaim_stat(zone, page, file, 1);
}
+}
+
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+
+static void activate_page_drain(int cpu)
+{
+ struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+
+ if (pagevec_count(pvec))
+ pagevec_lru_move_fn(pvec, __activate_page, NULL);
+}
+
+void activate_page(struct page *page)
+{
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+ struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+
+ page_cache_get(page);
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, __activate_page, NULL);
+ put_cpu_var(activate_page_pvecs);
+ }
+}
+
+#else
+static inline void activate_page_drain(int cpu)
+{
+}
+
+void activate_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irq(&zone->lru_lock);
+ __activate_page(page, NULL);
spin_unlock_irq(&zone->lru_lock);
}
+#endif
/*
* Mark a page as having seen activity.
SetPageReferenced(page);
}
}
-
EXPORT_SYMBOL(mark_page_accessed);
void __lru_cache_add(struct page *page, enum lru_list lru)
page_cache_get(page);
if (!pagevec_add(pvec, page))
- ____pagevec_lru_add(pvec, lru);
+ __pagevec_lru_add(pvec, lru);
put_cpu_var(lru_add_pvecs);
}
+EXPORT_SYMBOL(__lru_cache_add);
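/*
 * For reference, the convenience wrappers built on __lru_cache_add(),
 * sketched roughly as they appear in include/linux/swap.h:
 */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}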
/**
 * lru_cache_add_lru - add a page to an LRU list
spin_unlock_irq(&zone->lru_lock);
}
-/**
- * lru_cache_add_active_or_unevictable
- * @page: the page to be added to LRU
- * @vma: vma in which page is mapped for determining reclaimability
+/*
+ * If the page cannot be invalidated, it is moved to the
+ * inactive list to speed up its reclaim. It is moved to the
+ * head of the list, rather than the tail, to give the flusher
+ * threads some time to write it out, as this is much more
+ * effective than the single-page writeout from reclaim.
+ *
+ * If the page is not mapped and is dirty or under writeback, it can
+ * be reclaimed as soon as possible by setting PG_reclaim.
+ *
+ * 1. active, mapped page -> none
+ * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
+ * 3. inactive, mapped page -> none
+ * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 5. inactive, clean -> inactive, tail
+ * 6. Others -> none
*
- * place @page on active or unevictable LRU list, depending on
- * page_evictable(). Note that if the page is not evictable,
- * it goes directly back onto it's zone's unevictable list. It does
- * NOT use a per cpu pagevec.
+ * In case 4, the page is moved to the head of the inactive list because
+ * the VM expects flusher threads to write it out, which is much more
+ * effective than the single-page writeout from reclaim.
*/
-void lru_cache_add_active_or_unevictable(struct page *page,
- struct vm_area_struct *vma)
+static void lru_deactivate_fn(struct page *page, void *arg)
{
- if (page_evictable(page, vma))
- lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
- else
- add_page_to_unevictable_list(page);
+ int lru, file;
+ bool active;
+ struct zone *zone = page_zone(page);
+
+ if (!PageLRU(page))
+ return;
+
+ if (PageUnevictable(page))
+ return;
+
+ /* Some processes are using the page */
+ if (page_mapped(page))
+ return;
+
+ active = PageActive(page);
+
+ file = page_is_file_cache(page);
+ lru = page_lru_base_type(page);
+ del_page_from_lru_list(zone, page, lru + active);
+ ClearPageActive(page);
+ ClearPageReferenced(page);
+ add_page_to_lru_list(zone, page, lru);
+
+ if (PageWriteback(page) || PageDirty(page)) {
+ /*
+		 * Setting PG_reclaim can race with end_page_writeback,
+		 * which can confuse readahead. But the race window is
+		 * _really_ small and this is a non-critical problem.
+ */
+ SetPageReclaim(page);
+ } else {
+ struct lruvec *lruvec;
+ /*
+		 * The page's writeback ended while the page sat in the
+		 * pagevec, so move it to the tail of the inactive list.
+ */
+ lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
+ list_move_tail(&page->lru, &lruvec->lists[lru]);
+ __count_vm_event(PGROTATED);
+ }
+
+ if (active)
+ __count_vm_event(PGDEACTIVATE);
+ update_page_reclaim_stat(zone, page, file, 0);
}
/*
* Either "cpu" is the current CPU, and preemption has already been
* disabled; or "cpu" is being hot-unplugged, and is already dead.
*/
-static void drain_cpu_pagevecs(int cpu)
+void lru_add_drain_cpu(int cpu)
{
struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
struct pagevec *pvec;
for_each_lru(lru) {
pvec = &pvecs[lru - LRU_BASE];
if (pagevec_count(pvec))
- ____pagevec_lru_add(pvec, lru);
+ __pagevec_lru_add(pvec, lru);
}
pvec = &per_cpu(lru_rotate_pvecs, cpu);
pagevec_move_tail(pvec);
local_irq_restore(flags);
}
+
+ pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+ if (pagevec_count(pvec))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+
+ activate_page_drain(cpu);
+}
+
+/**
+ * deactivate_page - forcefully deactivate a page
+ * @page: page to deactivate
+ *
+ * This function hints the VM that @page is a good reclaim candidate,
+ * for example if its invalidation fails due to the page being dirty
+ * or under writeback.
+ */
+void deactivate_page(struct page *page)
+{
+ /*
+	 * In a workload with many unevictable pages (such as one making
+	 * heavy use of mprotect), deactivating unevictable pages to
+	 * accelerate reclaim is pointless.
+ */
+ if (PageUnevictable(page))
+ return;
+
+ if (likely(get_page_unless_zero(page))) {
+ struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+ put_cpu_var(lru_deactivate_pvecs);
+ }
}
void lru_add_drain(void)
{
- drain_cpu_pagevecs(get_cpu());
+ lru_add_drain_cpu(get_cpu());
put_cpu();
}
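/*
 * For context, the system-wide drain built on lru_add_drain(), sketched
 * roughly as it appears elsewhere in mm/swap.c: the drain is scheduled
 * on every online CPU via the workqueue API.
 */
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}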
void release_pages(struct page **pages, int nr, int cold)
{
int i;
- struct pagevec pages_to_free;
+ LIST_HEAD(pages_to_free);
struct zone *zone = NULL;
unsigned long uninitialized_var(flags);
- pagevec_init(&pages_to_free, cold);
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
}
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
- del_page_from_lru(zone, page);
+ del_page_from_lru_list(zone, page, page_off_lru(page));
}
- if (!pagevec_add(&pages_to_free, page)) {
- if (zone) {
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- zone = NULL;
- }
- __pagevec_free(&pages_to_free);
- pagevec_reinit(&pages_to_free);
- }
+ list_add(&page->lru, &pages_to_free);
}
if (zone)
spin_unlock_irqrestore(&zone->lru_lock, flags);
- pagevec_free(&pages_to_free);
+ free_hot_cold_page_list(&pages_to_free, cold);
}
+EXPORT_SYMBOL(release_pages);
/*
* The pages which we're about to release may be in the deferred lru-addition
release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
pagevec_reinit(pvec);
}
-
EXPORT_SYMBOL(__pagevec_release);
-/*
- * Add the passed pages to the LRU, then drop the caller's refcount
- * on them. Reinitialises the caller's pagevec.
- */
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/* used by __split_huge_page_refcount() */
+void lru_add_page_tail(struct zone* zone,
+ struct page *page, struct page *page_tail)
{
- int i;
- struct zone *zone = NULL;
- VM_BUG_ON(is_unevictable_lru(lru));
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
- int file;
-
- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
+ int uninitialized_var(active);
+ enum lru_list lru;
+ const int file = 0;
+
+ VM_BUG_ON(!PageHead(page));
+ VM_BUG_ON(PageCompound(page_tail));
+ VM_BUG_ON(PageLRU(page_tail));
+ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+
+ SetPageLRU(page_tail);
+
+ if (page_evictable(page_tail, NULL)) {
+ if (PageActive(page)) {
+ SetPageActive(page_tail);
+ active = 1;
+ lru = LRU_ACTIVE_ANON;
+ } else {
+ active = 0;
+ lru = LRU_INACTIVE_ANON;
}
- VM_BUG_ON(PageActive(page));
- VM_BUG_ON(PageUnevictable(page));
- VM_BUG_ON(PageLRU(page));
- SetPageLRU(page);
- file = is_file_lru(lru);
- zone->recent_scanned[file]++;
- if (is_active_lru(lru)) {
- SetPageActive(page);
- zone->recent_rotated[file]++;
- }
- add_page_to_lru_list(zone, page, lru);
+ } else {
+ SetPageUnevictable(page_tail);
+ lru = LRU_UNEVICTABLE;
}
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- release_pages(pvec->pages, pvec->nr, pvec->cold);
- pagevec_reinit(pvec);
-}
-EXPORT_SYMBOL(____pagevec_lru_add);
+ if (likely(PageLRU(page)))
+ list_add_tail(&page_tail->lru, &page->lru);
+ else {
+ struct list_head *list_head;
+ /*
+ * Head page has not yet been counted, as an hpage,
+ * so we must account for each subpage individually.
+ *
+ * Use the standard add function to put page_tail on the list,
+ * but then correct its position so they all end up in order.
+ */
+ add_page_to_lru_list(zone, page_tail, lru);
+ list_head = page_tail->lru.prev;
+ list_move_tail(&page_tail->lru, list_head);
+ }
-/*
- * Try to drop buffers from the pages in a pagevec
- */
-void pagevec_strip(struct pagevec *pvec)
+ if (!PageUnevictable(page))
+ update_page_reclaim_stat(zone, page_tail, file, active);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static void __pagevec_lru_add_fn(struct page *page, void *arg)
{
- int i;
+ enum lru_list lru = (enum lru_list)arg;
+ struct zone *zone = page_zone(page);
+ int file = is_file_lru(lru);
+ int active = is_active_lru(lru);
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
+ VM_BUG_ON(PageActive(page));
+ VM_BUG_ON(PageUnevictable(page));
+ VM_BUG_ON(PageLRU(page));
- if (PagePrivate(page) && trylock_page(page)) {
- if (PagePrivate(page))
- try_to_release_page(page, 0);
- unlock_page(page);
- }
- }
+ SetPageLRU(page);
+ if (active)
+ SetPageActive(page);
+ add_page_to_lru_list(zone, page, lru);
+ update_page_reclaim_stat(zone, page, file, active);
}
-/**
- * pagevec_swap_free - try to free swap space from the pages in a pagevec
- * @pvec: pagevec with swapcache pages to free the swap space of
- *
- * The caller needs to hold an extra reference to each page and
- * not hold the page lock on the pages. This function uses a
- * trylock on the page lock so it may not always free the swap
- * space associated with a page.
+/*
+ * Add the passed pages to the LRU, then drop the caller's refcount
+ * on them. Reinitialises the caller's pagevec.
*/
-void pagevec_swap_free(struct pagevec *pvec)
+void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
- int i;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
+ VM_BUG_ON(is_unevictable_lru(lru));
- if (PageSwapCache(page) && trylock_page(page)) {
- if (PageSwapCache(page))
- remove_exclusive_swap_page_ref(page);
- unlock_page(page);
- }
- }
+ pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
+EXPORT_SYMBOL(__pagevec_lru_add);
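/*
 * Hypothetical usage sketch (example_add_file_pages() is not a real
 * kernel function) of the batching pattern __pagevec_lru_add() serves:
 * take a reference per page, batch pages into a pagevec, and flush full
 * batches; __pagevec_lru_add() drops the references via release_pages().
 */
static void example_add_file_pages(struct page **pages, int nr)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < nr; i++) {
		page_cache_get(pages[i]);
		if (!pagevec_add(&pvec, pages[i]))
			__pagevec_lru_add(&pvec, LRU_INACTIVE_FILE);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec, LRU_INACTIVE_FILE);
}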
/**
* pagevec_lookup - gang pagecache lookup
pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
return pagevec_count(pvec);
}
-
EXPORT_SYMBOL(pagevec_lookup);
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
nr_pages, pvec->pages);
return pagevec_count(pvec);
}
-
EXPORT_SYMBOL(pagevec_lookup_tag);
-#ifdef CONFIG_SMP
-/*
- * We tolerate a little inaccuracy to avoid ping-ponging the counter between
- * CPUs
- */
-#define ACCT_THRESHOLD max(16, NR_CPUS * 2)
-
-static DEFINE_PER_CPU(long, committed_space);
-
-void vm_acct_memory(long pages)
-{
- long *local;
-
- preempt_disable();
- local = &__get_cpu_var(committed_space);
- *local += pages;
- if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
- atomic_long_add(*local, &vm_committed_space);
- *local = 0;
- }
- preempt_enable();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Drop the CPU's cached committed space back into the central pool. */
-static int cpu_swap_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- long *committed;
-
- committed = &per_cpu(committed_space, (long)hcpu);
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- atomic_long_add(*committed, &vm_committed_space);
- *committed = 0;
- drain_cpu_pagevecs((long)hcpu);
- }
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-#endif /* CONFIG_SMP */
-
/*
* Perform any setup for the swap system
*/
void __init swap_setup(void)
{
- unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+ unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
bdi_init(swapper_space.backing_dev_info);
 * Right now other parts of the system mean that we
* _really_ don't want to cluster much more
*/
-#ifdef CONFIG_HOTPLUG_CPU
- hotcpu_notifier(cpu_swap_callback, 0);
-#endif
}