index e304504..55b266d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -21,7 +21,7 @@
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mm_inline.h>
 #include <linux/buffer_head.h> /* for try_to_release_page() */
 #include <linux/percpu_counter.h>
 #include <linux/notifier.h>
 #include <linux/backing-dev.h>
 #include <linux/memcontrol.h>
+#include <linux/gfp.h>
+
+#include "internal.h"
 
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
 static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -53,17 +57,81 @@ static void __page_cache_release(struct page *page)
                del_page_from_lru(zone, page);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
-       free_hot_page(page);
 }
 
-static void put_compound_page(struct page *page)
+static void __put_single_page(struct page *page)
+{
+       __page_cache_release(page);
+       free_hot_cold_page(page, 0);
+}
+
+static void __put_compound_page(struct page *page)
 {
-       page = compound_head(page);
-       if (put_page_testzero(page)) {
-               compound_page_dtor *dtor;
+       compound_page_dtor *dtor;
+
+       __page_cache_release(page);
+       dtor = get_compound_page_dtor(page);
+       (*dtor)(page);
+}
 
-               dtor = get_compound_page_dtor(page);
-               (*dtor)(page);
+static void put_compound_page(struct page *page)
+{
+       if (unlikely(PageTail(page))) {
+               /* __split_huge_page_refcount can run under us */
+               struct page *page_head = compound_trans_head(page);
+
+               if (likely(page != page_head &&
+                          get_page_unless_zero(page_head))) {
+                       unsigned long flags;
+                       /*
+                        * page_head wasn't a dangling pointer but it
+                        * may not be a head page anymore by the time
+                        * we obtain the lock. That is ok as long as it
+                        * can't be freed from under us.
+                        */
+                       flags = compound_lock_irqsave(page_head);
+                       if (unlikely(!PageTail(page))) {
+                               /* __split_huge_page_refcount run before us */
+                               compound_unlock_irqrestore(page_head, flags);
+                               VM_BUG_ON(PageHead(page_head));
+                               if (put_page_testzero(page_head))
+                                       __put_single_page(page_head);
+                       out_put_single:
+                               if (put_page_testzero(page))
+                                       __put_single_page(page);
+                               return;
+                       }
+                       VM_BUG_ON(page_head != page->first_page);
+                       /*
+                        * We can release the refcount taken by
+                        * get_page_unless_zero() now that
+                        * __split_huge_page_refcount() is blocked on
+                        * the compound_lock.
+                        */
+                       if (put_page_testzero(page_head))
+                               VM_BUG_ON(1);
+                       /* __split_huge_page_refcount will wait now */
+                       VM_BUG_ON(page_mapcount(page) <= 0);
+                       atomic_dec(&page->_mapcount);
+                       VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+                       VM_BUG_ON(atomic_read(&page->_count) != 0);
+                       compound_unlock_irqrestore(page_head, flags);
+                       if (put_page_testzero(page_head)) {
+                               if (PageHead(page_head))
+                                       __put_compound_page(page_head);
+                               else
+                                       __put_single_page(page_head);
+                       }
+               } else {
+                       /* page_head is a dangling pointer */
+                       VM_BUG_ON(PageTail(page));
+                       goto out_put_single;
+               }
+       } else if (put_page_testzero(page)) {
+               if (PageHead(page))
+                       __put_compound_page(page);
+               else
+                       __put_single_page(page);
        }
 }
 
@@ -72,10 +140,49 @@ void put_page(struct page *page)
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
-               __page_cache_release(page);
+               __put_single_page(page);
 }
 EXPORT_SYMBOL(put_page);
 
+/*
+ * This function is exported but must not be called by anything other
+ * than get_page(). It implements the slow path of get_page().
+ */
+bool __get_page_tail(struct page *page)
+{
+       /*
+        * This takes care of get_page() if run on a tail page
+        * returned by one of the get_user_pages/follow_page variants.
+        * get_user_pages/follow_page itself doesn't need the compound
+        * lock because it runs __get_page_tail_foll() under the
+        * proper PT lock that already serializes against
+        * split_huge_page().
+        */
+       unsigned long flags;
+       bool got = false;
+       struct page *page_head = compound_trans_head(page);
+
+       if (likely(page != page_head && get_page_unless_zero(page_head))) {
+               /*
+                * page_head wasn't a dangling pointer but it
+                * may not be a head page anymore by the time
+                * we obtain the lock. That is ok as long as it
+                * can't be freed from under us.
+                */
+               flags = compound_lock_irqsave(page_head);
+               /* here __split_huge_page_refcount won't run anymore */
+               if (likely(PageTail(page))) {
+                       __get_page_tail_foll(page, false);
+                       got = true;
+               }
+               compound_unlock_irqrestore(page_head, flags);
+               if (unlikely(!got))
+                       put_page(page_head);
+       }
+       return got;
+}
+EXPORT_SYMBOL(__get_page_tail);
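
[Editorial note: for context, the fast path lives in get_page() in include/linux/mm.h; the sketch below is an approximation of its shape at this point in time, not a verbatim copy of the header.]

static inline void get_page(struct page *page)
{
        /* THP tail pages take the exported slow path above. */
        if (unlikely(PageTail(page)))
                if (likely(__get_page_tail(page)))
                        return;
        /*
         * Getting a normal page or the head of a compound page
         * requires an already elevated page->_count.
         */
        VM_BUG_ON(atomic_read(&page->_count) <= 0);
        atomic_inc(&page->_count);
}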
+
 /**
  * put_pages_list() - release a list of pages
  * @pages: list of pages threaded on page->lru
@@ -95,15 +202,13 @@ void put_pages_list(struct list_head *pages)
 }
 EXPORT_SYMBOL(put_pages_list);
 
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
+static void pagevec_lru_move_fn(struct pagevec *pvec,
+                               void (*move_fn)(struct page *page, void *arg),
+                               void *arg)
 {
        int i;
-       int pgmoved = 0;
        struct zone *zone = NULL;
+       unsigned long flags = 0;
 
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
@@ -111,31 +216,53 @@ static void pagevec_move_tail(struct pagevec *pvec)
 
                if (pagezone != zone) {
                        if (zone)
-                               spin_unlock(&zone->lru_lock);
+                               spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
-                       spin_lock(&zone->lru_lock);
-               }
-               if (PageLRU(page) && !PageActive(page)) {
-                       list_move_tail(&page->lru, &zone->lru[LRU_INACTIVE].list);
-                       pgmoved++;
+                       spin_lock_irqsave(&zone->lru_lock, flags);
                }
+
+               (*move_fn)(page, arg);
        }
        if (zone)
-               spin_unlock(&zone->lru_lock);
-       __count_vm_events(PGROTATED, pgmoved);
+               spin_unlock_irqrestore(&zone->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
 }
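
[Editorial note: pagevec_lru_move_fn() factors the per-zone lru_lock batching out of the old open-coded loops: move_fn runs once per page with the owning zone's lru_lock held and IRQs disabled, after which the pages are released and the pagevec reinitialised. The helpers below are a purely hypothetical illustration of that contract; they do not exist in the tree.]

/* Hypothetical example: count how many pages of a pagevec are on an LRU. */
static void example_count_lru_fn(struct page *page, void *arg)
{
        unsigned long *nr_lru = arg;

        /* zone->lru_lock is held here, with interrupts disabled. */
        if (PageLRU(page))
                (*nr_lru)++;
}

static unsigned long example_count_lru_pages(struct pagevec *pvec)
{
        unsigned long nr_lru = 0;

        pagevec_lru_move_fn(pvec, example_count_lru_fn, &nr_lru);
        return nr_lru;
}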
 
+static void pagevec_move_tail_fn(struct page *page, void *arg)
+{
+       int *pgmoved = arg;
+       struct zone *zone = page_zone(page);
+
+       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+               enum lru_list lru = page_lru_base_type(page);
+               list_move_tail(&page->lru, &zone->lru[lru].list);
+               mem_cgroup_rotate_reclaimable_page(page);
+               (*pgmoved)++;
+       }
+}
+
+/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
+{
+       int pgmoved = 0;
+
+       pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+       __count_vm_events(PGROTATED, pgmoved);
+}
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
  * inactive list.
  */
-void  rotate_reclaimable_page(struct page *page)
+void rotate_reclaimable_page(struct page *page)
 {
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
-           PageLRU(page)) {
+           !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;
 
@@ -148,23 +275,81 @@ void  rotate_reclaimable_page(struct page *page)
        }
 }
 
-/*
- * FIXME: speed this up?
- */
-void activate_page(struct page *page)
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+                                    int file, int rotated)
+{
+       struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+       struct zone_reclaim_stat *memcg_reclaim_stat;
+
+       memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+       reclaim_stat->recent_scanned[file]++;
+       if (rotated)
+               reclaim_stat->recent_rotated[file]++;
+
+       if (!memcg_reclaim_stat)
+               return;
+
+       memcg_reclaim_stat->recent_scanned[file]++;
+       if (rotated)
+               memcg_reclaim_stat->recent_rotated[file]++;
+}
+
+static void __activate_page(struct page *page, void *arg)
 {
        struct zone *zone = page_zone(page);
 
-       spin_lock_irq(&zone->lru_lock);
-       if (PageLRU(page) && !PageActive(page)) {
-               del_page_from_inactive_list(zone, page);
+       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+               int file = page_is_file_cache(page);
+               int lru = page_lru_base_type(page);
+               del_page_from_lru_list(zone, page, lru);
+
                SetPageActive(page);
-               add_page_to_active_list(zone, page);
+               lru += LRU_ACTIVE;
+               add_page_to_lru_list(zone, page, lru);
                __count_vm_event(PGACTIVATE);
-               mem_cgroup_move_lists(page, true);
+
+               update_page_reclaim_stat(zone, page, file, 1);
+       }
+}
+
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+
+static void activate_page_drain(int cpu)
+{
+       struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+
+       if (pagevec_count(pvec))
+               pagevec_lru_move_fn(pvec, __activate_page, NULL);
+}
+
+void activate_page(struct page *page)
+{
+       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+               struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+
+               page_cache_get(page);
+               if (!pagevec_add(pvec, page))
+                       pagevec_lru_move_fn(pvec, __activate_page, NULL);
+               put_cpu_var(activate_page_pvecs);
        }
+}
+
+#else
+static inline void activate_page_drain(int cpu)
+{
+}
+
+void activate_page(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+
+       spin_lock_irq(&zone->lru_lock);
+       __activate_page(page, NULL);
        spin_unlock_irq(&zone->lru_lock);
 }
+#endif
 
 /*
  * Mark a page as having seen activity.
@@ -175,7 +360,8 @@ void activate_page(struct page *page)
  */
 void mark_page_accessed(struct page *page)
 {
-       if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+       if (!PageActive(page) && !PageUnevictable(page) &&
+                       PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
@@ -194,6 +380,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
                ____pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
 }
+EXPORT_SYMBOL(__lru_cache_add);
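
[Editorial note: most callers do not use __lru_cache_add() directly; the usual entry points are the inline wrappers in include/linux/swap.h, which simply pick the base LRU list. The bodies below are a sketch from memory of this kernel era, so treat the exact text as an assumption.]

static inline void lru_cache_add_anon(struct page *page)
{
        __lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
        __lru_cache_add(page, LRU_INACTIVE_FILE);
}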
 
 /**
  * lru_cache_add_lru - add a page to a page list
@@ -203,13 +390,106 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
        if (PageActive(page)) {
+               VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
+       } else if (PageUnevictable(page)) {
+               VM_BUG_ON(PageActive(page));
+               ClearPageUnevictable(page);
        }
 
-       VM_BUG_ON(PageLRU(page) || PageActive(page));
+       VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
 }
 
+/**
+ * add_page_to_unevictable_list - add a page to the unevictable list
+ * @page:  the page to be added to the unevictable list
+ *
+ * Add page directly to its zone's unevictable list.  To avoid races with
+ * tasks that might make the page evictable again (e.g. via munlock,
+ * munmap or exit) while it's not on the LRU, we want to add the page
+ * while it's locked or otherwise "invisible" to other tasks.  This is
+ * difficult to do when using the pagevec cache, so bypass that.
+ */
+void add_page_to_unevictable_list(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+
+       spin_lock_irq(&zone->lru_lock);
+       SetPageUnevictable(page);
+       SetPageLRU(page);
+       add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+       spin_unlock_irq(&zone->lru_lock);
+}
+
+/*
+ * If the page cannot be invalidated, it is moved to the
+ * inactive list to speed up its reclaim.  It is moved to the
+ * head of the list, rather than the tail, to give the flusher
+ * threads some time to write it out, as this is much more
+ * effective than the single-page writeout from reclaim.
+ *
+ * If the page isn't page_mapped() and is dirty or under writeback,
+ * it can be reclaimed ASAP with the help of PG_reclaim.
+ *
+ * 1. active, mapped page -> none
+ * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
+ * 3. inactive, mapped page -> none
+ * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 5. inactive, clean -> inactive, tail
+ * 6. Others -> none
+ *
+ * In case 4 the page is moved to the head of the inactive list because
+ * the VM expects the flusher threads to write it out, which is much
+ * more effective than the single-page writeout from reclaim.
+ */
+static void lru_deactivate_fn(struct page *page, void *arg)
+{
+       int lru, file;
+       bool active;
+       struct zone *zone = page_zone(page);
+
+       if (!PageLRU(page))
+               return;
+
+       if (PageUnevictable(page))
+               return;
+
+       /* Some processes are using the page */
+       if (page_mapped(page))
+               return;
+
+       active = PageActive(page);
+
+       file = page_is_file_cache(page);
+       lru = page_lru_base_type(page);
+       del_page_from_lru_list(zone, page, lru + active);
+       ClearPageActive(page);
+       ClearPageReferenced(page);
+       add_page_to_lru_list(zone, page, lru);
+
+       if (PageWriteback(page) || PageDirty(page)) {
+               /*
+                * PG_reclaim could be raced with end_page_writeback
+                * It can make readahead confusing.  But race window
+                * is _really_ small and  it's non-critical problem.
+                */
+               SetPageReclaim(page);
+       } else {
+               /*
+                * The page's writeback ended while it was in the pagevec,
+                * so move the page to the tail of the inactive list.
+                */
+               list_move_tail(&page->lru, &zone->lru[lru].list);
+               mem_cgroup_rotate_reclaimable_page(page);
+               __count_vm_event(PGROTATED);
+       }
+
+       if (active)
+               __count_vm_event(PGDEACTIVATE);
+       update_page_reclaim_stat(zone, page, file, 0);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
@@ -236,6 +516,38 @@ static void drain_cpu_pagevecs(int cpu)
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
+
+       pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+       if (pagevec_count(pvec))
+               pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+
+       activate_page_drain(cpu);
+}
+
+/**
+ * deactivate_page - forcefully deactivate a page
+ * @page: page to deactivate
+ *
+ * This function hints the VM that @page is a good reclaim candidate,
+ * for example if its invalidation fails due to the page being dirty
+ * or under writeback.
+ */
+void deactivate_page(struct page *page)
+{
+       /*
+        * In a workload with many unevictable pages (e.g. mlocked memory),
+        * deactivating unevictable pages to accelerate reclaim is pointless.
+        */
+       if (PageUnevictable(page))
+               return;
+
+       if (likely(get_page_unless_zero(page))) {
+               struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+
+               if (!pagevec_add(pvec, page))
+                       pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+               put_cpu_var(lru_deactivate_pvecs);
+       }
 }
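
[Editorial note: the expected caller is the page-cache invalidation path. A minimal sketch of the intended usage pattern, assuming an invalidate_inode_page()-style helper that returns non-zero on success:]

ret = invalidate_inode_page(page);
unlock_page(page);
/*
 * Invalidation failed, e.g. because the page is dirty or under
 * writeback: hint the VM that it is a good reclaim candidate.
 */
if (!ret)
        deactivate_page(page);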
 
 void lru_add_drain(void)
@@ -244,7 +556,6 @@ void lru_add_drain(void)
        put_cpu();
 }
 
-#ifdef CONFIG_NUMA
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
        lru_add_drain();
@@ -258,18 +569,6 @@ int lru_add_drain_all(void)
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
-       lru_add_drain();
-       return 0;
-}
-#endif
-
 /*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
@@ -308,6 +607,7 @@ void release_pages(struct page **pages, int nr, int cold)
 
                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);
+
                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
@@ -334,6 +634,7 @@ void release_pages(struct page **pages, int nr, int cold)
 
        pagevec_free(&pages_to_free);
 }
+EXPORT_SYMBOL(release_pages);
 
 /*
  * The pages which we're about to release may be in the deferred lru-addition
@@ -354,26 +655,59 @@ void __pagevec_release(struct pagevec *pvec)
 
 EXPORT_SYMBOL(__pagevec_release);
 
-/*
- * pagevec_release() for pages which are known to not be on the LRU
- *
- * This function reinitialises the caller's pagevec.
- */
-void __pagevec_release_nonlru(struct pagevec *pvec)
+/* used by __split_huge_page_refcount() */
+void lru_add_page_tail(struct zone* zone,
+                      struct page *page, struct page *page_tail)
 {
-       int i;
-       struct pagevec pages_to_free;
+       int active;
+       enum lru_list lru;
+       const int file = 0;
+       struct list_head *head;
+
+       VM_BUG_ON(!PageHead(page));
+       VM_BUG_ON(PageCompound(page_tail));
+       VM_BUG_ON(PageLRU(page_tail));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+
+       SetPageLRU(page_tail);
+
+       if (page_evictable(page_tail, NULL)) {
+               if (PageActive(page)) {
+                       SetPageActive(page_tail);
+                       active = 1;
+                       lru = LRU_ACTIVE_ANON;
+               } else {
+                       active = 0;
+                       lru = LRU_INACTIVE_ANON;
+               }
+               update_page_reclaim_stat(zone, page_tail, file, active);
+               if (likely(PageLRU(page)))
+                       head = page->lru.prev;
+               else
+                       head = &zone->lru[lru].list;
+               __add_page_to_lru_list(zone, page_tail, lru, head);
+       } else {
+               SetPageUnevictable(page_tail);
+               add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
+       }
+}
 
-       pagevec_init(&pages_to_free, pvec->cold);
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
+static void ____pagevec_lru_add_fn(struct page *page, void *arg)
+{
+       enum lru_list lru = (enum lru_list)arg;
+       struct zone *zone = page_zone(page);
+       int file = is_file_lru(lru);
+       int active = is_active_lru(lru);
 
-               VM_BUG_ON(PageLRU(page));
-               if (put_page_testzero(page))
-                       pagevec_add(&pages_to_free, page);
-       }
-       pagevec_free(&pages_to_free);
-       pagevec_reinit(pvec);
+       VM_BUG_ON(PageActive(page));
+       VM_BUG_ON(PageUnevictable(page));
+       VM_BUG_ON(PageLRU(page));
+
+       SetPageLRU(page);
+       if (active)
+               SetPageActive(page);
+       update_page_reclaim_stat(zone, page, file, active);
+       add_page_to_lru_list(zone, page, lru);
 }
 
 /*
@@ -382,29 +716,9 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
  */
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
-       int i;
-       struct zone *zone = NULL;
+       VM_BUG_ON(is_unevictable_lru(lru));
 
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
-               struct zone *pagezone = page_zone(page);
-
-               if (pagezone != zone) {
-                       if (zone)
-                               spin_unlock_irq(&zone->lru_lock);
-                       zone = pagezone;
-                       spin_lock_irq(&zone->lru_lock);
-               }
-               VM_BUG_ON(PageLRU(page));
-               SetPageLRU(page);
-               if (is_active_lru(lru))
-                       SetPageActive(page);
-               add_page_to_lru_list(zone, page, lru);
-       }
-       if (zone)
-               spin_unlock_irq(&zone->lru_lock);
-       release_pages(pvec->pages, pvec->nr, pvec->cold);
-       pagevec_reinit(pvec);
+       pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
 }
 
 EXPORT_SYMBOL(____pagevec_lru_add);
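
[Editorial note: callers normally reach ____pagevec_lru_add() through inline wrappers in include/linux/pagevec.h that select the base LRU list; the sketch below is an assumption about their shape in this era, not a verbatim copy.]

static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
{
        ____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
}

static inline void __pagevec_lru_add_file(struct pagevec *pvec)
{
        ____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
}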
@@ -419,8 +733,8 @@ void pagevec_strip(struct pagevec *pvec)
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
 
-               if (PagePrivate(page) && trylock_page(page)) {
-                       if (PagePrivate(page))
+               if (page_has_private(page) && trylock_page(page)) {
+                       if (page_has_private(page))
                                try_to_release_page(page, 0);
                        unlock_page(page);
                }
@@ -462,55 +776,12 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 
 EXPORT_SYMBOL(pagevec_lookup_tag);
 
-#ifdef CONFIG_SMP
-/*
- * We tolerate a little inaccuracy to avoid ping-ponging the counter between
- * CPUs
- */
-#define ACCT_THRESHOLD max(16, NR_CPUS * 2)
-
-static DEFINE_PER_CPU(long, committed_space);
-
-void vm_acct_memory(long pages)
-{
-       long *local;
-
-       preempt_disable();
-       local = &__get_cpu_var(committed_space);
-       *local += pages;
-       if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) {
-               atomic_long_add(*local, &vm_committed_space);
-               *local = 0;
-       }
-       preempt_enable();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Drop the CPU's cached committed space back into the central pool. */
-static int cpu_swap_callback(struct notifier_block *nfb,
-                            unsigned long action,
-                            void *hcpu)
-{
-       long *committed;
-
-       committed = &per_cpu(committed_space, (long)hcpu);
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               atomic_long_add(*committed, &vm_committed_space);
-               *committed = 0;
-               drain_cpu_pagevecs((long)hcpu);
-       }
-       return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-#endif /* CONFIG_SMP */
-
 /*
  * Perform any setup for the swap system
  */
 void __init swap_setup(void)
 {
-       unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);
+       unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 
 #ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
@@ -525,7 +796,4 @@ void __init swap_setup(void)
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more
         */
-#ifdef CONFIG_HOTPLUG_CPU
-       hotcpu_notifier(cpu_swap_callback, 0);
-#endif
 }