vmscan: fix a livelock in kswapd
[linux-flexiantxendom0-natty.git] / mm / vmscan.c
index 0465818..1e0eefe 100644
@@ -13,7 +13,7 @@
 
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
@@ -32,6 +32,7 @@
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/compaction.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 #include <linux/delay.h>
@@ -40,6 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/vmscan.h>
+
+/*
+ * reclaim_mode determines how the inactive list is shrunk
+ * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
+ * RECLAIM_MODE_ASYNC:  Do not block
+ * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
+ * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
+ *                     page from the LRU and reclaim all pages within a
+ *                     naturally aligned range
+ * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
+ *                     order-0 pages and then compact the zone
+ */
+typedef unsigned __bitwise__ reclaim_mode_t;
+#define RECLAIM_MODE_SINGLE            ((__force reclaim_mode_t)0x01u)
+#define RECLAIM_MODE_ASYNC             ((__force reclaim_mode_t)0x02u)
+#define RECLAIM_MODE_SYNC              ((__force reclaim_mode_t)0x04u)
+#define RECLAIM_MODE_LUMPYRECLAIM      ((__force reclaim_mode_t)0x08u)
+#define RECLAIM_MODE_COMPACTION                ((__force reclaim_mode_t)0x10u)
+
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
@@ -73,10 +96,14 @@ struct scan_control {
 
        int swappiness;
 
-       int all_unreclaimable;
-
        int order;
 
+       /*
+        * Intend to reclaim enough contiguous memory rather than just
+        * enough memory, i.e. the mode used for high-order allocations.
+        */
+       reclaim_mode_t reclaim_mode;
+
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
@@ -85,12 +112,6 @@ struct scan_control {
         * are scanned.
         */
        nodemask_t      *nodemask;
-
-       /* Pluggable isolate pages callback */
-       unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
-                       unsigned long *scanned, int order, int mode,
-                       struct zone *z, struct mem_cgroup *mem_cont,
-                       int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -209,14 +230,18 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        if (scanned == 0)
                scanned = SWAP_CLUSTER_MAX;
 
-       if (!down_read_trylock(&shrinker_rwsem))
-               return 1;       /* Assume we'll be able to shrink next time */
+       if (!down_read_trylock(&shrinker_rwsem)) {
+               /* Assume we'll be able to shrink next time */
+               ret = 1;
+               goto out;
+       }
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
-               unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
+               unsigned long max_pass;
 
+               max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -244,8 +269,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrink)(0, gfp_mask);
-                       shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
+                       nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
+                       shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
+                                                               gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
@@ -259,28 +285,42 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                shrinker->nr += total_scan;
        }
        up_read(&shrinker_rwsem);
+out:
+       cond_resched();
        return ret;
 }
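
For reference, the per-shrinker scan target is still proportional to how much LRU scanning just happened: delta = (4 * scanned / seeks) * max_pass / (lru_pages + 1). A minimal standalone sketch of that arithmetic, with every figure assumed purely for illustration (a seeks value of 2 corresponds to the usual DEFAULT_SEEKS):

#include <stdio.h>

/* Mirrors delta = (4 * scanned / seeks) * max_pass / (lru_pages + 1) */
int main(void)
{
	unsigned long scanned = 128;		/* LRU pages scanned this pass (assumed) */
	unsigned long seeks = 2;		/* shrinker->seeks, DEFAULT_SEEKS-like (assumed) */
	unsigned long max_pass = 10000;		/* objects the shrinker reports (assumed) */
	unsigned long lru_pages = 100000;	/* reclaimable LRU pages (assumed) */
	unsigned long long delta;

	delta = (4ULL * scanned) / seeks;
	delta *= max_pass;
	delta /= lru_pages + 1;

	printf("ask the shrinker to scan ~%llu objects\n", delta);
	return 0;
}
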
 
-/* Called without lock on whether page is mapped, so answer is unstable */
-static inline int page_mapping_inuse(struct page *page)
+static void set_reclaim_mode(int priority, struct scan_control *sc,
+                                  bool sync)
 {
-       struct address_space *mapping;
+       reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
 
-       /* Page is in somebody's page tables. */
-       if (page_mapped(page))
-               return 1;
-
-       /* Be more reluctant to reclaim swapcache than pagecache */
-       if (PageSwapCache(page))
-               return 1;
+       /*
+        * Initially assume we are entering either lumpy reclaim or
+        * reclaim/compaction. Depending on the order, we will either set the
+        * sync mode or just reclaim order-0 pages later.
+        */
+       if (COMPACTION_BUILD)
+               sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
+       else
+               sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
 
-       mapping = page_mapping(page);
-       if (!mapping)
-               return 0;
+       /*
+        * Avoid using lumpy reclaim or reclaim/compaction if possible by
+        * restricting when it is set to either costly allocations or when
+        * under memory pressure
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               sc->reclaim_mode |= syncmode;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               sc->reclaim_mode |= syncmode;
+       else
+               sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
+}
 
-       /* File is mmap'd by somebody? */
-       return mapping_mapped(mapping);
+static void reset_reclaim_mode(struct scan_control *sc)
+{
+       sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
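
The sketch below is a small standalone model, not part of the patch, of the decision set_reclaim_mode() makes on a COMPACTION_BUILD kernel; PAGE_ALLOC_COSTLY_ORDER is taken as 3 and DEF_PRIORITY as 12 (their usual values, assumed here). It shows which requests get the reclaim/compaction treatment and which stay with plain order-0, asynchronous reclaim:

#include <stdio.h>

#define RECLAIM_MODE_SINGLE		0x01u
#define RECLAIM_MODE_ASYNC		0x02u
#define RECLAIM_MODE_SYNC		0x04u
#define RECLAIM_MODE_LUMPYRECLAIM	0x08u
#define RECLAIM_MODE_COMPACTION		0x10u

#define PAGE_ALLOC_COSTLY_ORDER	3	/* assumed, as in mainline */
#define DEF_PRIORITY		12	/* assumed, as in mainline */

/* Models set_reclaim_mode() with COMPACTION_BUILD true */
static unsigned int mode_for(int order, int priority, int sync)
{
	unsigned int syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
	unsigned int mode = RECLAIM_MODE_COMPACTION;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		mode |= syncmode;
	else if (order && priority < DEF_PRIORITY - 2)
		mode |= syncmode;
	else
		mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
	return mode;
}

int main(void)
{
	printf("order 0, prio 12: 0x%02x\n", mode_for(0, 12, 0));	/* single|async */
	printf("order 2, prio 12: 0x%02x\n", mode_for(2, 12, 0));	/* single|async */
	printf("order 2, prio  8: 0x%02x\n", mode_for(2, 8, 0));	/* compaction|async */
	printf("order 9, prio 12: 0x%02x\n", mode_for(9, 12, 1));	/* compaction|sync */
	return 0;
}

Costly orders carry the sync hint from the first pass, smaller orders only once the scanning priority has dropped below DEF_PRIORITY - 2; everything else falls back to RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC.
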
 
 static inline int is_page_cache_freeable(struct page *page)
@@ -293,7 +333,8 @@ static inline int is_page_cache_freeable(struct page *page)
        return page_count(page) - page_has_private(page) == 2;
 }
 
-static int may_write_to_queue(struct backing_dev_info *bdi)
+static int may_write_to_queue(struct backing_dev_info *bdi,
+                             struct scan_control *sc)
 {
        if (current->flags & PF_SWAPWRITE)
                return 1;
@@ -301,6 +342,10 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
                return 1;
        if (bdi == current->backing_dev_info)
                return 1;
+
+       /* lumpy reclaim for hugepages often needs a lot of writes */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               return 1;
        return 0;
 }
 
@@ -319,18 +364,12 @@ static int may_write_to_queue(struct backing_dev_info *bdi)
 static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
-       lock_page(page);
+       lock_page_nosync(page);
        if (page_mapping(page) == mapping)
                mapping_set_error(mapping, error);
        unlock_page(page);
 }
 
-/* Request for sync pageout. */
-enum pageout_io {
-       PAGEOUT_IO_ASYNC,
-       PAGEOUT_IO_SYNC,
-};
-
 /* possible outcome of pageout() */
 typedef enum {
        /* failed to write page out, page is locked */
@@ -348,7 +387,7 @@ typedef enum {
  * Calls ->writepage().
  */
 static pageout_t pageout(struct page *page, struct address_space *mapping,
-                                               enum pageout_io sync_writeback)
+                        struct scan_control *sc)
 {
        /*
         * If the page is dirty, only perform writeback if that write
@@ -384,7 +423,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
        }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
-       if (!may_write_to_queue(mapping->backing_dev_info))
+       if (!may_write_to_queue(mapping->backing_dev_info, sc))
                return PAGE_KEEP;
 
        if (clear_page_dirty_for_io(page)) {
@@ -394,7 +433,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .range_start = 0,
                        .range_end = LLONG_MAX,
-                       .nonblocking = 1,
                        .for_reclaim = 1,
                };
 
@@ -412,13 +450,16 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * direct reclaiming a large contiguous area and the
                 * first attempt to free a range of pages fails.
                 */
-               if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+               if (PageWriteback(page) &&
+                   (sc->reclaim_mode & RECLAIM_MODE_SYNC))
                        wait_on_page_writeback(page);
 
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
+               trace_mm_vmscan_writepage(page,
+                       trace_reclaim_flags(page, sc->reclaim_mode));
                inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
@@ -475,9 +516,16 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                spin_unlock_irq(&mapping->tree_lock);
                swapcache_free(swap, page);
        } else {
+               void (*freepage)(struct page *);
+
+               freepage = mapping->a_ops->freepage;
+
                __remove_from_page_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
                mem_cgroup_uncharge_cache_page(page);
+
+               if (freepage != NULL)
+                       freepage(page);
        }
 
        return 1;
@@ -579,27 +627,104 @@ redo:
        put_page(page);         /* drop ref from isolate */
 }
 
+enum page_references {
+       PAGEREF_RECLAIM,
+       PAGEREF_RECLAIM_CLEAN,
+       PAGEREF_KEEP,
+       PAGEREF_ACTIVATE,
+};
+
+static enum page_references page_check_references(struct page *page,
+                                                 struct scan_control *sc)
+{
+       int referenced_ptes, referenced_page;
+       unsigned long vm_flags;
+
+       referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+       referenced_page = TestClearPageReferenced(page);
+
+       /* Lumpy reclaim - ignore references */
+       if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
+               return PAGEREF_RECLAIM;
+
+       /*
+        * Mlock lost the isolation race with us.  Let try_to_unmap()
+        * move the page to the unevictable list.
+        */
+       if (vm_flags & VM_LOCKED)
+               return PAGEREF_RECLAIM;
+
+       if (referenced_ptes) {
+               if (PageAnon(page))
+                       return PAGEREF_ACTIVATE;
+               /*
+                * All mapped pages start out with page table
+                * references from the instantiating fault, so we need
+                * to look twice if a mapped file page is used more
+                * than once.
+                *
+                * Mark it and spare it for another trip around the
+                * inactive list.  Another page table reference will
+                * lead to its activation.
+                *
+                * Note: the mark is set for activated pages as well
+                * so that recently deactivated but used pages are
+                * quickly recovered.
+                */
+               SetPageReferenced(page);
+
+               if (referenced_page)
+                       return PAGEREF_ACTIVATE;
+
+               return PAGEREF_KEEP;
+       }
+
+       /* Reclaim if clean, defer dirty pages to writeback */
+       if (referenced_page && !PageSwapBacked(page))
+               return PAGEREF_RECLAIM_CLEAN;
+
+       return PAGEREF_RECLAIM;
+}
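+
To make the "look twice" rule above concrete, here is a toy standalone model of the mapped-file-page path only (anon pages, VM_LOCKED and the lumpy-reclaim bypass are deliberately left out), walking one page through three passes over the inactive list:

#include <stdio.h>

/*
 * referenced_ptes stands in for page_referenced(); pg_referenced models
 * the PG_referenced bit carried from one inactive-list pass to the next.
 */
static const char *check(int referenced_ptes, int *pg_referenced)
{
	int referenced_page = *pg_referenced;	/* TestClearPageReferenced() */

	*pg_referenced = 0;
	if (referenced_ptes) {
		*pg_referenced = 1;		/* SetPageReferenced() */
		return referenced_page ? "ACTIVATE" : "KEEP";
	}
	return referenced_page ? "RECLAIM_CLEAN" : "RECLAIM";
}

int main(void)
{
	int pg_referenced = 0;

	/* pass 1: only the instantiating fault's pte reference */
	printf("pass 1: %s\n", check(1, &pg_referenced));
	/* pass 2: the page was referenced again in the meantime */
	printf("pass 2: %s\n", check(1, &pg_referenced));
	/* pass 3: no further references */
	printf("pass 3: %s\n", check(0, &pg_referenced));
	return 0;
}

The first pte reference only buys the page another trip around the inactive list; a second reference while it waits there activates it, and once references stop it becomes reclaimable (clean pages first).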
+
+static noinline_for_stack void free_page_list(struct list_head *free_pages)
+{
+       struct pagevec freed_pvec;
+       struct page *page, *tmp;
+
+       pagevec_init(&freed_pvec, 1);
+
+       list_for_each_entry_safe(page, tmp, free_pages, lru) {
+               list_del(&page->lru);
+               if (!pagevec_add(&freed_pvec, page)) {
+                       __pagevec_free(&freed_pvec);
+                       pagevec_reinit(&freed_pvec);
+               }
+       }
+
+       pagevec_free(&freed_pvec);
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                       struct scan_control *sc,
-                                       enum pageout_io sync_writeback)
+                                     struct zone *zone,
+                                     struct scan_control *sc)
 {
        LIST_HEAD(ret_pages);
-       struct pagevec freed_pvec;
+       LIST_HEAD(free_pages);
        int pgactivate = 0;
+       unsigned long nr_dirty = 0;
+       unsigned long nr_congested = 0;
        unsigned long nr_reclaimed = 0;
-       unsigned long vm_flags;
 
        cond_resched();
 
-       pagevec_init(&freed_pvec, 1);
        while (!list_empty(page_list)) {
+               enum page_references references;
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
-               int referenced;
 
                cond_resched();
 
@@ -610,6 +735,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        goto keep;
 
                VM_BUG_ON(PageActive(page));
+               VM_BUG_ON(page_zone(page) != zone);
 
                sc->nr_scanned++;
 
@@ -635,23 +761,25 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         * for any page for which writeback has already
                         * started.
                         */
-                       if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+                       if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
+                           may_enter_fs)
                                wait_on_page_writeback(page);
-                       else
-                               goto keep_locked;
+                       else {
+                               unlock_page(page);
+                               goto keep_lumpy;
+                       }
                }
 
-               referenced = page_referenced(page, 1,
-                                               sc->mem_cgroup, &vm_flags);
-               /*
-                * In active use or really unfreeable?  Activate it.
-                * If page which have PG_mlocked lost isoltation race,
-                * try_to_unmap moves it to unevictable list
-                */
-               if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-                                       referenced && page_mapping_inuse(page)
-                                       && !(vm_flags & VM_LOCKED))
+               references = page_check_references(page, sc);
+               switch (references) {
+               case PAGEREF_ACTIVATE:
                        goto activate_locked;
+               case PAGEREF_KEEP:
+                       goto keep_locked;
+               case PAGEREF_RECLAIM:
+               case PAGEREF_RECLAIM_CLEAN:
+                       ; /* try to reclaim the page below */
+               }
 
                /*
                 * Anonymous process memory has backing store?
@@ -685,7 +813,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                }
 
                if (PageDirty(page)) {
-                       if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
+                       nr_dirty++;
+
+                       if (references == PAGEREF_RECLAIM_CLEAN)
                                goto keep_locked;
                        if (!may_enter_fs)
                                goto keep_locked;
@@ -693,14 +823,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
 
                        /* Page is dirty, try to write it out here */
-                       switch (pageout(page, mapping, sync_writeback)) {
+                       switch (pageout(page, mapping, sc)) {
                        case PAGE_KEEP:
+                               nr_congested++;
                                goto keep_locked;
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               if (PageWriteback(page) || PageDirty(page))
+                               if (PageWriteback(page))
+                                       goto keep_lumpy;
+                               if (PageDirty(page))
                                        goto keep;
+
                                /*
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
@@ -770,10 +904,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                __clear_page_locked(page);
 free_it:
                nr_reclaimed++;
-               if (!pagevec_add(&freed_pvec, page)) {
-                       __pagevec_free(&freed_pvec);
-                       pagevec_reinit(&freed_pvec);
-               }
+
+               /*
+                * Is there a need to call free_page_list() periodically? It
+                * would appear not, as the counts should be low.
+                */
+               list_add(&page->lru, &free_pages);
                continue;
 
 cull_mlocked:
@@ -781,6 +917,7 @@ cull_mlocked:
                        try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
+               reset_reclaim_mode(sc);
                continue;
 
 activate_locked:
@@ -793,21 +930,28 @@ activate_locked:
 keep_locked:
                unlock_page(page);
 keep:
+               reset_reclaim_mode(sc);
+keep_lumpy:
                list_add(&page->lru, &ret_pages);
                VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
        }
+
+       /*
+        * Tag a zone as congested if all the dirty pages encountered were
+        * backed by a congested BDI. In this case, reclaimers should just
+        * back off and wait for congestion to clear because further reclaim
+        * will encounter the same problem
+        */
+       if (nr_dirty == nr_congested && nr_dirty != 0)
+               zone_set_flag(zone, ZONE_CONGESTED);
+
+       free_page_list(&free_pages);
+
        list_splice(&ret_pages, page_list);
-       if (pagevec_count(&freed_pvec))
-               __pagevec_free(&freed_pvec);
        count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
 }
 
-/* LRU Isolation modes. */
-#define ISOLATE_INACTIVE 0     /* Isolate inactive pages. */
-#define ISOLATE_ACTIVE 1       /* Isolate active pages. */
-#define ISOLATE_BOTH 2         /* Isolate both active and inactive pages. */
-
 /*
  * Attempt to remove the specified page from its LRU.  Only take this page
  * if it is of the appropriate PageActive status.  Pages which are being
@@ -885,6 +1029,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                unsigned long *scanned, int order, int mode, int file)
 {
        unsigned long nr_taken = 0;
+       unsigned long nr_lumpy_taken = 0;
+       unsigned long nr_lumpy_dirty = 0;
+       unsigned long nr_lumpy_failed = 0;
        unsigned long scan;
 
        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
@@ -903,7 +1050,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                case 0:
                        list_move(&page->lru, dst);
                        mem_cgroup_del_lru(page);
-                       nr_taken++;
+                       nr_taken += hpage_nr_pages(page);
                        break;
 
                case -EBUSY:
@@ -947,7 +1094,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
-                               continue;
+                               break;
 
                        /*
                         * If we don't have enough swap space, reclaiming of
@@ -955,19 +1102,37 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                         * pointless.
                         */
                        if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
-                                       !PageSwapCache(cursor_page))
-                               continue;
+                           !PageSwapCache(cursor_page))
+                               break;
 
                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
                                mem_cgroup_del_lru(cursor_page);
-                               nr_taken++;
+                               nr_taken += hpage_nr_pages(page);
+                               nr_lumpy_taken++;
+                               if (PageDirty(cursor_page))
+                                       nr_lumpy_dirty++;
                                scan++;
+                       } else {
+                               /* the page is freed already. */
+                               if (!page_count(cursor_page))
+                                       continue;
+                               break;
                        }
                }
+
+               /* If we break out of the loop above, lumpy reclaim failed */
+               if (pfn < end_pfn)
+                       nr_lumpy_failed++;
        }
 
        *scanned = scan;
+
+       trace_mm_vmscan_lru_isolate(order,
+                       nr_to_scan, scan,
+                       nr_taken,
+                       nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
+                       mode);
        return nr_taken;
 }
 
@@ -975,7 +1140,6 @@ static unsigned long isolate_pages_global(unsigned long nr,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
-                                       struct mem_cgroup *mem_cont,
                                        int active, int file)
 {
        int lru = LRU_BASE;
@@ -999,13 +1163,15 @@ static unsigned long clear_active_flags(struct list_head *page_list,
        struct page *page;
 
        list_for_each_entry(page, page_list, lru) {
+               int numpages = hpage_nr_pages(page);
                lru = page_lru_base_type(page);
                if (PageActive(page)) {
                        lru += LRU_ACTIVE;
                        ClearPageActive(page);
-                       nr_active++;
+                       nr_active += numpages;
                }
-               count[lru]++;
+               if (count)
+                       count[lru] += numpages;
        }
 
        return nr_active;
@@ -1082,178 +1248,210 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 /*
- * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
- * of reclaimed pages
+ * TODO: Try merging with migration's version of putback_lru_pages
  */
-static unsigned long shrink_inactive_list(unsigned long max_scan,
-                       struct zone *zone, struct scan_control *sc,
-                       int priority, int file)
+static noinline_for_stack void
+putback_lru_pages(struct zone *zone, struct scan_control *sc,
+                               unsigned long nr_anon, unsigned long nr_file,
+                               struct list_head *page_list)
 {
-       LIST_HEAD(page_list);
+       struct page *page;
        struct pagevec pvec;
-       unsigned long nr_scanned = 0;
-       unsigned long nr_reclaimed = 0;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-       int lumpy_reclaim = 0;
-
-       while (unlikely(too_many_isolated(zone, file, sc))) {
-               congestion_wait(BLK_RW_ASYNC, HZ/10);
 
-               /* We are about to die and free our memory. Return now. */
-               if (fatal_signal_pending(current))
-                       return SWAP_CLUSTER_MAX;
-       }
+       pagevec_init(&pvec, 1);
 
        /*
-        * If we need a large contiguous chunk of memory, or have
-        * trouble getting a small set of contiguous pages, we
-        * will reclaim both active and inactive pages.
-        *
-        * We use the same threshold as pageout congestion_wait below.
+        * Put back any unfreeable pages.
         */
-       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-               lumpy_reclaim = 1;
-       else if (sc->order && priority < DEF_PRIORITY - 2)
-               lumpy_reclaim = 1;
-
-       pagevec_init(&pvec, 1);
+       spin_lock(&zone->lru_lock);
+       while (!list_empty(page_list)) {
+               int lru;
+               page = lru_to_page(page_list);
+               VM_BUG_ON(PageLRU(page));
+               list_del(&page->lru);
+               if (unlikely(!page_evictable(page, NULL))) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       putback_lru_page(page);
+                       spin_lock_irq(&zone->lru_lock);
+                       continue;
+               }
+               SetPageLRU(page);
+               lru = page_lru(page);
+               add_page_to_lru_list(zone, page, lru);
+               if (is_active_lru(lru)) {
+                       int file = is_file_lru(lru);
+                       int numpages = hpage_nr_pages(page);
+                       reclaim_stat->recent_rotated[file] += numpages;
+               }
+               if (!pagevec_add(&pvec, page)) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       __pagevec_release(&pvec);
+                       spin_lock_irq(&zone->lru_lock);
+               }
+       }
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+       __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
 
-       lru_add_drain();
-       spin_lock_irq(&zone->lru_lock);
-       do {
-               struct page *page;
-               unsigned long nr_taken;
-               unsigned long nr_scan;
-               unsigned long nr_freed;
-               unsigned long nr_active;
-               unsigned int count[NR_LRU_LISTS] = { 0, };
-               int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
-               unsigned long nr_anon;
-               unsigned long nr_file;
-
-               nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
-                            &page_list, &nr_scan, sc->order, mode,
-                               zone, sc->mem_cgroup, 0, file);
+       spin_unlock_irq(&zone->lru_lock);
+       pagevec_release(&pvec);
+}
 
-               if (scanning_global_lru(sc)) {
-                       zone->pages_scanned += nr_scan;
-                       if (current_is_kswapd())
-                               __count_zone_vm_events(PGSCAN_KSWAPD, zone,
-                                                      nr_scan);
-                       else
-                               __count_zone_vm_events(PGSCAN_DIRECT, zone,
-                                                      nr_scan);
-               }
+static noinline_for_stack void update_isolated_counts(struct zone *zone,
+                                       struct scan_control *sc,
+                                       unsigned long *nr_anon,
+                                       unsigned long *nr_file,
+                                       struct list_head *isolated_list)
+{
+       unsigned long nr_active;
+       unsigned int count[NR_LRU_LISTS] = { 0, };
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
-               if (nr_taken == 0)
-                       goto done;
+       nr_active = clear_active_flags(isolated_list, count);
+       __count_vm_events(PGDEACTIVATE, nr_active);
+
+       __mod_zone_page_state(zone, NR_ACTIVE_FILE,
+                             -count[LRU_ACTIVE_FILE]);
+       __mod_zone_page_state(zone, NR_INACTIVE_FILE,
+                             -count[LRU_INACTIVE_FILE]);
+       __mod_zone_page_state(zone, NR_ACTIVE_ANON,
+                             -count[LRU_ACTIVE_ANON]);
+       __mod_zone_page_state(zone, NR_INACTIVE_ANON,
+                             -count[LRU_INACTIVE_ANON]);
+
+       *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+       *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+       __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+
+       reclaim_stat->recent_scanned[0] += *nr_anon;
+       reclaim_stat->recent_scanned[1] += *nr_file;
+}
 
-               nr_active = clear_active_flags(&page_list, count);
-               __count_vm_events(PGDEACTIVATE, nr_active);
+/*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when we
+ * really need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+                                       unsigned long nr_freed,
+                                       int priority,
+                                       struct scan_control *sc)
+{
+       int lumpy_stall_priority;
 
-               __mod_zone_page_state(zone, NR_ACTIVE_FILE,
-                                               -count[LRU_ACTIVE_FILE]);
-               __mod_zone_page_state(zone, NR_INACTIVE_FILE,
-                                               -count[LRU_INACTIVE_FILE]);
-               __mod_zone_page_state(zone, NR_ACTIVE_ANON,
-                                               -count[LRU_ACTIVE_ANON]);
-               __mod_zone_page_state(zone, NR_INACTIVE_ANON,
-                                               -count[LRU_INACTIVE_ANON]);
+       /* kswapd should not stall on sync IO */
+       if (current_is_kswapd())
+               return false;
 
-               nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-               nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-               __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-               __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
+       /* Only stall on lumpy reclaim */
+       if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
+               return false;
 
-               reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
-               reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
-               reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
-               reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+       /* If we have reclaimed everything on the isolated list, no stall */
+       if (nr_freed == nr_taken)
+               return false;
 
-               spin_unlock_irq(&zone->lru_lock);
+       /*
+        * For high-order allocations, there are two stall thresholds.
+        * High-cost allocations stall immediately, whereas lower-order
+        * allocations such as stacks require the scanning
+        * priority to be much higher before stalling.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               lumpy_stall_priority = DEF_PRIORITY;
+       else
+               lumpy_stall_priority = DEF_PRIORITY / 3;
 
-               nr_scanned += nr_scan;
-               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+       return priority <= lumpy_stall_priority;
+}
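+
A standalone sketch of just the stall threshold, assuming the usual DEF_PRIORITY of 12 and PAGE_ALLOC_COSTLY_ORDER of 3 (the kswapd and "reclaimed everything" early exits above are omitted):

#include <stdio.h>

#define DEF_PRIORITY		12	/* assumed, as in mainline */
#define PAGE_ALLOC_COSTLY_ORDER	3	/* assumed, as in mainline */

static int stalls(int order, int priority)
{
	int lumpy_stall_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
					DEF_PRIORITY : DEF_PRIORITY / 3;

	return priority <= lumpy_stall_priority;
}

int main(void)
{
	printf("order 9 (huge page), prio 12: %d\n", stalls(9, 12));	/* stalls at once */
	printf("order 2 (stack),     prio 12: %d\n", stalls(2, 12));	/* no stall yet */
	printf("order 2 (stack),     prio  4: %d\n", stalls(2, 4));	/* stalls */
	return 0;
}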
 
-               /*
-                * If we are direct reclaiming for contiguous pages and we do
-                * not reclaim everything in the list, try again and wait
-                * for IO to complete. This will stall high-order allocations
-                * but that should be acceptable to the caller
-                */
-               if (nr_freed < nr_taken && !current_is_kswapd() &&
-                   lumpy_reclaim) {
-                       congestion_wait(BLK_RW_ASYNC, HZ/10);
+/*
+ * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
+ * of reclaimed pages
+ */
+static noinline_for_stack unsigned long
+shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
+                       struct scan_control *sc, int priority, int file)
+{
+       LIST_HEAD(page_list);
+       unsigned long nr_scanned;
+       unsigned long nr_reclaimed = 0;
+       unsigned long nr_taken;
+       unsigned long nr_anon;
+       unsigned long nr_file;
 
-                       /*
-                        * The attempt at page out may have made some
-                        * of the pages active, mark them inactive again.
-                        */
-                       nr_active = clear_active_flags(&page_list, count);
-                       count_vm_events(PGDEACTIVATE, nr_active);
+       while (unlikely(too_many_isolated(zone, file, sc))) {
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
 
-                       nr_freed += shrink_page_list(&page_list, sc,
-                                                       PAGEOUT_IO_SYNC);
-               }
+               /* We are about to die and free our memory. Return now. */
+               if (fatal_signal_pending(current))
+                       return SWAP_CLUSTER_MAX;
+       }
 
-               nr_reclaimed += nr_freed;
+       set_reclaim_mode(priority, sc, false);
+       lru_add_drain();
+       spin_lock_irq(&zone->lru_lock);
 
-               local_irq_disable();
+       if (scanning_global_lru(sc)) {
+               nr_taken = isolate_pages_global(nr_to_scan,
+                       &page_list, &nr_scanned, sc->order,
+                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+                                       ISOLATE_BOTH : ISOLATE_INACTIVE,
+                       zone, 0, file);
+               zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
-                       __count_vm_events(KSWAPD_STEAL, nr_freed);
-               __count_zone_vm_events(PGSTEAL, zone, nr_freed);
-
-               spin_lock(&zone->lru_lock);
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                              nr_scanned);
+               else
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                              nr_scanned);
+       } else {
+               nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
+                       &page_list, &nr_scanned, sc->order,
+                       sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+                                       ISOLATE_BOTH : ISOLATE_INACTIVE,
+                       zone, sc->mem_cgroup,
+                       0, file);
                /*
-                * Put back any unfreeable pages.
+                * mem_cgroup_isolate_pages() keeps track of
+                * scanned pages on its own.
                 */
-               while (!list_empty(&page_list)) {
-                       int lru;
-                       page = lru_to_page(&page_list);
-                       VM_BUG_ON(PageLRU(page));
-                       list_del(&page->lru);
-                       if (unlikely(!page_evictable(page, NULL))) {
-                               spin_unlock_irq(&zone->lru_lock);
-                               putback_lru_page(page);
-                               spin_lock_irq(&zone->lru_lock);
-                               continue;
-                       }
-                       SetPageLRU(page);
-                       lru = page_lru(page);
-                       add_page_to_lru_list(zone, page, lru);
-                       if (is_active_lru(lru)) {
-                               int file = is_file_lru(lru);
-                               reclaim_stat->recent_rotated[file]++;
-                       }
-                       if (!pagevec_add(&pvec, page)) {
-                               spin_unlock_irq(&zone->lru_lock);
-                               __pagevec_release(&pvec);
-                               spin_lock_irq(&zone->lru_lock);
-                       }
-               }
-               __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-               __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+       }
+
+       if (nr_taken == 0) {
+               spin_unlock_irq(&zone->lru_lock);
+               return 0;
+       }
 
-       } while (nr_scanned < max_scan);
+       update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
 
-done:
        spin_unlock_irq(&zone->lru_lock);
-       pagevec_release(&pvec);
-       return nr_reclaimed;
-}
 
-/*
- * We are about to scan this zone at a certain priority level.  If that priority
- * level is smaller (ie: more urgent) than the previous priority, then note
- * that priority level within the zone.  This is done so that when the next
- * process comes in to scan this zone, it will immediately start out at this
- * priority level rather than having to build up its own scanning priority.
- * Here, this priority affects only the reclaim-mapped threshold.
- */
-static inline void note_zone_scanning_priority(struct zone *zone, int priority)
-{
-       if (priority < zone->prev_priority)
-               zone->prev_priority = priority;
+       nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+
+       /* Check if we should synchronously wait for writeback */
+       if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
+               set_reclaim_mode(priority, sc, true);
+               nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+       }
+
+       local_irq_disable();
+       if (current_is_kswapd())
+               __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
+       __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+
+       putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
+
+       trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
+               zone_idx(zone),
+               nr_scanned, nr_reclaimed,
+               priority,
+               trace_shrink_flags(file, sc->reclaim_mode));
+       return nr_reclaimed;
 }
 
 /*
@@ -1292,7 +1490,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 
                list_move(&page->lru, &zone->lru[lru].list);
                mem_cgroup_add_lru_list(page, lru);
-               pgmoved++;
+               pgmoved += hpage_nr_pages(page);
 
                if (!pagevec_add(&pvec, page) || list_empty(list)) {
                        spin_unlock_irq(&zone->lru_lock);
@@ -1322,16 +1520,23 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-       nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
-                                       ISOLATE_ACTIVE, zone,
-                                       sc->mem_cgroup, 1, file);
-       /*
-        * zone->pages_scanned is used for detect zone's oom
-        * mem_cgroup remembers nr_scan by itself.
-        */
        if (scanning_global_lru(sc)) {
+               nr_taken = isolate_pages_global(nr_pages, &l_hold,
+                                               &pgscanned, sc->order,
+                                               ISOLATE_ACTIVE, zone,
+                                               1, file);
                zone->pages_scanned += pgscanned;
+       } else {
+               nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
+                                               &pgscanned, sc->order,
+                                               ISOLATE_ACTIVE, zone,
+                                               sc->mem_cgroup, 1, file);
+               /*
+                * mem_cgroup_isolate_pages() keeps track of
+                * scanned pages on its own.
+                */
        }
+
        reclaim_stat->recent_scanned[file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
@@ -1352,10 +1557,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        continue;
                }
 
-               /* page_referenced clears PageReferenced */
-               if (page_mapping_inuse(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-                       nr_rotated++;
+               if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+                       nr_rotated += hpage_nr_pages(page);
                        /*
                         * Identify referenced, file-backed active pages and
                         * give them one more trip around the active list. So
@@ -1395,6 +1598,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        spin_unlock_irq(&zone->lru_lock);
 }
 
+#ifdef CONFIG_SWAP
 static int inactive_anon_is_low_global(struct zone *zone)
 {
        unsigned long active, inactive;
@@ -1420,12 +1624,26 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
 {
        int low;
 
+       /*
+        * If we don't have swap space, anonymous page deactivation
+        * is pointless.
+        */
+       if (!total_swap_pages)
+               return 0;
+
        if (scanning_global_lru(sc))
                low = inactive_anon_is_low_global(zone);
        else
                low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
        return low;
 }
+#else
+static inline int inactive_anon_is_low(struct zone *zone,
+                                       struct scan_control *sc)
+{
+       return 0;
+}
+#endif
 
 static int inactive_file_is_low_global(struct zone *zone)
 {
@@ -1487,24 +1705,55 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 }
 
 /*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we have collected SWAP_CLUSTER_MAX pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                      unsigned long *nr_saved_scan)
+{
+       unsigned long nr;
+
+       *nr_saved_scan += nr_to_scan;
+       nr = *nr_saved_scan;
+
+       if (nr >= SWAP_CLUSTER_MAX)
+               *nr_saved_scan = 0;
+       else
+               nr = 0;
+
+       return nr;
+}
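+
A hypothetical standalone harness showing how the batching behaves, assuming SWAP_CLUSTER_MAX is 32 as in mainline:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32	/* assumed, as in mainline */

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	int pass;

	/* small per-pass targets accumulate, then a whole batch is released */
	for (pass = 1; pass <= 4; pass++)
		printf("pass %d: deposit 12 -> scan %lu now\n",
		       pass, nr_scan_try_batch(12, &saved));
	return 0;
}

Passes 1 and 2 scan nothing (12, then 24 pages saved); pass 3 releases all 36 at once, and pass 4 starts saving again.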
+
+/*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
  * by looking at the fraction of the pages scanned we did rotate back
  * onto the active list instead of evict.
  *
- * percent[0] specifies how much pressure to put on ram/swap backed
- * memory, while percent[1] determines pressure on the file LRUs.
+ * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
-static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
-                                       unsigned long *percent)
+static void get_scan_count(struct zone *zone, struct scan_control *sc,
+                                       unsigned long *nr, int priority)
 {
        unsigned long anon, file, free;
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       u64 fraction[2], denominator;
+       enum lru_list l;
+       int noswap = 0;
 
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       /* If we have no swap space, do not bother scanning anon pages. */
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+               noswap = 1;
+               fraction[0] = 0;
+               fraction[1] = 1;
+               denominator = 1;
+               goto out;
+       }
+
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
        file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
 
@@ -1513,13 +1762,21 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                /* If we have very few page cache pages,
                   force-scan anon pages. */
                if (unlikely(file + free <= high_wmark_pages(zone))) {
-                       percent[0] = 100;
-                       percent[1] = 0;
-                       return;
+                       fraction[0] = 1;
+                       fraction[1] = 0;
+                       denominator = 1;
+                       goto out;
                }
        }
 
        /*
+        * With swappiness at 100, anonymous and file have the same priority.
+        * This scanning priority is essentially the inverse of IO cost.
+        */
+       anon_prio = sc->swappiness;
+       file_prio = 200 - sc->swappiness;
+
+       /*
         * OK, so we have swap space and a fair amount of page cache
         * pages.  We use the recently rotated / recently scanned
         * ratios to determine how valuable each cache is.
@@ -1530,28 +1787,18 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
         *
         * anon in [0], file in [1]
         */
+       spin_lock_irq(&zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
-               spin_lock_irq(&zone->lru_lock);
                reclaim_stat->recent_scanned[0] /= 2;
                reclaim_stat->recent_rotated[0] /= 2;
-               spin_unlock_irq(&zone->lru_lock);
        }
 
        if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
-               spin_lock_irq(&zone->lru_lock);
                reclaim_stat->recent_scanned[1] /= 2;
                reclaim_stat->recent_rotated[1] /= 2;
-               spin_unlock_irq(&zone->lru_lock);
        }
 
        /*
-        * With swappiness at 100, anonymous and file have the same priority.
-        * This scanning priority is essentially the inverse of IO cost.
-        */
-       anon_prio = sc->swappiness;
-       file_prio = 200 - sc->swappiness;
-
-       /*
         * The amount of pressure on anon vs file pages is inversely
         * proportional to the fraction of recently scanned pages on
         * each list that were recently referenced and in active use.
@@ -1561,30 +1808,87 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 
        fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
        fp /= reclaim_stat->recent_rotated[1] + 1;
+       spin_unlock_irq(&zone->lru_lock);
+
+       fraction[0] = ap;
+       fraction[1] = fp;
+       denominator = ap + fp + 1;
+out:
+       for_each_evictable_lru(l) {
+               int file = is_file_lru(l);
+               unsigned long scan;
 
-       /* Normalize to percentages */
-       percent[0] = 100 * ap / (ap + fp + 1);
-       percent[1] = 100 - percent[0];
+               scan = zone_nr_lru_pages(zone, sc, l);
+               if (priority || noswap) {
+                       scan >>= priority;
+                       scan = div64_u64(scan * fraction[file], denominator);
+               }
+               nr[l] = nr_scan_try_batch(scan,
+                                         &reclaim_stat->nr_saved_scan[l]);
+       }
 }
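
With the old percentages replaced by a fraction/denominator pair, each list's target before batching works out to roughly (lru_size >> priority) * fraction[file] / denominator. A standalone sketch with assumed numbers, ignoring the nr_saved_scan batching:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* all figures assumed, for illustration only */
	uint64_t anon_lru = 200000, file_lru = 600000;	/* LRU sizes in pages */
	uint64_t ap = 50, fp = 150;			/* rotation-derived pressures */
	uint64_t denominator = ap + fp + 1;
	int priority = 12;				/* DEF_PRIORITY, first pass */

	uint64_t anon_scan = ((anon_lru >> priority) * ap) / denominator;
	uint64_t file_scan = ((file_lru >> priority) * fp) / denominator;

	printf("scan %llu anon and %llu file pages this pass\n",
	       (unsigned long long)anon_scan, (unsigned long long)file_scan);
	return 0;
}
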
 
 /*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
+ * Reclaim/compaction depends on a number of pages being freed. To avoid
+ * disruption to the system, a small number of order-0 pages continue to be
+ * rotated and reclaimed in the normal fashion. However, by the time we get
+ * back to the allocator and call try_to_compact_zone(), we ensure that
+ * there are enough free pages for it to be likely successful
  */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-                                      unsigned long *nr_saved_scan)
+static inline bool should_continue_reclaim(struct zone *zone,
+                                       unsigned long nr_reclaimed,
+                                       unsigned long nr_scanned,
+                                       struct scan_control *sc)
 {
-       unsigned long nr;
+       unsigned long pages_for_compaction;
+       unsigned long inactive_lru_pages;
 
-       *nr_saved_scan += nr_to_scan;
-       nr = *nr_saved_scan;
+       /* If not in reclaim/compaction mode, stop */
+       if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+               return false;
 
-       if (nr >= SWAP_CLUSTER_MAX)
-               *nr_saved_scan = 0;
-       else
-               nr = 0;
+       /* Consider stopping depending on scan and reclaim activity */
+       if (sc->gfp_mask & __GFP_REPEAT) {
+               /*
+                * For __GFP_REPEAT allocations, stop reclaiming if the
+                * full LRU list has been scanned and we are still failing
+                * to reclaim pages. This full LRU scan is potentially
+                * expensive but a __GFP_REPEAT caller really wants to succeed
+                */
+               if (!nr_reclaimed && !nr_scanned)
+                       return false;
+       } else {
+               /*
+                * For non-__GFP_REPEAT allocations which can presumably
+                * fail without consequence, stop if we failed to reclaim
+                * any pages from the last SWAP_CLUSTER_MAX number of
+                * pages that were scanned. This will return to the
+                * caller faster at the risk that reclaim/compaction and
+                * the resulting allocation attempt fail
+                */
+               if (!nr_reclaimed)
+                       return false;
+       }
 
-       return nr;
+       /*
+        * If we have not reclaimed enough pages for compaction and the
+        * inactive lists are large enough, continue reclaiming
+        */
+       pages_for_compaction = (2UL << sc->order);
+       inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
+                               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       if (sc->nr_reclaimed < pages_for_compaction &&
+                       inactive_lru_pages > pages_for_compaction)
+               return true;
+
+       /* If compaction would go ahead or the allocation would succeed, stop */
+       switch (compaction_suitable(zone, sc->order)) {
+       case COMPACT_PARTIAL:
+       case COMPACT_CONTINUE:
+               return false;
+       default:
+               return true;
+       }
 }
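
The reclaim target for reclaim/compaction scales with the requested order: pages_for_compaction is 2UL << order, so an order-3 request keeps reclaiming until roughly 16 pages have been freed and an order-9 (transparent huge page) request until roughly 1024, provided the inactive lists still hold more than that. A trivial standalone illustration:

#include <stdio.h>

int main(void)
{
	int order;

	/* pages_for_compaction = 2UL << order, as in should_continue_reclaim() */
	for (order = 0; order <= 9; order += 3)
		printf("order %d -> reclaim until ~%lu pages are freed\n",
		       order, 2UL << order);
	return 0;
}
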
 
 /*
@@ -1595,33 +1899,14 @@ static void shrink_zone(int priority, struct zone *zone,
 {
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
-       unsigned long percent[2];       /* anon @ 0; file @ 1 */
        enum lru_list l;
-       unsigned long nr_reclaimed = sc->nr_reclaimed;
+       unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-       int noswap = 0;
 
-       /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || (nr_swap_pages <= 0)) {
-               noswap = 1;
-               percent[0] = 0;
-               percent[1] = 100;
-       } else
-               get_scan_ratio(zone, sc, percent);
-
-       for_each_evictable_lru(l) {
-               int file = is_file_lru(l);
-               unsigned long scan;
-
-               scan = zone_nr_lru_pages(zone, sc, l);
-               if (priority || noswap) {
-                       scan >>= priority;
-                       scan = (scan * percent[file]) / 100;
-               }
-               nr[l] = nr_scan_try_batch(scan,
-                                         &reclaim_stat->nr_saved_scan[l]);
-       }
+restart:
+       nr_reclaimed = 0;
+       nr_scanned = sc->nr_scanned;
+       get_scan_count(zone, sc, nr, priority);
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
@@ -1646,16 +1931,20 @@ static void shrink_zone(int priority, struct zone *zone,
                if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }
-
-       sc->nr_reclaimed = nr_reclaimed;
+       sc->nr_reclaimed += nr_reclaimed;
 
        /*
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
+       if (inactive_anon_is_low(zone, sc))
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
+       /* reclaim/compaction might need reclaim to continue */
+       if (should_continue_reclaim(zone, nr_reclaimed,
+                                       sc->nr_scanned - nr_scanned, sc))
+               goto restart;
+
        throttle_vm_writeout(sc->gfp_mask);
 }
 
@@ -1678,13 +1967,11 @@ static void shrink_zone(int priority, struct zone *zone,
 static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
-       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
        struct zoneref *z;
        struct zone *zone;
 
-       sc->all_unreclaimable = 1;
-       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
-                                       sc->nodemask) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                       gfp_zone(sc->gfp_mask), sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
@@ -1694,26 +1981,39 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                if (scanning_global_lru(sc)) {
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
-                       note_zone_scanning_priority(zone, priority);
-
-                       if (zone_is_all_unreclaimable(zone) &&
-                                               priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
-                       sc->all_unreclaimable = 0;
-               } else {
-                       /*
-                        * Ignore cpuset limitation here. We just want to reduce
-                        * # of used pages by us regardless of memory shortage.
-                        */
-                       sc->all_unreclaimable = 0;
-                       mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
-                                                       priority);
                }
 
                shrink_zone(priority, zone, sc);
        }
 }
 
+static bool zone_reclaimable(struct zone *zone)
+{
+       return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
+/* All zones in zonelist are unreclaimable? */
+static bool all_unreclaimable(struct zonelist *zonelist,
+               struct scan_control *sc)
+{
+       struct zoneref *z;
+       struct zone *zone;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                       gfp_zone(sc->gfp_mask), sc->nodemask) {
+               if (!populated_zone(zone))
+                       continue;
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                       continue;
+               if (!zone->all_unreclaimable)
+                       return false;
+       }
+
+       return true;
+}
+
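
The two helpers above replace the old per-scan-control all_unreclaimable flag with direct checks on the zone. The heuristic is purely arithmetic: a zone still counts as reclaimable while fewer than six times its reclaimable pages have been scanned. A standalone sketch of that cutoff (userspace C, hypothetical struct and names, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the two fields the heuristic reads from struct zone. */
struct zone_model {
	unsigned long pages_scanned;
	unsigned long reclaimable_pages;
};

/* Mirrors zone_reclaimable(): give up once scanning exceeds 6x the LRU size. */
static bool model_zone_reclaimable(const struct zone_model *z)
{
	return z->pages_scanned < z->reclaimable_pages * 6;
}

int main(void)
{
	struct zone_model z = { .pages_scanned = 50000, .reclaimable_pages = 10000 };

	printf("%d\n", model_zone_reclaimable(&z));	/* 50000 < 60000: still reclaimable */

	z.pages_scanned = 70000;			/* past the 6x cutoff */
	printf("%d\n", model_zone_reclaimable(&z));	/* 0 */
	return 0;
}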
 /*
  * This is the main entry point to direct page reclaim.
  *
@@ -1734,31 +2034,17 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        int priority;
-       unsigned long ret = 0;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
-       unsigned long lru_pages = 0;
        struct zoneref *z;
        struct zone *zone;
-       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
        unsigned long writeback_threshold;
 
+       get_mems_allowed();
        delayacct_freepages_start();
 
        if (scanning_global_lru(sc))
                count_vm_event(ALLOCSTALL);
-       /*
-        * mem_cgroup will not do shrink_slab.
-        */
-       if (scanning_global_lru(sc)) {
-               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-
-                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-                               continue;
-
-                       lru_pages += zone_reclaimable_pages(zone);
-               }
-       }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
@@ -1770,6 +2056,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 * over limit cgroups
                 */
                if (scanning_global_lru(sc)) {
+                       unsigned long lru_pages = 0;
+                       for_each_zone_zonelist(zone, z, zonelist,
+                                       gfp_zone(sc->gfp_mask)) {
+                               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                                       continue;
+
+                               lru_pages += zone_reclaimable_pages(zone);
+                       }
+
                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1777,10 +2072,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        }
                }
                total_scanned += sc->nr_scanned;
-               if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
-                       ret = sc->nr_reclaimed;
+               if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        goto out;
-               }
 
                /*
                 * Try to write back as many pages as we just scanned.  This
@@ -1797,42 +2090,42 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
                /* Take a nap, wait for some writeback to complete */
                if (!sc->hibernation_mode && sc->nr_scanned &&
-                   priority < DEF_PRIORITY - 2)
-                       congestion_wait(BLK_RW_ASYNC, HZ/10);
+                   priority < DEF_PRIORITY - 2) {
+                       struct zone *preferred_zone;
+
+                       first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
+                                               &cpuset_current_mems_allowed,
+                                               &preferred_zone);
+                       wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
+               }
        }
-       /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (!sc->all_unreclaimable && scanning_global_lru(sc))
-               ret = sc->nr_reclaimed;
-out:
-       /*
-        * Now that we've scanned all the zones at this priority level, note
-        * that level within the zone so that the next thread which performs
-        * scanning of this zone will immediately start out at this priority
-        * level.  This affects only the decision whether or not to bring
-        * mapped pages onto the inactive list.
-        */
-       if (priority < 0)
-               priority = 0;
 
-       if (scanning_global_lru(sc)) {
-               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+out:
+       delayacct_freepages_end();
+       put_mems_allowed();
 
-                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-                               continue;
+       if (sc->nr_reclaimed)
+               return sc->nr_reclaimed;
 
-                       zone->prev_priority = priority;
-               }
-       } else
-               mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
+       /*
+        * While hibernation is in progress, kswapd is frozen, so it cannot mark
+        * zones all_unreclaimable. Bypass the all_unreclaimable check in that
+        * case.
+        */
+       if (oom_killer_disabled)
+               return 0;
 
-       delayacct_freepages_end();
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+               return 1;
 
-       return ret;
+       return 0;
 }
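
With the all_unreclaimable field gone from scan_control, the return value of do_try_to_free_pages() now encodes three outcomes: the number of pages reclaimed, a bare 1 meaning "no progress, but the zonelist is not hopeless, so do not OOM yet", and 0 when hibernation has disabled the OOM killer or everything really is unreclaimable. A minimal model of that ordering (userspace C, illustrative names, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical summary of the state the tail of do_try_to_free_pages() inspects. */
struct reclaim_outcome {
	unsigned long nr_reclaimed;	/* pages freed during the priority loop */
	bool oom_killer_disabled;	/* hibernation in progress */
	bool global_scan;		/* scanning_global_lru() */
	bool all_unreclaimable;		/* every allowed zone marked unreclaimable */
};

/* Mirrors the return ordering: real progress first, then the OOM-avoidance cases. */
static unsigned long model_reclaim_return(const struct reclaim_outcome *o)
{
	if (o->nr_reclaimed)
		return o->nr_reclaimed;
	if (o->oom_killer_disabled)
		return 0;
	if (o->global_scan && !o->all_unreclaimable)
		return 1;	/* no progress, but do not trigger the OOM killer */
	return 0;
}

int main(void)
{
	struct reclaim_outcome o = {
		.nr_reclaimed = 0,
		.oom_killer_disabled = false,
		.global_scan = true,
		.all_unreclaimable = false,
	};

	printf("%lu\n", model_reclaim_return(&o));	/* 1: stall, but no OOM yet */
	return 0;
}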
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                gfp_t gfp_mask, nodemask_t *nodemask)
 {
+       unsigned long nr_reclaimed;
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
@@ -1842,11 +2135,18 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
-               .isolate_pages = isolate_pages_global,
                .nodemask = nodemask,
        };
 
-       return do_try_to_free_pages(zonelist, &sc);
+       trace_mm_vmscan_direct_reclaim_begin(order,
+                               sc.may_writepage,
+                               gfp_mask);
+
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
+       trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
+
+       return nr_reclaimed;
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1854,24 +2154,24 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                unsigned int swappiness,
-                                               struct zone *zone, int nid)
+                                               struct zone *zone)
 {
        struct scan_control sc = {
+               .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem,
-               .isolate_pages = mem_cgroup_isolate_pages,
        };
-       nodemask_t nm  = nodemask_of_node(nid);
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
-       sc.nodemask = &nm;
-       sc.nr_reclaimed = 0;
-       sc.nr_scanned = 0;
+
+       trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+                                                     sc.may_writepage,
+                                                     sc.gfp_mask);
+
        /*
         * NOTE: Although we can get the priority field, using it
         * here is not a good idea, since it limits the pages we can scan.
@@ -1880,6 +2180,9 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
         * the priority and make it zero.
         */
        shrink_zone(0, zone, &sc);
+
+       trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
+
        return sc.nr_reclaimed;
 }
 
@@ -1889,6 +2192,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           unsigned int swappiness)
 {
        struct zonelist *zonelist;
+       unsigned long nr_reclaimed;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
@@ -1897,46 +2201,107 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
-               .isolate_pages = mem_cgroup_isolate_pages,
                .nodemask = NULL, /* we don't care the placement */
        };
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
-       return do_try_to_free_pages(zonelist, &sc);
+
+       trace_mm_vmscan_memcg_reclaim_begin(0,
+                                           sc.may_writepage,
+                                           sc.gfp_mask);
+
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
+       trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
+
+       return nr_reclaimed;
 }
 #endif
 
+/*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are in a zone allowed
+ * by the callers classzone_idx are added to balanced_pages. The total of
+ * balanced pages must be at least 25% of the zones allowed by classzone_idx
+ * for the node to be considered balanced. Forcing all zones to be balanced
+ * for high orders can cause excessive reclaim when there are imbalanced zones.
+ * The choice of 25% is due to
+ *   o a 16M DMA zone that is balanced will not balance a zone on any
+ *     reasonable sized machine
+ *   o On all other machines, the top zone must be at least a reasonable
+ *     percentage of the middle zones. For example, on 32-bit x86, highmem
+ *     would need to be at least 256M for it to balance a whole node.
+ *     Similarly, on x86-64 the Normal zone would need to be at least 1G
+ *     to balance a node on its own. These seemed like reasonable ratios.
+ */
+static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
+                                               int classzone_idx)
+{
+       unsigned long present_pages = 0;
+       int i;
+
+       for (i = 0; i <= classzone_idx; i++)
+               present_pages += pgdat->node_zones[i].present_pages;
+
+       /* A special case here: if the zone has no pages, we consider it balanced */
+       return balanced_pages >= (present_pages >> 2);
+}
+
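
As a worked example of the 25% rule: on a node whose zones up to classzone_idx hold about 1,048,576 pages (~4G with 4K pages), a balanced 16M DMA zone contributes only 4,096 pages and falls far short, while ~1.5G of balanced zones clears the bar. A standalone sketch of the same arithmetic (userspace C, made-up numbers, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the 25% test, outside the kernel. */
static bool model_pgdat_balanced(unsigned long balanced_pages,
				 unsigned long present_pages)
{
	return balanced_pages >= (present_pages >> 2);
}

int main(void)
{
	unsigned long present = 1048576;	/* ~4G of pages below classzone_idx */

	/* A balanced 16M DMA zone (4096 pages) is nowhere near 25% of the node. */
	printf("%d\n", model_pgdat_balanced(4096, present));	/* 0 */

	/* ~1.5G of balanced zones (393216 pages) clears the bar. */
	printf("%d\n", model_pgdat_balanced(393216, present));	/* 1 */
	return 0;
}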
 /* is kswapd sleeping prematurely? */
-static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+                                       int classzone_idx)
 {
        int i;
+       unsigned long balanced = 0;
+       bool all_zones_ok = true;
 
        /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
        if (remaining)
-               return 1;
+               return true;
 
-       /* If after HZ/10, a zone is below the high mark, it's premature */
-       for (i = 0; i < pgdat->nr_zones; i++) {
+       /* Check the watermark levels */
+       for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
                if (!populated_zone(zone))
                        continue;
 
-               if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
-                                                               0, 0))
-                       return 1;
+               /*
+                * balance_pgdat() skips over all_unreclaimable after
+                * DEF_PRIORITY. Effectively, it considers them balanced so
+                * they must be considered balanced here as well if kswapd
+                * is to sleep.
+                */
+               if (zone->all_unreclaimable) {
+                       balanced += zone->present_pages;
+                       continue;
+               }
+
+               if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+                                                       i, 0))
+                       all_zones_ok = false;
+               else
+                       balanced += zone->present_pages;
        }
 
-       return 0;
+       /*
+        * For high-order requests, the balanced zones must contain at least
+        * 25% of the node's pages for kswapd to sleep. For order-0, all zones
+        * must be balanced.
+        */
+       if (order)
+               return !pgdat_balanced(pgdat, balanced, classzone_idx);
+       else
+               return !all_zones_ok;
 }
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
  *
- * Returns the number of pages which were actually freed.
+ * Returns the final order kswapd was reclaiming at
  *
  * There is special handling here for zones which are full of pinned pages.
  * This can happen if the pages are all mlocked, or if they are all used by
@@ -1953,11 +2318,14 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
  * interoperates with the page allocator fallback scheme to ensure that aging
  * of pages is balanced across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
+                                                       int *classzone_idx)
 {
        int all_zones_ok;
+       unsigned long balanced;
        int priority;
        int i;
+       int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
@@ -1972,26 +2340,14 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
-               .isolate_pages = isolate_pages_global,
        };
-       /*
-        * temp_priority is used to remember the scanning priority at which
-        * this zone was successfully refilled to
-        * free_pages == high_wmark_pages(zone).
-        */
-       int temp_priority[MAX_NR_ZONES];
-
 loop_again:
        total_scanned = 0;
        sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
-       for (i = 0; i < pgdat->nr_zones; i++)
-               temp_priority[i] = DEF_PRIORITY;
-
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-               int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;
                int has_under_min_watermark_zone = 0;
 
@@ -2000,6 +2356,7 @@ loop_again:
                        disable_swap_token();
 
                all_zones_ok = 1;
+               balanced = 0;
 
                /*
                 * Scan in the highmem->dma direction for the highest
@@ -2011,8 +2368,7 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone_is_all_unreclaimable(zone) &&
-                           priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;
 
                        /*
@@ -2023,7 +2379,7 @@ loop_again:
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
-                       if (!zone_watermark_ok(zone, order,
+                       if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
@@ -2050,48 +2406,49 @@ loop_again:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
-                       int nid, zid;
+                       unsigned long balance_gap;
 
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone_is_all_unreclaimable(zone) &&
-                                       priority != DEF_PRIORITY)
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;
 
-                       if (!zone_watermark_ok(zone, order,
-                                       high_wmark_pages(zone), end_zone, 0))
-                               all_zones_ok = 0;
-                       temp_priority[i] = priority;
                        sc.nr_scanned = 0;
-                       note_zone_scanning_priority(zone, priority);
 
-                       nid = pgdat->node_id;
-                       zid = zone_idx(zone);
                        /*
                         * Call soft limit reclaim before calling shrink_zone.
                         * For now we ignore the return value
                         */
-                       mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
-                                                       nid, zid);
+                       mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
+
                        /*
-                        * We put equal pressure on every zone, unless one
-                        * zone has way too many pages free already.
+                        * We put equal pressure on every zone, unless
+                        * one zone has way too many pages free
+                        * already. The "too many pages" is defined
+                        * as the high wmark plus a "gap" where the
+                        * gap is either the low watermark or 1%
+                        * of the zone, whichever is smaller.
                         */
-                       if (!zone_watermark_ok(zone, order,
-                                       8*high_wmark_pages(zone), end_zone, 0))
+                       balance_gap = min(low_wmark_pages(zone),
+                               (zone->present_pages +
+                                       KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+                               KSWAPD_ZONE_BALANCE_GAP_RATIO);
+                       if (!zone_watermark_ok_safe(zone, order,
+                                       high_wmark_pages(zone) + balance_gap,
+                                       end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
-                       reclaim_state->reclaimed_slab = 0;
-                       nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-                                               lru_pages);
-                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-                       total_scanned += sc.nr_scanned;
-                       if (zone_is_all_unreclaimable(zone))
-                               continue;
-                       if (nr_slab == 0 && zone->pages_scanned >=
-                                       (zone_reclaimable_pages(zone) * 6))
-                                       zone_set_flag(zone,
-                                                     ZONE_ALL_UNRECLAIMABLE);
+
+                               reclaim_state->reclaimed_slab = 0;
+                               nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
+                                                       lru_pages);
+                               sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+                               total_scanned += sc.nr_scanned;
+
+                               if (nr_slab == 0 && !zone_reclaimable(zone))
+                                       zone->all_unreclaimable = 1;
+                       }
+
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -2101,16 +2458,38 @@ loop_again:
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
 
-                       /*
-                        * We are still under min water mark. it mean we have
-                        * GFP_ATOMIC allocation failure risk. Hurry up!
-                        */
-                       if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
-                                             end_zone, 0))
-                               has_under_min_watermark_zone = 1;
+                       if (zone->all_unreclaimable) {
+                               if (end_zone && end_zone == i)
+                                       end_zone--;
+                               continue;
+                       }
+
+                       if (!zone_watermark_ok_safe(zone, order,
+                                       high_wmark_pages(zone), end_zone, 0)) {
+                               all_zones_ok = 0;
+                               /*
+                                * We are still under min water mark.  This
+                                * means that we have a GFP_ATOMIC allocation
+                                * failure risk. Hurry up!
+                                */
+                               if (!zone_watermark_ok_safe(zone, order,
+                                           min_wmark_pages(zone), end_zone, 0))
+                                       has_under_min_watermark_zone = 1;
+                       } else {
+                               /*
+                                * If a zone reaches its high watermark,
+                                * consider it to be no longer congested. It's
+                                * possible there are dirty pages backed by
+                                * congested BDIs but as pressure is relieved,
+                                * speculatively avoid congestion waits.
+                                */
+                               zone_clear_flag(zone, ZONE_CONGESTED);
+                               if (i <= *classzone_idx)
+                                       balanced += zone->present_pages;
+                       }
 
                }
-               if (all_zones_ok)
+               if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
                        break;          /* kswapd: all done */
                /*
                 * OK, kswapd is getting into trouble.  Take a nap, then take
@@ -2133,17 +2512,13 @@ loop_again:
                        break;
        }
 out:
+
        /*
-        * Note within each zone the priority level at which this zone was
-        * brought into a happy state.  So that the next thread which scans this
-        * zone will start out at that priority level.
+        * order-0: All zones must meet high watermark for a balanced node
+        * high-order: Balanced zones must make up at least 25% of the node
+        *             for the node to be balanced
         */
-       for (i = 0; i < pgdat->nr_zones; i++) {
-               struct zone *zone = pgdat->node_zones + i;
-
-               zone->prev_priority = temp_priority[i];
-       }
-       if (!all_zones_ok) {
+       if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
                cond_resched();
 
                try_to_freeze();
@@ -2168,7 +2543,88 @@ out:
                goto loop_again;
        }
 
-       return sc.nr_reclaimed;
+       /*
+        * If kswapd was reclaiming at a higher order, it has the option of
+        * sleeping without all zones being balanced. Before it does, it must
+        * ensure that the watermarks for order-0 on *all* zones are met and
+        * that the congestion flags are cleared. The congestion flag must
+        * be cleared as kswapd is the only mechanism that clears the flag
+        * and it is potentially going to sleep here.
+        */
+       if (order) {
+               for (i = 0; i <= end_zone; i++) {
+                       struct zone *zone = pgdat->node_zones + i;
+
+                       if (!populated_zone(zone))
+                               continue;
+
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                               continue;
+
+                       /* Confirm the zone is balanced for order-0 */
+                       if (!zone_watermark_ok(zone, 0,
+                                       high_wmark_pages(zone), 0, 0)) {
+                               order = sc.order = 0;
+                               goto loop_again;
+                       }
+
+                       /* If balanced, clear the congested flag */
+                       zone_clear_flag(zone, ZONE_CONGESTED);
+               }
+       }
+
+       /*
+        * Return the order we were reclaiming at so sleeping_prematurely()
+        * can base its decision on that order. However, if another caller
+        * entered the allocator slow path while kswapd was awake, order will
+        * remain at the higher level.
+        */
+       *classzone_idx = end_zone;
+       return order;
+}
+
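
The balance_gap arithmetic in balance_pgdat() adds a little slack on top of the high watermark before kswapd leaves a zone alone. Assuming the gap ratio works out to the 1% described in the comment, the sketch below reproduces the min(low watermark, ~1% of the zone) calculation with made-up numbers and an assumed constant name:

#include <stdio.h>

#define MODEL_BALANCE_GAP_RATIO 100	/* assumed: gap capped at roughly 1% of the zone */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long present_pages = 262144;	/* ~1G zone with 4K pages */
	unsigned long low_wmark = 1500;
	unsigned long high_wmark = 1800;

	/* Round the 1% share up, then take whichever bound is smaller. */
	unsigned long one_percent = (present_pages + MODEL_BALANCE_GAP_RATIO - 1) /
					MODEL_BALANCE_GAP_RATIO;
	unsigned long balance_gap = min_ul(low_wmark, one_percent);

	/* kswapd keeps shrinking this zone until it reaches high_wmark + gap. */
	printf("gap = %lu, reclaim target = %lu free pages\n",
		balance_gap, high_wmark + balance_gap);
	return 0;
}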
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+{
+       long remaining = 0;
+       DEFINE_WAIT(wait);
+
+       if (freezing(current) || kthread_should_stop())
+               return;
+
+       prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+
+       /* Try to sleep for a short interval */
+       if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+               remaining = schedule_timeout(HZ/10);
+               finish_wait(&pgdat->kswapd_wait, &wait);
+               prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+       }
+
+       /*
+        * After a short sleep, check if it was a premature sleep. If not, then
+        * go fully to sleep until explicitly woken up.
+        */
+       if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+               trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+
+               /*
+                * vmstat counters are not perfectly accurate and the estimated
+                * value for counters such as NR_FREE_PAGES can deviate from the
+                * true value by nr_online_cpus * threshold. To avoid the zone
+                * watermarks being breached while under pressure, we reduce the
+                * per-cpu vmstat thresholds while kswapd is awake and restore
+                * them before going back to sleep.
+                */
+               set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+               schedule();
+               set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+       } else {
+               if (remaining)
+                       count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+               else
+                       count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+       }
+       finish_wait(&pgdat->kswapd_wait, &wait);
 }
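
The reason kswapd_try_to_sleep() switches the per-cpu vmstat thresholds around the sleep is the error bound mentioned in the comment: a per-cpu counter such as NR_FREE_PAGES can drift from the true value by up to nr_online_cpus * threshold. A back-of-the-envelope sketch with hypothetical numbers (the reduced threshold is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	/* Hypothetical machine: 16 CPUs, per-cpu vmstat threshold of 32 pages. */
	unsigned long online_cpus = 16;
	unsigned long normal_threshold = 32;
	unsigned long pressure_threshold = 2;	/* assumed reduced value under pressure */

	/* Worst-case drift of a counter such as NR_FREE_PAGES from its true value. */
	printf("normal drift:   up to %lu pages\n", online_cpus * normal_threshold);
	printf("pressure drift: up to %lu pages\n", online_cpus * pressure_threshold);
	return 0;
}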
 
 /*
@@ -2186,10 +2642,11 @@ out:
  */
 static int kswapd(void *p)
 {
-       unsigned long order;
+       unsigned long order, new_order;
+       int classzone_idx, new_classzone_idx;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
-       DEFINE_WAIT(wait);
+
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
@@ -2216,49 +2673,37 @@ static int kswapd(void *p)
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
        set_freezable();
 
-       order = 0;
+       order = new_order = 0;
+       classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
        for ( ; ; ) {
-               unsigned long new_order;
                int ret;
 
-               prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-               new_order = pgdat->kswapd_max_order;
-               pgdat->kswapd_max_order = 0;
-               if (order < new_order) {
+               /*
+                * If the last balance_pgdat was unsuccessful it's unlikely a
+                * new request of a similar or harder type will succeed soon
+                * so consider going to sleep on the basis of the order we reclaimed at.
+                */
+               if (classzone_idx >= new_classzone_idx && order == new_order) {
+                       new_order = pgdat->kswapd_max_order;
+                       new_classzone_idx = pgdat->classzone_idx;
+                       pgdat->kswapd_max_order = 0;
+                       pgdat->classzone_idx = pgdat->nr_zones - 1;
+               }
+
+               if (order < new_order || classzone_idx > new_classzone_idx) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
-                        * allocation
+                        * allocation or has tighter zone constraints
                         */
                        order = new_order;
+                       classzone_idx = new_classzone_idx;
                } else {
-                       if (!freezing(current) && !kthread_should_stop()) {
-                               long remaining = 0;
-
-                               /* Try to sleep for a short interval */
-                               if (!sleeping_prematurely(pgdat, order, remaining)) {
-                                       remaining = schedule_timeout(HZ/10);
-                                       finish_wait(&pgdat->kswapd_wait, &wait);
-                                       prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-                               }
-
-                               /*
-                                * After a short sleep, check if it was a
-                                * premature sleep. If not, then go fully
-                                * to sleep until explicitly woken up
-                                */
-                               if (!sleeping_prematurely(pgdat, order, remaining))
-                                       schedule();
-                               else {
-                                       if (remaining)
-                                               count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
-                                       else
-                                               count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
-                               }
-                       }
-
+                       kswapd_try_to_sleep(pgdat, order, classzone_idx);
                        order = pgdat->kswapd_max_order;
+                       classzone_idx = pgdat->classzone_idx;
+                       pgdat->kswapd_max_order = 0;
+                       pgdat->classzone_idx = pgdat->nr_zones - 1;
                }
-               finish_wait(&pgdat->kswapd_wait, &wait);
 
                ret = try_to_freeze();
                if (kthread_should_stop())
@@ -2268,8 +2713,10 @@ static int kswapd(void *p)
                 * We can speed up thawing tasks if we don't call balance_pgdat
                 * after returning from the refrigerator
                 */
-               if (!ret)
-                       balance_pgdat(pgdat, order);
+               if (!ret) {
+                       trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
+                       order = balance_pgdat(pgdat, order, &classzone_idx);
+               }
        }
        return 0;
 }
@@ -2277,22 +2724,26 @@ static int kswapd(void *p)
 /*
  * A zone is low on free memory, so wake its kswapd task to service it.
  */
-void wakeup_kswapd(struct zone *zone, int order)
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 {
        pg_data_t *pgdat;
 
        if (!populated_zone(zone))
                return;
 
-       pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
-               return;
-       if (pgdat->kswapd_max_order < order)
-               pgdat->kswapd_max_order = order;
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
+       pgdat = zone->zone_pgdat;
+       if (pgdat->kswapd_max_order < order) {
+               pgdat->kswapd_max_order = order;
+               pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
+       }
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
+       if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+               return;
+
+       trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
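
The rewritten wakeup_kswapd() merges concurrent wakeups so that kswapd always services the hardest outstanding request: a larger order raises kswapd_max_order and, at the same time, clamps the pending classzone_idx downward. A small model of that merge (userspace C, illustrative names, idle classzone_idx assumed to be nr_zones - 1):

#include <stdio.h>

/* Pending work kswapd will pick up, as merged by concurrent wakeups. */
struct pending_wakeup {
	int kswapd_max_order;
	int classzone_idx;
};

/* Mirrors the merge in wakeup_kswapd(): only a larger order updates the request. */
static void model_merge_wakeup(struct pending_wakeup *p, int order, int classzone_idx)
{
	if (p->kswapd_max_order < order) {
		p->kswapd_max_order = order;
		if (classzone_idx < p->classzone_idx)
			p->classzone_idx = classzone_idx;
	}
}

int main(void)
{
	/* Idle node with 3 zones: order 0, classzone_idx = nr_zones - 1 = 2. */
	struct pending_wakeup p = { .kswapd_max_order = 0, .classzone_idx = 2 };

	model_merge_wakeup(&p, 3, 1);	/* order-3 request limited to zone index 1 */
	model_merge_wakeup(&p, 1, 0);	/* smaller order: request left untouched */

	printf("order=%d classzone_idx=%d\n", p.kswapd_max_order, p.classzone_idx);
	return 0;
}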
 
@@ -2352,7 +2803,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .hibernation_mode = 1,
                .swappiness = vm_swappiness,
                .order = 0,
-               .isolate_pages = isolate_pages_global,
        };
        struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
        struct task_struct *p = current;
@@ -2537,11 +2987,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
                .order = order,
-               .isolate_pages = isolate_pages_global,
        };
-       unsigned long slab_reclaimable;
+       unsigned long nr_slab_pages0, nr_slab_pages1;
 
-       disable_swap_token();
        cond_resched();
        /*
         * We need to be able to allocate from the reserves for RECLAIM_SWAP
@@ -2549,6 +2997,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * and RECLAIM_SWAP.
         */
        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
+       lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
@@ -2559,14 +3008,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 */
                priority = ZONE_RECLAIM_PRIORITY;
                do {
-                       note_zone_scanning_priority(zone, priority);
                        shrink_zone(priority, zone, &sc);
                        priority--;
                } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
        }
 
-       slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-       if (slab_reclaimable > zone->min_slab_pages) {
+       nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+       if (nr_slab_pages0 > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
                 * many pages were freed in this zone. So we take the current
@@ -2577,21 +3025,32 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * Note that shrink_slab will free memory on all zones and may
                 * take a long time.
                 */
-               while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-                               slab_reclaimable - nr_pages)
-                       ;
+               for (;;) {
+                       unsigned long lru_pages = zone_reclaimable_pages(zone);
+
+                       /* No reclaimable slab or very low memory pressure */
+                       if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+                               break;
+
+                       /* Freed enough memory */
+                       nr_slab_pages1 = zone_page_state(zone,
+                                                       NR_SLAB_RECLAIMABLE);
+                       if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
+                               break;
+               }
 
                /*
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
-               sc.nr_reclaimed += slab_reclaimable -
-                       zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+               nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+               if (nr_slab_pages1 < nr_slab_pages0)
+                       sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
        }
 
        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
+       lockdep_clear_current_reclaim_state();
        return sc.nr_reclaimed >= nr_pages;
 }
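
The rewritten slab loop in __zone_reclaim() exits on either of two conditions: shrink_slab() reports nothing left to reclaim, or enough reclaimable-slab pages have been freed relative to the snapshot taken before the loop. A tiny model of that exit logic (userspace C, illustrative names, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the loop exit: nr_slab_pages0 is the NR_SLAB_RECLAIMABLE snapshot
 * taken before shrinking, nr_slab_pages1 the current value, nr_pages the
 * reclaim target, shrink_ret the value shrink_slab() just returned.
 */
static bool model_keep_shrinking(unsigned long shrink_ret,
				 unsigned long nr_slab_pages0,
				 unsigned long nr_slab_pages1,
				 unsigned long nr_pages)
{
	if (!shrink_ret)	/* no reclaimable slab or very low pressure */
		return false;
	if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)	/* freed enough */
		return false;
	return true;
}

int main(void)
{
	/* Snapshot of 5000 reclaimable slab pages, reclaim target of 32 pages. */
	printf("%d\n", model_keep_shrinking(200, 5000, 4980, 32));	/* 1: only 20 freed */
	printf("%d\n", model_keep_shrinking(200, 5000, 4960, 32));	/* 0: 40 >= 32 freed */
	return 0;
}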
 
@@ -2614,7 +3073,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
                return ZONE_RECLAIM_FULL;
 
-       if (zone_is_all_unreclaimable(zone))
+       if (zone->all_unreclaimable)
                return ZONE_RECLAIM_FULL;
 
        /*
@@ -2845,6 +3304,7 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
        return 0;
 }
 
+#ifdef CONFIG_NUMA
 /*
  * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
  * a specified node's per zone unevictable lists for evictable pages.
@@ -2891,4 +3351,4 @@ void scan_unevictable_unregister_node(struct node *node)
 {
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
-
+#endif