mm: vmscan: do not apply pressure to slab if we are not applying pressure to zone
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 47a5096..7f8e890 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,7 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
-#include <linux/compaction.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
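
(Include change: <linux/compaction.h> goes away because balance_pgdat() no
longer calls compact_zone_order() directly, per the final hunk, while
<linux/oom.h> supplies oom_killer_disabled, used in the new check added to
do_try_to_free_pages() below.)
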
@@ -230,8 +230,11 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        if (scanned == 0)
                scanned = SWAP_CLUSTER_MAX;
 
-       if (!down_read_trylock(&shrinker_rwsem))
-               return 1;       /* Assume we'll be able to shrink next time */
+       if (!down_read_trylock(&shrinker_rwsem)) {
+               /* Assume we'll be able to shrink next time */
+               ret = 1;
+               goto out;
+       }
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
@@ -282,6 +285,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                shrinker->nr += total_scan;
        }
        up_read(&shrinker_rwsem);
+out:
+       cond_resched();
        return ret;
 }
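
The point of funneling the trylock failure through the new out label is that
cond_resched() now runs on every exit path, so a direct reclaimer that
repeatedly loses the race for shrinker_rwsem still yields the CPU instead of
looping straight back into reclaim. A minimal userspace analogue of the
pattern, with pthread_rwlock_tryrdlock() standing in for down_read_trylock()
and sched_yield() for cond_resched():

        #include <pthread.h>
        #include <sched.h>

        static pthread_rwlock_t shrinker_lock = PTHREAD_RWLOCK_INITIALIZER;

        static unsigned long shrink_slab_sketch(void)
        {
                unsigned long ret = 0;

                if (pthread_rwlock_tryrdlock(&shrinker_lock) != 0) {
                        /* Assume we'll be able to shrink next time */
                        ret = 1;
                        goto out;
                }
                /* ... walk the shrinker list, accumulating ret ... */
                pthread_rwlock_unlock(&shrinker_lock);
        out:
                sched_yield();  /* both exit paths now reschedule */
                return ret;
        }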
 
@@ -1842,16 +1847,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
        if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
                return false;
 
-       /*
-        * If we failed to reclaim and have scanned the full list, stop.
-        * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
-        *       faster but obviously would be less likely to succeed
-        *       allocation. If this is desirable, use GFP_REPEAT to decide
-        *       if both reclaimed and scanned should be checked or just
-        *       reclaimed
-        */
-       if (!nr_reclaimed && !nr_scanned)
-               return false;
+       /* Consider stopping depending on scan and reclaim activity */
+       if (sc->gfp_mask & __GFP_REPEAT) {
+               /*
+                * For __GFP_REPEAT allocations, stop reclaiming if the
+                * full LRU list has been scanned and we are still failing
+                * to reclaim pages. This full LRU scan is potentially
+                * expensive but a __GFP_REPEAT caller really wants to succeed
+                */
+               if (!nr_reclaimed && !nr_scanned)
+                       return false;
+       } else {
+               /*
+                * For non-__GFP_REPEAT allocations which can presumably
+                * fail without consequence, stop if we failed to reclaim
+                * any pages from the last SWAP_CLUSTER_MAX number of
+                * pages that were scanned. This returns to the caller
+                * faster, at the risk that reclaim/compaction stops too
+                * early and the resulting allocation attempt fails
+                */
+               if (!nr_reclaimed)
+                       return false;
+       }
 
        /*
         * If we have not reclaimed enough pages for compaction and the
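
Distilled, the new stop test keys off __GFP_REPEAT: an allocation that has
promised to retry hard keeps the expensive criterion (stop only after a full
LRU scan reclaims nothing), while everything else bails as soon as a single
scan window reclaims nothing. A stand-alone sketch of just this portion of
should_continue_reclaim(), where GFP_REPEAT_SKETCH is a stand-in bit rather
than the real gfp flag and the compaction checks that follow in the kernel
are omitted:

        #include <stdbool.h>

        #define GFP_REPEAT_SKETCH (1u << 0)     /* stand-in for __GFP_REPEAT */

        /* true: keep reclaiming; false: stop and let the caller cope */
        static bool keep_reclaiming(unsigned int gfp_mask,
                                    unsigned long nr_reclaimed,
                                    unsigned long nr_scanned)
        {
                if (gfp_mask & GFP_REPEAT_SKETCH)
                        return nr_reclaimed != 0 || nr_scanned != 0;
                return nr_reclaimed != 0;
        }
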
@@ -1883,12 +1900,12 @@ static void shrink_zone(int priority, struct zone *zone,
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
        enum lru_list l;
-       unsigned long nr_reclaimed;
+       unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-       unsigned long nr_scanned = sc->nr_scanned;
 
 restart:
        nr_reclaimed = 0;
+       nr_scanned = sc->nr_scanned;
        get_scan_count(zone, sc, nr, priority);
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
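
Re-snapshotting nr_scanned under the restart label matters because
shrink_zone() can loop back via should_continue_reclaim(): the values fed to
that check must be deltas for the current round, not totals accumulated since
function entry. Condensed, the patched flow looks like:

        restart:
                nr_reclaimed = 0;
                nr_scanned = sc->nr_scanned;    /* re-snapshot every round */
                /* ... scan and shrink the LRU lists; sc->nr_scanned grows ... */
                if (should_continue_reclaim(zone, nr_reclaimed,
                                            sc->nr_scanned - nr_scanned, sc))
                        goto restart;
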
@@ -1977,17 +1994,12 @@ static bool zone_reclaimable(struct zone *zone)
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
                struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1995,13 +2007,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
                        continue;
                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
-               if (zone_reclaimable(zone)) {
-                       all_unreclaimable = false;
-                       break;
-               }
+               if (!zone->all_unreclaimable)
+                       return false;
        }
 
-       return all_unreclaimable;
+       return true;
 }
 
 /*
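
The rewritten helper simply trusts the per-zone flag that reclaim itself
maintains (see the balance_pgdat() hunk at the end of this patch) and
short-circuits on the first zone that is still reclaimable. A userspace
analogue over an array of zones, ignoring the cpuset check for brevity:

        #include <stdbool.h>
        #include <stddef.h>

        struct zone_sketch {
                bool populated;
                bool all_unreclaimable; /* mirrors zone->all_unreclaimable */
        };

        /* One populated, still-reclaimable zone is enough to keep trying. */
        static bool all_unreclaimable_sketch(const struct zone_sketch *zones,
                                             size_t nr)
        {
                for (size_t i = 0; i < nr; i++) {
                        if (!zones[i].populated)
                                continue;
                        if (!zones[i].all_unreclaimable)
                                return false;
                }
                return true;
        }
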
@@ -2084,7 +2094,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        struct zone *preferred_zone;
 
                        first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
-                                                       NULL, &preferred_zone);
+                                               &cpuset_current_mems_allowed,
+                                               &preferred_zone);
                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
                }
        }
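
(With NULL for the nodemask, first_zones_zonelist() could hand back a
preferred zone the current task's cpuset does not even allow; passing
&cpuset_current_mems_allowed keeps the zone that wait_iff_congested()
throttles against consistent with the zones reclaim is actually scanning.)
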
@@ -2096,6 +2107,14 @@ out:
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
 
+       /*
+        * As hibernation is going on, kswapd is frozen so that it can't mark
+        * the zone as all_unreclaimable. Thus we bypass the all_unreclaimable
+        * check.
+        */
+       if (oom_killer_disabled)
+               return 0;
+
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
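
While kswapd is frozen for hibernation, zone->all_unreclaimable is never
updated, so the all_unreclaimable() result cannot be trusted; returning 0
before consulting it reports "no progress" honestly, and with the OOM killer
disabled for hibernation anyway, nothing is lost. The tail of
do_try_to_free_pages() after this patch, condensed:

        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;        /* made progress */
        if (oom_killer_disabled)
                return 0;       /* hibernation: the zone flags may be stale */
        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;       /* still reclaimable zones: don't OOM yet */
        return 0;
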
@@ -2242,7 +2261,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                return true;
 
        /* Check the watermark levels */
-       for (i = 0; i < pgdat->nr_zones; i++) {
+       for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
                if (!populated_zone(zone))
@@ -2272,7 +2291,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
         * must be balanced
         */
        if (order)
-               return pgdat_balanced(pgdat, balanced, classzone_idx);
+               return !pgdat_balanced(pgdat, balanced, classzone_idx);
        else
                return !all_zones_ok;
 }
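
Two fixes land here. The loop bound stops sleeping_prematurely() from
inspecting zones above the classzone that balance_pgdat() was never asked to
balance (pgdat->nr_zones spans the whole node). The return change fixes an
inverted polarity: pgdat_balanced() answers "is the node balanced?" while
this function answers "should kswapd keep working?", so with the old code a
balanced node returned true and kicked kswapd awake precisely when its work
was done; the negation restores the intended meaning.
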
@@ -2385,9 +2404,9 @@ loop_again:
                 * cause too much scanning of the lower zones.
                 */
                for (i = 0; i <= end_zone; i++) {
-                       int compaction;
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
+                       unsigned long balance_gap;
 
                        if (!populated_zone(zone))
                                continue;
@@ -2404,38 +2423,32 @@ loop_again:
                        mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
 
                        /*
-                        * We put equal pressure on every zone, unless one
-                        * zone has way too many pages free already.
+                        * We put equal pressure on every zone, unless
+                        * one zone has way too many pages free
+                        * already. The "too many pages" is defined
+                        * as the high wmark plus a "gap" where the
+                        * gap is either the low watermark or 1%
+                        * of the zone, whichever is smaller.
                         */
+                       balance_gap = min(low_wmark_pages(zone),
+                               (zone->present_pages +
+                                       KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+                               KSWAPD_ZONE_BALANCE_GAP_RATIO);
                        if (!zone_watermark_ok_safe(zone, order,
-                                       8*high_wmark_pages(zone), end_zone, 0))
+                                       high_wmark_pages(zone) + balance_gap,
+                                       end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
-                       reclaim_state->reclaimed_slab = 0;
-                       nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-                                               lru_pages);
-                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-                       total_scanned += sc.nr_scanned;
-
-                       compaction = 0;
-                       if (order &&
-                           zone_watermark_ok(zone, 0,
-                                              high_wmark_pages(zone),
-                                             end_zone, 0) &&
-                           !zone_watermark_ok(zone, order,
-                                              high_wmark_pages(zone),
-                                              end_zone, 0)) {
-                               compact_zone_order(zone,
-                                                  order,
-                                                  sc.gfp_mask, false,
-                                                  COMPACT_MODE_KSWAPD);
-                               compaction = 1;
+
+                               reclaim_state->reclaimed_slab = 0;
+                               nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
+                                                       lru_pages);
+                               sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+                               total_scanned += sc.nr_scanned;
+
+                               if (nr_slab == 0 && !zone_reclaimable(zone))
+                                       zone->all_unreclaimable = 1;
                        }
 
-                       if (zone->all_unreclaimable)
-                               continue;
-                       if (!compaction && nr_slab == 0 &&
-                           !zone_reclaimable(zone))
-                               zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
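
The net effect of the restructure above is the behaviour the subject line
names: shrink_slab() runs, and a zone can be declared all_unreclaimable, only
when the zone actually missed its (high wmark + balance_gap) target and was
shrunk. The balance_gap arithmetic rounds the 1% up before taking the smaller
of it and the low watermark; assuming the ratio constant is 100 (the "1%" the
comment names) and a hypothetical low watermark, a worked example for a 4GB
zone of 4KB pages:

        #include <stdio.h>

        #define KSWAPD_ZONE_BALANCE_GAP_RATIO 100       /* "1% of the zone" */

        int main(void)
        {
                unsigned long present_pages = 1048576;  /* 4GB in 4KB pages */
                unsigned long low_wmark = 16384;        /* hypothetical value */
                unsigned long one_percent =
                        (present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
                        KSWAPD_ZONE_BALANCE_GAP_RATIO;  /* = 10486 pages */
                unsigned long gap = low_wmark < one_percent
                                        ? low_wmark : one_percent;

                /* pressure is applied while free pages sit below
                 * high_wmark + gap, i.e. high_wmark + ~41MB here */
                printf("balance_gap = %lu pages\n", gap);
                return 0;
        }
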
@@ -2445,6 +2458,9 @@ loop_again:
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
 
+                       if (zone->all_unreclaimable)
+                               continue;
+
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
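
Finally, note the zone->all_unreclaimable continue has moved below the
may_writepage update: a zone already marked unreclaimable still skips the
watermark accounting at the bottom of the loop but no longer skips that
update. The scan/reclaim ratio test in the writepage condition, left as
context here, fires once fewer than about two thirds of scanned pages are
being reclaimed, since in integer arithmetic the condition is
scanned > reclaimed * 3/2:

        #include <stdbool.h>

        static bool should_writepage(unsigned long total_scanned,
                                     unsigned long nr_reclaimed)
        {
                return total_scanned > nr_reclaimed + nr_reclaimed / 2;
        }
        /* e.g. scanned = 3000, reclaimed = 1500: 3000 > 2250, start writeback */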