- Update to 2.6.25-rc3.
diff --git a/mm/rmap.c b/mm/rmap.c
index dbc2ca2..8fd527c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,7 +36,6 @@
  *                 mapping->tree_lock (widely used, in set_page_dirty,
  *                           in arch-dependent flush_dcache_mmap_lock,
  *                           within inode_lock in __sync_single_inode)
- *                   zone->lock (within radix tree node alloc)
  */
 
 #include <linux/mm.h>
@@ -49,6 +48,7 @@
 #include <linux/rcupdate.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 
@@ -284,7 +284,10 @@ static int page_referenced_one(struct page *page,
        if (!pte)
                goto out;
 
-       if (ptep_clear_flush_young(vma, address, pte))
+       if (vma->vm_flags & VM_LOCKED) {
+               referenced++;
+               *mapcount = 1;  /* break early from loop */
+       } else if (ptep_clear_flush_young(vma, address, pte))
                referenced++;
 
        /* Pretend the page is referenced if the task has the
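
Note on the VM_LOCKED branch above: setting *mapcount to 1 stops the reverse-map
walk early because page_referenced_one() decrements *mapcount on its way out (in
the unchanged tail of this function), so the callers' "if (!mapcount) break;"
fires right after the first mlocked VMA. A userspace toy model of that protocol
(an illustrative sketch, not kernel code):

#include <stdio.h>

/* Toy model: the walker decrements *mapcount before returning, just as
 * page_referenced_one() does; forcing it to 1 first stops the caller. */
static int walk_one(int mlocked, unsigned int *mapcount)
{
        int referenced = 0;

        if (mlocked) {
                referenced++;
                *mapcount = 1;          /* break early from loop */
        }
        (*mapcount)--;                  /* mirrors the function's exit path */
        return referenced;
}

int main(void)
{
        unsigned int mapcount = 3;      /* page mapped by three VMAs */
        int i, referenced = 0;

        for (i = 0; i < 3; i++) {
                referenced += walk_one(i == 0, &mapcount);
                if (!mapcount)          /* zero right after the mlocked VMA */
                        break;
        }
        printf("walked %d of 3 VMAs, referenced=%d\n", i + 1, referenced);
        return 0;
}
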
@@ -299,7 +302,8 @@ out:
        return referenced;
 }
 
-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+                               struct mem_cgroup *mem_cont)
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
@@ -312,6 +316,13 @@ static int page_referenced_anon(struct page *page)
 
        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               /*
+                * If we are reclaiming on behalf of a cgroup, skip
+                * counting references from VMAs that belong to a
+                * different cgroup.
+                */
+               if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont))
+                       continue;
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
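
The vm_match_cgroup() check above (also used in the file-backed walk below) is
what scopes the scan to a single cgroup; with mem_cont == NULL, global reclaim
still counts every mapping. In the 2.6.25-rc tree this helper lives in
<linux/memcontrol.h>; quoted from memory as an assumption, not as part of this
patch, it is roughly the following macro (later renamed mm_match_cgroup):

/* <linux/memcontrol.h>, 2.6.25-rc era (approximate): */
#define vm_match_cgroup(mm, cgroup)     \
        ((cgroup) == rcu_dereference((mm)->mem_cgroup))
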
@@ -332,7 +343,8 @@ static int page_referenced_anon(struct page *page)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+                               struct mem_cgroup *mem_cont)
 {
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
@@ -365,6 +377,13 @@ static int page_referenced_file(struct page *page)
        mapcount = page_mapcount(page);
 
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               /*
+                * If we are reclaiming on behalf of a cgroup, skip
+                * counting references from VMAs that belong to a
+                * different cgroup.
+                */
+               if (mem_cont && !vm_match_cgroup(vma->vm_mm, mem_cont))
+                       continue;
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
@@ -387,7 +406,8 @@ static int page_referenced_file(struct page *page)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+                       struct mem_cgroup *mem_cont)
 {
        int referenced = 0;
 
@@ -399,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)
 
        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
-                       referenced += page_referenced_anon(page);
+                       referenced += page_referenced_anon(page, mem_cont);
                else if (is_locked)
-                       referenced += page_referenced_file(page);
+                       referenced += page_referenced_file(page, mem_cont);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
-                               referenced += page_referenced_file(page);
+                               referenced +=
+                                       page_referenced_file(page, mem_cont);
                        unlock_page(page);
                }
        }
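
Callers pick the scope through the new third argument: NULL means "count
references from every mapping" (global reclaim), while a non-NULL mem_cgroup
restricts the count to that group's mms. The matching vmscan.c change in this
series ends up with call sites of roughly this shape (a sketch, assuming
scan_control carries the target group as sc->mem_cgroup, as it does in 2.6.25):

/* mm/vmscan.c call-site shape after this series (sketch): */
referenced = page_referenced(page, 1, sc->mem_cgroup);

/* sc->mem_cgroup == NULL  -> global reclaim, nothing is skipped;
 * sc->mem_cgroup != NULL  -> references from other cgroups are ignored,
 * so their activity cannot keep this group's pages looking hot. */
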
@@ -552,8 +573,14 @@ void page_add_anon_rmap(struct page *page,
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        if (atomic_inc_and_test(&page->_mapcount))
                __page_set_anon_rmap(page, vma, address);
-       else
+       else {
                __page_check_anon_rmap(page, vma, address);
+               /*
+                * We unconditionally charged during prepare; we uncharge
+                * here. This takes care of balancing the reference counts.
+                */
+               mem_cgroup_uncharge_page(page);
+       }
 }
 
 /*
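Why uncharge on the non-first mapping: the fault path charges the page
unconditionally before it knows whether page_add_anon_rmap() (or
page_add_file_rmap() below) will be the first mapping; when _mapcount only goes
from n to n+1, the extra charge is handed back, leaving exactly one charge per
mapped page. A userspace toy model of the balancing (illustrative only):

#include <stdio.h>

static int charge;              /* stands in for the memcg counter  */
static int mapcount = -1;       /* page->_mapcount starts at -1     */

/* Toy model of page_add_*_rmap(): the caller has already charged. */
static void add_rmap(void)
{
        if (++mapcount > 0)     /* not the first mapping             */
                charge--;       /* mem_cgroup_uncharge_page()        */
        /* else mapcount hit 0: first mapping, keep the charge       */
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {
                charge++;       /* unconditional charge in "prepare" */
                add_rmap();
        }
        /* three mappings, but the page is charged exactly once */
        printf("_mapcount=%d charge=%d\n", mapcount, charge);
        return 0;
}
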
@@ -584,6 +611,12 @@ void page_add_file_rmap(struct page *page)
 {
        if (atomic_inc_and_test(&page->_mapcount))
                __inc_zone_page_state(page, NR_FILE_MAPPED);
+       else
+               /*
+                * We unconditionally charged during prepare; we uncharge
+                * here. This takes care of balancing the reference counts.
+                */
+               mem_cgroup_uncharge_page(page);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -644,6 +677,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
                        page_clear_dirty(page);
                        set_page_dirty(page);
                }
+               mem_cgroup_uncharge_page(page);
+
                __dec_zone_page_state(page,
                                PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }