add objrmap nonlinear and anon-vma ppc support
author Hubert Mantel <mantel@suse.de>
Thu, 25 Mar 2004 10:28:01 +0000 (10:28 +0000)
committer Hubert Mantel <mantel@suse.de>
Thu, 25 Mar 2004 10:28:01 +0000 (10:28 +0000)
suse-commit: 41e04cc4d1eda5f4448f93ced7aea9f85b6b654f

14 files changed:
arch/arm/mm/mm-armv.c
arch/ppc/mm/pgtable.c
arch/ppc64/mm/hugetlbpage.c
arch/ppc64/mm/tlb.c
include/asm-arm/kmap_types.h
include/asm-i386/kmap_types.h
include/asm-mips/kmap_types.h
include/asm-ppc64/pgalloc.h
include/asm-sparc/kmap_types.h
include/linux/objrmap.h
include/linux/page-flags.h
mm/fremap.c
mm/memory.c
mm/objrmap.c

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 4d98fea..602ebb1 100644
@@ -231,7 +231,7 @@ void free_pgd_slow(pgd_t *pgd)
 
        pte = pmd_page(*pmd);
        pmd_clear(pmd);
-       pgtable_remove_rmap(pte);
+       dec_page_state(nr_page_table_pages);
        pte_free(pte);
        pmd_free(pmd);
 free:
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index b62b30c..1f6f5b9 100644
@@ -86,9 +86,14 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
        extern int mem_init_done;
        extern void *early_get_page(void);
 
-       if (mem_init_done)
+       if (mem_init_done) {
                pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-       else
+               if (pte) {
+                       struct page *ptepage = virt_to_page(pte);
+                       ptepage->mapping = (void *) mm;
+                       ptepage->index = address & PMD_MASK;
+               }
+       } else
                pte = (pte_t *)early_get_page();
        if (pte)
                clear_page(pte);
@@ -106,8 +111,11 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 #endif
 
        pte = alloc_pages(flags, 0);
-       if (pte)
+       if (pte) {
+               pte->mapping = (void *) mm;
+               pte->index = address & PMD_MASK;
                clear_highpage(pte);
+       }
        return pte;
 }
 
@@ -116,6 +124,7 @@ void pte_free_kernel(pte_t *pte)
 #ifdef CONFIG_SMP
        hash_page_sync();
 #endif
+       virt_to_page(pte)->mapping = NULL;
        free_page((unsigned long)pte);
 }
 
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 124f06c..126572a 100644
@@ -272,7 +272,7 @@ static int open_32bit_htlbpage_range(struct mm_struct *mm)
                        }
 
                        pmd_clear(pmd);
-                       pgtable_remove_rmap(page);
+                       dec_page_state(nr_page_table_pages);
                        pte_free(page);
                }
        }
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index 99be5d0..87bb241 100644
@@ -31,7 +31,6 @@
 #include <asm/tlb.h>
 #include <asm/hardirq.h>
 #include <linux/highmem.h>
-#include <asm/rmap.h>
 
 DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
@@ -59,7 +58,7 @@ void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
 
        ptepage = virt_to_page(ptep);
        mm = (struct mm_struct *) ptepage->mapping;
-       addr = ptep_to_address(ptep);
+       addr = ptepage->index + (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
 
        if (REGION_ID(addr) == USER_REGION_ID)
                context = mm->context.id;
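
The addr computation above recovers the virtual address mapped by a pte purely from its
pte page: ->index holds the PMD-aligned base address stored at pte_alloc time (see the
arch/ppc/mm/pgtable.c hunk above and the include/asm-ppc64/pgalloc.h hunk below), and the
pte's byte offset within its page is scaled up to whole pages.  A standalone sketch of
that arithmetic with illustrative constants (4K pages, 8-byte ptes; the only assumption
that matters is that PTRS_PER_PTE entries exactly fill one page):

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define PTE_BYTES    8UL                        /* stand-in for sizeof(pte_t) */
#define PTRS_PER_PTE (PAGE_SIZE / PTE_BYTES)

/* index: the PMD-aligned address saved in ptepage->index at allocation time */
static unsigned long ptep_to_address(unsigned long index, unsigned long ptep)
{
        /* byte offset of the pte within its page, scaled so that each
         * pte accounts for one PAGE_SIZE of virtual address space */
        return index + ((ptep & ~PAGE_MASK) * PTRS_PER_PTE);
}

int main(void)
{
        unsigned long index = 0x30000000UL;                  /* ptepage->index  */
        unsigned long ptep  = 0xc0001000UL + 5 * PTE_BYTES;  /* 6th pte in page */

        /* prints 0x30005000: the 6th pte maps the 6th page under this pmd */
        printf("%#lx\n", ptep_to_address(index, ptep));
        return 0;
}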
diff --git a/include/asm-arm/kmap_types.h b/include/asm-arm/kmap_types.h
index 69d9679..45def13 100644
@@ -14,7 +14,6 @@ enum km_type {
        KM_BIO_DST_IRQ,
        KM_PTE0,
        KM_PTE1,
-       KM_PTE2,
        KM_IRQ0,
        KM_IRQ1,
        KM_SOFTIRQ0,
diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h
index 5f524b3..a1a9810 100644
@@ -19,14 +19,13 @@ D(5)        KM_BIO_SRC_IRQ,
 D(6)   KM_BIO_DST_IRQ,
 D(7)   KM_PTE0,
 D(8)   KM_PTE1,
-D(9)   KM_PTE2,
-D(10)  KM_IRQ0,
-D(11)  KM_IRQ1,
-D(12)  KM_SOFTIRQ0,
-D(13)  KM_SOFTIRQ1,
-D(14)  KM_KDB,
-D(15)  KM_DUMP,
-D(16)  KM_TYPE_NR
+D(9)   KM_IRQ0,
+D(10)  KM_IRQ1,
+D(11)  KM_SOFTIRQ0,
+D(12)  KM_SOFTIRQ1,
+D(13)  KM_KDB,
+D(14)  KM_DUMP,
+D(15)  KM_TYPE_NR
 };
 
 #undef D
diff --git a/include/asm-mips/kmap_types.h b/include/asm-mips/kmap_types.h
index a117c0a..9668222 100644
@@ -19,7 +19,6 @@ D(5)  KM_BIO_SRC_IRQ,
 D(6)   KM_BIO_DST_IRQ,
 D(7)   KM_PTE0,
 D(8)   KM_PTE1,
-D(9)   KM_PTE2,
 D(10)  KM_IRQ0,
 D(11)  KM_IRQ1,
 D(12)  KM_SOFTIRQ0,
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 43c70d2..e0a6b4e 100644
@@ -48,28 +48,42 @@ pmd_free(pmd_t *pmd)
        pmd_populate_kernel(mm, pmd, page_address(pte_page))
 
 static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte;
+       pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+       if (pte) {
+               struct page *ptepage = virt_to_page(pte);
+               ptepage->mapping = (void *) mm;
+               ptepage->index = address & PMD_MASK;
+       }
+       return pte;
 }
 
 static inline struct page *
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = pte_alloc_one_kernel(mm, address);
-
-       if (pte)
-               return virt_to_page(pte);
-
-       return NULL;
+       pte_t *pte;
+       pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+       if (pte) {
+               struct page *ptepage = virt_to_page(pte);
+               ptepage->mapping = (void *) mm;
+               ptepage->index = address & PMD_MASK;
+               return ptepage;
+       }
+       return NULL;
 }
                
 static inline void pte_free_kernel(pte_t *pte)
 {
+       virt_to_page(pte)->mapping = NULL;
        kmem_cache_free(zero_cache, pte);
 }
 
-#define pte_free(pte_page)     pte_free_kernel(page_address(pte_page))
+static inline void pte_free(struct page *ptepage)
+{
+       ptepage->mapping = NULL;
+       kmem_cache_free(zero_cache, page_address(ptepage));
+}
 
 struct pte_freelist_batch
 {
diff --git a/include/asm-sparc/kmap_types.h b/include/asm-sparc/kmap_types.h
index cbacf44..e215f71 100644
@@ -11,7 +11,6 @@ enum km_type {
        KM_BIO_DST_IRQ,
        KM_PTE0,
        KM_PTE1,
-       KM_PTE2,
        KM_IRQ0,
        KM_IRQ1,
        KM_SOFTIRQ0,
diff --git a/include/linux/objrmap.h b/include/linux/objrmap.h
index cfff591..0f3d8fe 100644
@@ -1,5 +1,5 @@
-#ifndef _LINUX_RMAP_H
-#define _LINUX_RMAP_H
+#ifndef _LINUX_OBJRMAP_H
+#define _LINUX_OBJRMAP_H
 /*
  * Declarations for Object Reverse Mapping functions in mm/objrmap.c
  */
@@ -75,4 +75,4 @@ int FASTCALL(page_referenced(struct page *));
 
 #endif /* CONFIG_MMU */
 
-#endif /* _LINUX_RMAP_H */
+#endif /* _LINUX_OBJRMAP_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 7e2c842..6f328a6 100644
 #define PG_nosave              14      /* Used for system suspend/resume */
 #define PG_maplock             15      /* lock bit for ->as.anon_vma and ->mapcount */
 
+#define PG_swapcache           16      /* SwapCache page */
 #define PG_mappedtodisk                17      /* Has blocks allocated on-disk */
 #define PG_reclaim             18      /* To be reclaimed asap */
 #define PG_compound            19      /* Part of a compound page */
 #define PG_anon                        20      /* Anonymous page */
-#define PG_swapcache           21      /* SwapCache page */
 
 
 /*
diff --git a/mm/fremap.c b/mm/fremap.c
index 1f755d0..1bc8368 100644
@@ -61,8 +61,8 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
        pmd_t *pmd;
        pte_t pte_val;
 
-       pgd = pgd_offset(mm, addr);
        spin_lock(&mm->page_table_lock);
+       pgd = pgd_offset(mm, addr);
 
        pmd = pmd_alloc(mm, pgd, addr);
        if (!pmd)
@@ -103,8 +103,8 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
        pmd_t *pmd;
        pte_t pte_val;
 
-       pgd = pgd_offset(mm, addr);
        spin_lock(&mm->page_table_lock);
+       pgd = pgd_offset(mm, addr);
 
        pmd = pmd_alloc(mm, pgd, addr);
        if (!pmd)
diff --git a/mm/memory.c b/mm/memory.c
index e720b1f..8fc8cda 100644
@@ -97,7 +97,7 @@ static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
 
        if (pmd_none(*dir))
                return;
-       if (pmd_bad(*dir)) {
+       if (unlikely(pmd_bad(*dir))) {
                pmd_ERROR(*dir);
                pmd_clear(dir);
                return;
@@ -115,7 +115,7 @@ static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
 
        if (pgd_none(*dir))
                return;
-       if (pgd_bad(*dir)) {
+       if (unlikely(pgd_bad(*dir))) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
@@ -232,7 +232,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                
                if (pgd_none(*src_pgd))
                        goto skip_copy_pmd_range;
-               if (pgd_bad(*src_pgd)) {
+               if (unlikely(pgd_bad(*src_pgd))) {
                        pgd_ERROR(*src_pgd);
                        pgd_clear(src_pgd);
 skip_copy_pmd_range:   address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -253,7 +253,7 @@ skip_copy_pmd_range:        address = (address + PGDIR_SIZE) & PGDIR_MASK;
                
                        if (pmd_none(*src_pmd))
                                goto skip_copy_pte_range;
-                       if (pmd_bad(*src_pmd)) {
+                       if (unlikely(pmd_bad(*src_pmd))) {
                                pmd_ERROR(*src_pmd);
                                pmd_clear(src_pmd);
 skip_copy_pte_range:
@@ -383,7 +383,7 @@ zap_pte_range(struct mmu_gather *tlb, pmd_t * pmd,
 
        if (pmd_none(*pmd))
                return;
-       if (pmd_bad(*pmd)) {
+       if (unlikely(pmd_bad(*pmd))) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
@@ -426,27 +426,25 @@ zap_pte_range(struct mmu_gather *tlb, pmd_t * pmd,
 
 static void
 zap_pmd_range(struct mmu_gather *tlb, pgd_t * dir,
-               unsigned long address, unsigned long size)
+               unsigned long address, unsigned long end)
 {
        pmd_t * pmd;
-       unsigned long end;
 
        if (pgd_none(*dir))
                return;
-       if (pgd_bad(*dir)) {
+       if (unlikely(pgd_bad(*dir))) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
-       end = address + size;
        if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
                end = ((address + PGDIR_SIZE) & PGDIR_MASK);
        do {
                zap_pte_range(tlb, pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK; 
                pmd++;
-       } while (address < end);
+       } while (address && (address < end));
 }
 
 void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
@@ -464,7 +462,7 @@ void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        dir = pgd_offset(vma->vm_mm, address);
        tlb_start_vma(tlb, vma);
        do {
-               zap_pmd_range(tlb, dir, address, end - address);
+               zap_pmd_range(tlb, dir, address, end);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
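
A small standalone illustration (constants are made up) of why the walkers above loop on
"address && (address < end)" rather than plain "address < end": when a mapping reaches
the top of the address space, advancing address by PGDIR_SIZE wraps it to zero, and the
extra test terminates the walk instead of letting it restart from the bottom of the
address space.

#include <stdio.h>

#define PGDIR_SIZE 0x40000000UL            /* illustrative: 1 GiB per pgd slot */
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

int main(void)
{
        unsigned long address = 0xffffffffc0000000UL;  /* last pgd slot        */
        unsigned long end     = 0xfffffffffffff000UL;  /* top of address space */
        int slots = 0;

        do {
                /* ... zap one pgd slot's worth of ptes here ... */
                slots++;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;  /* wraps to 0 */
        } while (address && (address < end));  /* plain (address < end) would
                                                  keep walking from address 0 */

        printf("walked %d pgd slot(s)\n", slots);       /* prints 1 */
        return 0;
}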
@@ -634,7 +632,7 @@ follow_page(struct mm_struct *mm, unsigned long address, int write)
                goto out;
        if (pmd_huge(*pmd))
                return follow_huge_pmd(mm, address, pmd, write);
-       if (pmd_bad(*pmd))
+       if (unlikely(pmd_bad(*pmd)))
                goto out;
 
        ptep = pte_offset_map(pmd, address);
diff --git a/mm/objrmap.c b/mm/objrmap.c
index d3f3d11..792d3de 100644
  * Released under the General Public License (GPL).
  */
 
+/*
+ * nonlinear pagetable walking elaborated from mm/memory.c under
+ * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ */
+
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
@@ -62,7 +67,7 @@ static inline void validate_anon_vma_find_vma(struct vm_area_struct * find_vma)
  * 
  * It is the caller's responsibility to unmap the pte if it is returned.
  */
-static inline pte_t *
+static pte_t *
 find_pte(struct vm_area_struct *vma, struct page *page, unsigned long *addr)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -120,12 +125,19 @@ page_referenced_one(struct vm_area_struct *vma, struct page *page)
        pte_t *pte;
        int referenced = 0;
 
-       if (!spin_trylock(&mm->page_table_lock))
+       /*
+        * Tracking the referenced info is too expensive
+        * for nonlinear mappings.
+        */
+       if (vma->vm_flags & VM_NONLINEAR)
+               goto out;
+
+       if (unlikely(!spin_trylock(&mm->page_table_lock)))
                goto out;
 
        pte = find_pte(vma, page, NULL);
        if (pte) {
-               if (ptep_test_and_clear_young(pte))
+               if (pte_young(*pte) && ptep_test_and_clear_young(pte))
                        referenced++;
                pte_unmap(pte);
        }
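
The pte_young(*pte) test added in front of ptep_test_and_clear_young() above is the
usual check-before-atomic pattern: do a cheap plain read first and only pay for the
atomic read-modify-write when the referenced bit is actually set, sparing the locked
operation in the common not-referenced case.  A standalone sketch of the same pattern,
with hypothetical names and a plain word standing in for the pte:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define YOUNG 0x1u   /* hypothetical "accessed" bit in a pte-like word */

/* atomic read-modify-write: clear the bit and report whether it was set */
static bool test_and_clear_young(atomic_uint *pte)
{
        return atomic_fetch_and(pte, ~YOUNG) & YOUNG;
}

int main(void)
{
        atomic_uint pte = 0x0u;   /* referenced bit not set */
        int referenced = 0;

        /* cheap plain load first; the RMW only runs when the bit is set */
        if ((atomic_load(&pte) & YOUNG) && test_and_clear_young(&pte))
                referenced++;

        printf("referenced = %d\n", referenced);   /* 0, and no RMW was issued */
        return 0;
}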
@@ -158,7 +170,7 @@ page_referenced_inode(struct page *page)
 
        BUG_ON(PageSwapCache(page));
 
-       if (down_trylock(&mapping->i_shared_sem))
+       if (unlikely(down_trylock(&mapping->i_shared_sem)))
                goto out;
 
        list_for_each_entry(vma, &mapping->i_mmap, shared)
@@ -212,7 +224,7 @@ int fastcall page_referenced(struct page * page)
        BUG_ON(!page->mapping);
 
        if (page_test_and_clear_young(page))
-               mark_page_accessed(page);
+               referenced++;
 
        if (TestClearPageReferenced(page))
                referenced++;
@@ -257,8 +269,6 @@ void fastcall page_add_rmap(struct page *page, struct vm_area_struct * vma,
        if (PageReserved(page))
                return;
 
-       BUG_ON(vma->vm_flags & VM_RESERVED);
-
        page_map_lock(page);
 
        /*
@@ -327,8 +337,11 @@ void fastcall page_remove_rmap(struct page *page)
        if (!page_mapped(page))
                goto out_unlock;
 
-       if (!--page->mapcount)
+       if (!--page->mapcount) {
                dec_page_state(nr_mapped);
+               if (page_test_and_clear_dirty(page))
+                       set_page_dirty(page);
+       }
 
        if (PageAnon(page))
                anon_vma_page_unlink(page);
@@ -344,38 +357,13 @@ void fastcall page_remove_rmap(struct page *page)
   
  out_unlock:
        page_map_unlock(page);
-       return;
 }
 
-/**
- * try_to_unmap_one - unmap a page using the object-based rmap method
- * @page: the page to unmap
- *
- * Determine whether a page is mapped in a given vma and unmap it if it's found.
- *
- * This function is strictly a helper function for try_to_unmap_inode.
- */
-static int
-try_to_unmap_one(struct vm_area_struct *vma, struct page *page)
+static void
+unmap_pte_page(struct page * page, struct vm_area_struct * vma,
+              unsigned long address, pte_t * pte)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long address;
-       pte_t *pte;
        pte_t pteval;
-       int ret = SWAP_AGAIN;
-
-       if (!spin_trylock(&mm->page_table_lock))
-               return ret;
-
-       pte = find_pte(vma, page, &address);
-       if (!pte)
-               goto out;
-
-       BUG_ON(vma->vm_flags & VM_RESERVED);
-       if (vma->vm_flags & VM_LOCKED) {
-               ret =  SWAP_FAIL;
-               goto out_unmap;
-       }
 
        flush_cache_page(vma, address);
        pteval = ptep_clear_flush(vma, address, pte);
@@ -388,9 +376,14 @@ try_to_unmap_one(struct vm_area_struct *vma, struct page *page)
                swp_entry_t entry = { .val = page->private };
                swap_duplicate(entry);
                set_pte(pte, swp_entry_to_pte(entry));
+
                BUG_ON(pte_file(*pte));
+               BUG_ON(!PageAnon(page));
+               BUG_ON(!page->mapping);
+               BUG_ON(!page->mapcount);
        } else {
                unsigned long pgidx;
+
                /*
                 * If a nonlinear mapping then store the file page offset
                 * in the pte.
@@ -402,22 +395,173 @@ try_to_unmap_one(struct vm_area_struct *vma, struct page *page)
                        set_pte(pte, pgoff_to_pte(page->index));
                        BUG_ON(!pte_file(*pte));
                }
+
+               BUG_ON(!page->mapping);
+               BUG_ON(!page->mapcount);
+               BUG_ON(PageAnon(page));
        }
 
        if (pte_dirty(pteval))
                set_page_dirty(page);
 
-       BUG_ON(!page->mapcount);
-
-       mm->rss--;
+       vma->vm_mm->rss--;
        if (!--page->mapcount && PageAnon(page))
                anon_vma_page_unlink(page);
        page_cache_release(page);
+}
 
-out_unmap:
-       pte_unmap(pte);
+static void
+try_to_unmap_nonlinear_pte(struct vm_area_struct * vma,
+                          pmd_t * pmd, unsigned long address, unsigned long size)
+{
+       unsigned long offset;
+       pte_t *ptep;
 
-out:
+       if (pmd_none(*pmd))
+               return;
+       if (unlikely(pmd_bad(*pmd))) {
+               pmd_ERROR(*pmd);
+               pmd_clear(pmd);
+               return;
+       }
+       ptep = pte_offset_map(pmd, address);
+       offset = address & ~PMD_MASK;
+       if (offset + size > PMD_SIZE)
+               size = PMD_SIZE - offset;
+       size &= PAGE_MASK;
+       for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+               pte_t pte = *ptep;
+               if (pte_none(pte))
+                       continue;
+               if (pte_present(pte)) {
+                       unsigned long pfn = pte_pfn(pte);
+                       struct page * page;
+
+                       if (!pfn_valid(pfn))
+                               continue;
+                       page = pfn_to_page(pfn);
+                       if (PageReserved(page))
+                               continue;
+                       if (pte_young(pte) && ptep_test_and_clear_young(ptep))
+                               continue;
+                       /*
+                        * any other page in the nonlinear mapping will not wait
+                        * on us since only one cpu can take the i_shared_sem
+                        * and reach this point.
+                        */
+                       page_map_lock(page);
+                       /* check that we're not in between set_pte and page_add_rmap */
+                       if (page_mapped(page)) {
+                               unmap_pte_page(page, vma, address + offset, ptep);
+                               if (!page_mapped(page) && page_test_and_clear_dirty(page))
+                                       set_page_dirty(page);
+                       }
+                       page_map_unlock(page);
+               }
+       }
+       pte_unmap(ptep-1);
+}
+
+static void
+try_to_unmap_nonlinear_pmd(struct vm_area_struct * vma,
+                          pgd_t * dir, unsigned long address, unsigned long end)
+{
+       pmd_t * pmd;
+
+       if (pgd_none(*dir))
+               return;
+       if (unlikely(pgd_bad(*dir))) {
+               pgd_ERROR(*dir);
+               pgd_clear(dir);
+               return;
+       }
+       pmd = pmd_offset(dir, address);
+       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
+       do {
+               try_to_unmap_nonlinear_pte(vma, pmd, address, end - address);
+               address = (address + PMD_SIZE) & PMD_MASK; 
+               pmd++;
+       } while (address && (address < end));
+}
+
+static void
+try_to_unmap_nonlinear(struct vm_area_struct *vma)
+{
+       pgd_t * dir;
+       unsigned long address = vma->vm_start, end = vma->vm_end;
+
+       dir = pgd_offset(vma->vm_mm, address);
+       do {
+               try_to_unmap_nonlinear_pmd(vma, dir, address, end);
+               address = (address + PGDIR_SIZE) & PGDIR_MASK;
+               dir++;
+       } while (address && (address < end));
+}
+
+/**
+ * try_to_unmap_one - unmap a page using the object-based rmap method
+ * @page: the page to unmap
+ *
+ * Determine whether a page is mapped in a given vma and unmap it if it's found.
+ *
+ * This function is strictly a helper for try_to_unmap_inode and try_to_unmap_anon.
+ */
+static int
+try_to_unmap_one(struct vm_area_struct *vma, struct page *page, int * young)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long address;
+       pte_t *pte;
+       int ret;
+
+       BUG_ON(vma->vm_flags & VM_RESERVED);
+       if (unlikely(vma->vm_flags & VM_LOCKED))
+               return SWAP_FAIL;
+
+       ret = SWAP_AGAIN;
+       if (unlikely(!spin_trylock(&mm->page_table_lock)))
+               return ret;
+
+       if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
+               /*
+                * If this was a false positive generated by a
+                * failed trylock in the referenced pass, avoid
+                * paying the big cost of the nonlinear swap:
+                * we'd better be sure we have to pay that cost
+                * before running it.
+                */
+               if (!*young) {
+                       /*
+                        * All that matters is that the page won't go
+                        * away under us after we unlock.
+                        */
+                       page_map_unlock(page);
+                       try_to_unmap_nonlinear(vma);
+                       page_map_lock(page);
+               }
+               goto out;
+       }
+
+       pte = find_pte(vma, page, &address);
+       if (!pte)
+               goto out;
+
+       /*
+        * We use trylocks in the "referenced" methods; if they fail
+        * we let the VM go ahead and unmap anyway to avoid lock
+        * congestion, so here we may be trying to unmap young
+        * ptes.  If that happens we give up unmapping this page
+        * and clear all other reference bits instead (basically
+        * downgrading to a page_referenced pass).
+        */
+       if ((!pte_young(*pte) || !ptep_test_and_clear_young(pte)) && !*young)
+               unmap_pte_page(page, vma, address, pte);
+       else
+               *young = 1;
+
+       pte_unmap(pte);
+ out:
        spin_unlock(&mm->page_table_lock);
        return ret;
 }
@@ -439,21 +583,21 @@ try_to_unmap_inode(struct page *page)
 {
        struct address_space *mapping = page->mapping;
        struct vm_area_struct *vma;
-       int ret = SWAP_AGAIN;
+       int ret = SWAP_AGAIN, young = 0;
 
        BUG_ON(PageSwapCache(page));
 
-       if (down_trylock(&mapping->i_shared_sem))
+       if (unlikely(down_trylock(&mapping->i_shared_sem)))
                return ret;
        
        list_for_each_entry(vma, &mapping->i_mmap, shared) {
-               ret = try_to_unmap_one(vma, page);
+               ret = try_to_unmap_one(vma, page, &young);
                if (ret == SWAP_FAIL || !page->mapcount)
                        goto out;
        }
 
        list_for_each_entry(vma, &mapping->i_mmap_shared, shared) {
-               ret = try_to_unmap_one(vma, page);
+               ret = try_to_unmap_one(vma, page, &young);
                if (ret == SWAP_FAIL || !page->mapcount)
                        goto out;
        }
@@ -466,7 +610,7 @@ out:
 static int
 try_to_unmap_anon(struct page * page)
 {
-       int ret = SWAP_AGAIN;
+       int ret = SWAP_AGAIN, young = 0;
        struct vm_area_struct * vma;
        anon_vma_t * anon_vma = (anon_vma_t *) page->mapping;
 
@@ -476,7 +620,7 @@ try_to_unmap_anon(struct page * page)
        spin_lock(&anon_vma->anon_vma_lock);
        BUG_ON(list_empty(&anon_vma->anon_vma_head));
        list_for_each_entry(vma, &anon_vma->anon_vma_head, anon_vma_node) {
-               ret = try_to_unmap_one(vma, page);
+               ret = try_to_unmap_one(vma, page, &young);
                if (ret == SWAP_FAIL || !page->mapcount)
                        break;
        }
@@ -522,7 +666,10 @@ int fastcall try_to_unmap(struct page * page)
        if (!page_mapped(page)) {
                dec_page_state(nr_mapped);
                ret = SWAP_SUCCESS;
+               if (page_test_and_clear_dirty(page))
+                       set_page_dirty(page);
        }
+
        return ret;
 }