hugetlb: fix race condition in hugetlb_fault()
[linux-flexiantxendom0.git] / mm / hugetlb.c
index c6d342d..bd936ed 100644
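This gitweb view appears to fold several independent mainline changes into a single diff: an include cleanup, whitespace and brace-style fixes, refcount initialization for gigantic compound pages, a locking fix in the surplus-page path, a page-leak fix in the hugetlb_cow() error path, and the titular race fix in hugetlb_fault(). Short notes follow each hunk below.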
@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
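<linux/io.h> is the preferred include here: it wraps the architecture's <asm/io.h> (checkpatch.pl warns when asm/io.h is included directly).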
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- *     down_write(&mm->mmap_sem);
+ *     down_write(&mm->mmap_sem);
  * or
- *     down_read(&mm->mmap_sem);
- *     mutex_lock(&hugetlb_instantiation_mutex);
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
        struct list_head link;
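The removed and re-added comment lines above are identical except for whitespace, so this hunk is an indentation cleanup only. For reference, a minimal sketch (not part of the patch) of the fault-side locking order the comment describes:

	down_read(&mm->mmap_sem);
	mutex_lock(&hugetlb_instantiation_mutex);
	/* ... look up or instantiate the huge page ... */
	mutex_unlock(&hugetlb_instantiation_mutex);
	up_read(&mm->mmap_sem);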
@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-                               1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-                               1 << PG_private | 1<< PG_writeback);
+               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+                               1 << PG_referenced | 1 << PG_dirty |
+                               1 << PG_active | 1 << PG_reserved |
+                               1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
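This hunk only reflows the flag mask across lines and normalizes "1<<" to "1 << "; the set of flags cleared is unchanged.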
@@ -575,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
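Compound tail pages are expected to carry a reference count of zero; adding set_page_count(p, 0) here presumably brings the gigantic-page path in line with prep_compound_page(), which already zeroes tail-page counts for regular compound pages.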
@@ -591,7 +593,6 @@ int PageHuge(struct page *page)
 
        return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
@@ -900,7 +901,6 @@ retry:
        h->resv_huge_pages += delta;
        ret = 0;
 
-       spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
@@ -914,6 +914,7 @@ retry:
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
 free:
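Moving the spin_unlock() below the loop makes the enqueue atomic with respect to hugetlb_lock: the reservation counters are updated and the surplus pages placed on the free list in one critical section. With the earlier unlock, enqueue_huge_page() manipulated the per-hstate free list without the lock, and a racing allocator could see resv_huge_pages already raised before the pages were actually available.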
@@ -2132,9 +2133,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
-       }
 }
 
 
@@ -2189,9 +2189,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp)) {
+       if (non_swap_entry(swp) && is_migration_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2202,9 +2202,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
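The three hunks above drop braces around single-statement branches, per the kernel's CodingStyle; no functional change.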
@@ -2423,6 +2423,8 @@ retry_avoidcopy:
         * anon_vma prepared.
         */
        if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
                /* Caller expects lock to be held */
                spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
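Without the two page_cache_release() calls, returning VM_FAULT_OOM here would leak the references taken on old_page and new_page earlier in hugetlb_cow(). The path still re-takes page_table_lock first because, as the comment says, the caller expects it held on return.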
@@ -2567,7 +2569,7 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON | 
+                       ret = VM_FAULT_HWPOISON |
                              VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
@@ -2635,7 +2637,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        migration_entry_wait(mm, (pmd_t *)ptep, address);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-                       return VM_FAULT_HWPOISON_LARGE | 
+                       return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
        }
 
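The two hunks above only strip trailing whitespace after the '|' operators; the VM_FAULT_SET_HINDEX() values are unchanged.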
@@ -2684,6 +2686,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * so no worry about deadlock.
         */
        page = pte_page(entry);
+       get_page(page);
        if (page != pagecache_page)
                lock_page(page);
 
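This get_page() is the race fix named in the commit title: hugetlb_fault() previously used the page, and could sleep in lock_page(), without holding a reference of its own, so a concurrent path dropping the last reference (for instance hugetlb_cow() releasing the old page) could free it out from under this thread. A hypothetical interleaving, reconstructed from the diff rather than quoted from the changelog:

	/*
	 *   CPU A: hugetlb_fault()         CPU B
	 *   ----------------------         -----
	 *   page = pte_page(entry)
	 *   lock_page(page)
	 *                                  puts the last reference;
	 *                                  the huge page is freed
	 *   unlock_page(page)   <-- touches a freed, possibly
	 *                           recycled page
	 */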
@@ -2715,6 +2718,7 @@ out_page_table_lock:
        }
        if (page != pagecache_page)
                unlock_page(page);
+       put_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
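The put_page() balances the get_page() taken in the fault path above; every exit after that point presumably funnels through out_page_table_lock, so the reference is always dropped.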