Update to 3.4-final.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 470dcda..f0e5306 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -89,7 +89,8 @@ struct khugepaged_scan {
        struct list_head mm_head;
        struct mm_slot *mm_slot;
        unsigned long address;
-} khugepaged_scan = {
+};
+static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
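This first hunk is a linkage cleanup: khugepaged_scan was defined with an anonymous struct type, which left the variable with external linkage it never needed. Naming the type separately lets the definition become static while the initializer still refers to the variable by name. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/list.h>

    struct scan_state {
            struct list_head head;
    };
    /* the definition can now carry static, file-local linkage */
    static struct scan_state scan_state = {
            .head = LIST_HEAD_INIT(scan_state.head),
    };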
@@ -486,41 +487,68 @@ static struct attribute_group khugepaged_attr_group = {
        .attrs = khugepaged_attr,
        .name = "khugepaged",
 };
-#endif /* CONFIG_SYSFS */
 
-static int __init hugepage_init(void)
+static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 {
        int err;
-#ifdef CONFIG_SYSFS
-       static struct kobject *hugepage_kobj;
-#endif
-
-       err = -EINVAL;
-       if (!has_transparent_hugepage()) {
-               transparent_hugepage_flags = 0;
-               goto out;
-       }
 
-#ifdef CONFIG_SYSFS
-       err = -ENOMEM;
-       hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
-       if (unlikely(!hugepage_kobj)) {
+       *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
+       if (unlikely(!*hugepage_kobj)) {
                printk(KERN_ERR "hugepage: failed to create kobject\n");
-               goto out;
+               return -ENOMEM;
        }
 
-       err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
+       err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
        if (err) {
                printk(KERN_ERR "hugepage: failed to register hugepage group\n");
-               goto out;
+               goto delete_obj;
        }
 
-       err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
+       err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
        if (err) {
                printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
-               goto out;
+               goto remove_hp_group;
        }
-#endif
+
+       return 0;
+
+remove_hp_group:
+       sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
+delete_obj:
+       kobject_put(*hugepage_kobj);
+       return err;
+}
+
+static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
+{
+       sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
+       sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
+       kobject_put(hugepage_kobj);
+}
+#else
+static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
+{
+       return 0;
+}
+
+static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
+{
+}
+#endif /* CONFIG_SYSFS */
+
+static int __init hugepage_init(void)
+{
+       int err;
+       struct kobject *hugepage_kobj;
+
+       if (!has_transparent_hugepage()) {
+               transparent_hugepage_flags = 0;
+               return -EINVAL;
+       }
+
+       err = hugepage_init_sysfs(&hugepage_kobj);
+       if (err)
+               return err;
 
        err = khugepaged_slab_init();
        if (err)
@@ -544,7 +572,9 @@ static int __init hugepage_init(void)
 
        set_recommended_min_free_kbytes();
 
+       return 0;
 out:
+       hugepage_exit_sysfs(hugepage_kobj);
        return err;
 }
 module_init(hugepage_init)
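The sysfs setup is hoisted into hugepage_init_sysfs()/hugepage_exit_sysfs(), with empty stubs for !CONFIG_SYSFS, and the error paths now unwind instead of leaking the kobject and the first attribute group; hugepage_init() reuses the exit helper on its own failure path. A generic sketch of the unwind idiom being applied, with illustrative names that are not from this file:

    static int example_init(void)
    {
            int err;

            err = create_a();
            if (err)
                    return err;     /* nothing to undo yet */
            err = create_b();
            if (err)
                    goto undo_a;
            err = create_c();
            if (err)
                    goto undo_b;
            return 0;

    undo_b:
            destroy_b();
    undo_a:
            destroy_a();
            return err;
    }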
@@ -641,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                set_pmd_at(mm, haddr, pmd, entry);
                prepare_pmd_huge_pte(pgtable, mm);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }
 
@@ -759,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        prepare_pmd_huge_pte(pgtable, dst_mm);
+       dst_mm->nr_ptes++;
 
        ret = 0;
 out_unlock:
@@ -829,7 +861,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                copy_user_highpage(pages[i], page + i,
-                                  haddr + PAGE_SHIFT*i, vma);
+                                  haddr + PAGE_SIZE * i, vma);
                __SetPageUptodate(pages[i]);
                cond_resched();
        }
@@ -857,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
        kfree(pages);
 
-       mm->nr_ptes++;
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
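Together, the three nr_ptes hunks above (and the collapse_huge_page() hunk later) move the page-table accounting so that a mapped huge pmd itself counts as one page table for the OOM killer and exit paths, since it always keeps one deposited pte table. A condensed summary, assuming the function names from this file:

    /*
     *   __do_huge_pmd_anonymous_page()  set_pmd_at();  mm->nr_ptes++;
     *   copy_huge_pmd()                 set_pmd_at();  dst_mm->nr_ptes++;
     *   zap_huge_pmd()                  pmd_clear();   tlb->mm->nr_ptes--;
     *
     * Splitting (__split_huge_page_map(), the wp fallback above) and
     * collapsing (collapse_huge_page()) stop touching nr_ptes: they
     * trade a huge pmd for a pte table or vice versa, and either form
     * counts as exactly one.
     */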
@@ -989,41 +1020,34 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
        VM_BUG_ON(!PageCompound(page));
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
 
 out:
        return page;
 }
 
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
-                pmd_t *pmd)
+                pmd_t *pmd, unsigned long addr)
 {
        int ret = 0;
 
-       spin_lock(&tlb->mm->page_table_lock);
-       if (likely(pmd_trans_huge(*pmd))) {
-               if (unlikely(pmd_trans_splitting(*pmd))) {
-                       spin_unlock(&tlb->mm->page_table_lock);
-                       wait_split_huge_page(vma->anon_vma,
-                                            pmd);
-               } else {
-                       struct page *page;
-                       pgtable_t pgtable;
-                       pgtable = get_pmd_huge_pte(tlb->mm);
-                       page = pmd_page(*pmd);
-                       pmd_clear(pmd);
-                       page_remove_rmap(page);
-                       VM_BUG_ON(page_mapcount(page) < 0);
-                       add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-                       VM_BUG_ON(!PageHead(page));
-                       spin_unlock(&tlb->mm->page_table_lock);
-                       tlb_remove_page(tlb, page);
-                       pte_free(tlb->mm, pgtable);
-                       ret = 1;
-               }
-       } else
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               struct page *page;
+               pgtable_t pgtable;
+               pgtable = get_pmd_huge_pte(tlb->mm);
+               page = pmd_page(*pmd);
+               pmd_clear(pmd);
+               tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
+               page_remove_rmap(page);
+               VM_BUG_ON(page_mapcount(page) < 0);
+               add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+               VM_BUG_ON(!PageHead(page));
+               tlb->mm->nr_ptes--;
                spin_unlock(&tlb->mm->page_table_lock);
-
+               tlb_remove_page(tlb, page);
+               pte_free(tlb->mm, pgtable);
+               ret = 1;
+       }
        return ret;
 }
 
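zap_huge_pmd() is the first caller converted to the new __pmd_trans_huge_lock() helper, defined later in this patch, which centralizes the lock/splitting dance each huge-pmd operation used to open-code. A hedged sketch of the calling convention:

    /*
     * __pmd_trans_huge_lock() returns:
     *    1  stable huge pmd; page_table_lock is held and the caller
     *       must drop it when done
     *   -1  the pmd was under splitting; the helper already waited,
     *       so the caller falls back to the regular pte path
     *    0  not a huge pmd; the lock has already been released
     */
    if (__pmd_trans_huge_lock(pmd, vma) == 1) {
            /* ... operate on the stable huge pmd ... */
            spin_unlock(&vma->vm_mm->page_table_lock);
    }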
@@ -1033,22 +1057,52 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        int ret = 0;
 
-       spin_lock(&vma->vm_mm->page_table_lock);
-       if (likely(pmd_trans_huge(*pmd))) {
-               ret = !pmd_trans_splitting(*pmd);
-               spin_unlock(&vma->vm_mm->page_table_lock);
-               if (unlikely(!ret))
-                       wait_split_huge_page(vma->anon_vma, pmd);
-               else {
-                       /*
-                        * All logical pages in the range are present
-                        * if backed by a huge page.
-                        */
-                       memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-               }
-       } else
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               /*
+                * All logical pages in the range are present
+                * if backed by a huge page.
+                */
                spin_unlock(&vma->vm_mm->page_table_lock);
+               memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
+                 unsigned long old_addr,
+                 unsigned long new_addr, unsigned long old_end,
+                 pmd_t *old_pmd, pmd_t *new_pmd)
+{
+       int ret = 0;
+       pmd_t pmd;
+
+       struct mm_struct *mm = vma->vm_mm;
+
+       if ((old_addr & ~HPAGE_PMD_MASK) ||
+           (new_addr & ~HPAGE_PMD_MASK) ||
+           old_end - old_addr < HPAGE_PMD_SIZE ||
+           (new_vma->vm_flags & VM_NOHUGEPAGE))
+               goto out;
+
+       /*
+        * The destination pmd shouldn't be established, free_pgtables()
+        * should have released it.
+        */
+       if (WARN_ON(!pmd_none(*new_pmd))) {
+               VM_BUG_ON(pmd_trans_huge(*new_pmd));
+               goto out;
+       }
 
+       ret = __pmd_trans_huge_lock(old_pmd, vma);
+       if (ret == 1) {
+               pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
+               VM_BUG_ON(!pmd_none(*new_pmd));
+               set_pmd_at(mm, new_addr, new_pmd, pmd);
+               spin_unlock(&mm->page_table_lock);
+       }
+out:
        return ret;
 }
 
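move_huge_pmd() is new: it lets mremap() relocate an aligned huge pmd in one shot instead of splitting it first. A condensed sketch of the intended call site inside move_page_tables(), assuming the companion mm/mremap.c change that is not part of this file:

    if (pmd_trans_huge(*old_pmd)) {
            int moved = 0;
            if (extent == HPAGE_PMD_SIZE)
                    moved = move_huge_pmd(vma, new_vma, old_addr,
                                          new_addr, old_end,
                                          old_pmd, new_pmd);
            if (moved > 0)
                    continue;       /* relocated as one huge pmd */
            if (!moved)             /* not movable as a whole */
                    split_huge_page_pmd(vma->vm_mm, old_pmd);
            /* moved < 0: it was splitting; fall through to the ptes */
    }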
@@ -1058,25 +1112,41 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        struct mm_struct *mm = vma->vm_mm;
        int ret = 0;
 
-       spin_lock(&mm->page_table_lock);
+       if (__pmd_trans_huge_lock(pmd, vma) == 1) {
+               pmd_t entry;
+               entry = pmdp_get_and_clear(mm, addr, pmd);
+               entry = pmd_modify(entry, newprot);
+               set_pmd_at(mm, addr, pmd, entry);
+               spin_unlock(&vma->vm_mm->page_table_lock);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+/*
+ * Returns 1 if a given pmd maps a stable (not under splitting) thp.
+ * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ *
+ * Note that if it returns 1, this routine returns without unlocking the
+ * page table lock, so the caller must release it.
+ */
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+{
+       spin_lock(&vma->vm_mm->page_table_lock);
        if (likely(pmd_trans_huge(*pmd))) {
                if (unlikely(pmd_trans_splitting(*pmd))) {
-                       spin_unlock(&mm->page_table_lock);
+                       spin_unlock(&vma->vm_mm->page_table_lock);
                        wait_split_huge_page(vma->anon_vma, pmd);
+                       return -1;
                } else {
-                       pmd_t entry;
-
-                       entry = pmdp_get_and_clear(mm, addr, pmd);
-                       entry = pmd_modify(entry, newprot);
-                       set_pmd_at(mm, addr, pmd, entry);
-                       spin_unlock(&vma->vm_mm->page_table_lock);
-                       flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
-                       ret = 1;
+                       /*
+                        * Thp mapped by 'pmd' is stable, so we can
+                        * handle it as it is.
+                        */
+                       return 1;
                }
-       } else
-               spin_unlock(&vma->vm_mm->page_table_lock);
-
-       return ret;
+       }
+       spin_unlock(&vma->vm_mm->page_table_lock);
+       return 0;
 }
 
 pmd_t *page_check_address_pmd(struct page *page,
@@ -1139,7 +1209,7 @@ static int __split_huge_page_splitting(struct page *page,
                 * We can't temporarily set the pmd to null in order
                 * to split it, the pmd must remain marked huge at all
                 * times or the VM won't take the pmd_trans_huge paths
-                * and it won't wait on the anon_vma->root->lock to
+                * and it won't wait on the anon_vma->root->mutex to
                 * serialize against split_huge_page*.
                 */
                pmdp_splitting_flush_notify(vma, address, pmd);
@@ -1153,22 +1223,39 @@ static int __split_huge_page_splitting(struct page *page,
 static void __split_huge_page_refcount(struct page *page)
 {
        int i;
-       unsigned long head_index = page->index;
        struct zone *zone = page_zone(page);
-       int zonestat;
+       int tail_count = 0;
 
        /* prevent PageLRU to go away from under us, and freeze lru stats */
        spin_lock_irq(&zone->lru_lock);
        compound_lock(page);
+       /* complete memcg works before add pages to LRU */
+       mem_cgroup_split_huge_fixup(page);
 
-       for (i = 1; i < HPAGE_PMD_NR; i++) {
+       for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
                struct page *page_tail = page + i;
 
-               /* tail_page->_count cannot change */
-               atomic_sub(atomic_read(&page_tail->_count), &page->_count);
-               BUG_ON(page_count(page) <= 0);
-               atomic_add(page_mapcount(page) + 1, &page_tail->_count);
-               BUG_ON(atomic_read(&page_tail->_count) <= 0);
+               /* tail_page->_mapcount cannot change */
+               BUG_ON(page_mapcount(page_tail) < 0);
+               tail_count += page_mapcount(page_tail);
+               /* check for overflow */
+               BUG_ON(tail_count < 0);
+               BUG_ON(atomic_read(&page_tail->_count) != 0);
+               /*
+                * tail_page->_count is zero and not changing from
+                * under us. But get_page_unless_zero() may be running
+                * from under us on the tail_page. If we used
+                * atomic_set() below instead of atomic_add(), we
+                * would then run atomic_set() concurrently with
+                * get_page_unless_zero(), and atomic_set() is
+                * implemented in C not using locked ops. spin_unlock
+                * on x86 sometime uses locked ops because of PPro
+                * errata 66, 92, so unless somebody can guarantee
+                * atomic_set() here would be safe on all archs (and
+                * not only on x86), it's safer to use atomic_add().
+                */
+               atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
+                          &page_tail->_count);
 
                /* after clearing PageTail the gup refcount can be released */
                smp_mb();
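The long comment above carries the real argument of this hunk. A compressed illustration of the hazard it describes; this is a hedged sketch, not code from the tree:

    /*
     *   CPU A (splitting)                CPU B (speculative GUP)
     *   atomic_set(&tail->_count, N);    atomic_inc_not_zero(&tail->_count);
     *
     * atomic_set() is a plain store, so it can overwrite B's locked
     * increment and the reference B just took is lost.  atomic_add()
     * is a locked read-modify-write, so both updates survive in either
     * order.
     */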
@@ -1186,10 +1273,7 @@ static void __split_huge_page_refcount(struct page *page)
                                      (1L << PG_uptodate)));
                page_tail->flags |= (1L << PG_dirty);
 
-               /*
-                * 1) clear PageTail before overwriting first_page
-                * 2) clear PageTail before clearing PageHead for VM_BUG_ON
-                */
+               /* clear PageTail before overwriting first_page */
                smp_wmb();
 
                /*
@@ -1206,36 +1290,27 @@ static void __split_huge_page_refcount(struct page *page)
                 * status is achieved setting a reserved bit in the
                 * pmd, not by clearing the present bit.
                */
-               BUG_ON(page_mapcount(page_tail));
                page_tail->_mapcount = page->_mapcount;
 
                BUG_ON(page_tail->mapping);
                page_tail->mapping = page->mapping;
 
-               page_tail->index = ++head_index;
+               page_tail->index = page->index + i;
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
                BUG_ON(!PageDirty(page_tail));
                BUG_ON(!PageSwapBacked(page_tail));
 
-               mem_cgroup_split_huge_fixup(page, page_tail);
 
                lru_add_page_tail(zone, page, page_tail);
        }
+       atomic_sub(tail_count, &page->_count);
+       BUG_ON(atomic_read(&page->_count) <= 0);
 
        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
-       /*
-        * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
-        * so adjust those appropriately if this page is on the LRU.
-        */
-       if (PageLRU(page)) {
-               zonestat = NR_LRU_BASE + page_lru(page);
-               __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
-       }
-
        ClearPageCompound(page);
        compound_unlock(page);
        spin_unlock_irq(&zone->lru_lock);
@@ -1295,7 +1370,6 @@ static int __split_huge_page_map(struct page *page,
                        pte_unmap(pte);
                }
 
-               mm->nr_ptes++;
                smp_wmb(); /* make pte visible before pmd */
                /*
                 * Up to this point the pmd is present and huge and
@@ -1333,7 +1407,7 @@ static int __split_huge_page_map(struct page *page,
        return ret;
 }
 
-/* must be called with anon_vma->root->lock hold */
+/* must be called with anon_vma->root->mutex held */
 static void __split_huge_page(struct page *page,
                              struct anon_vma *anon_vma)
 {
@@ -1408,6 +1482,9 @@ out:
        return ret;
 }
 
+#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
+                  VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+
 int hugepage_madvise(struct vm_area_struct *vma,
                     unsigned long *vm_flags, int advice)
 {
@@ -1416,11 +1493,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
-               if (*vm_flags & (VM_HUGEPAGE |
-                                VM_SHARED   | VM_MAYSHARE   |
-                                VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
-                                VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
-                                VM_MIXEDMAP | VM_SAO))
+               if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
                        return -EINVAL;
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
@@ -1436,11 +1509,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                /*
                 * Be somewhat over-protective like KSM for now!
                 */
-               if (*vm_flags & (VM_NOHUGEPAGE |
-                                VM_SHARED   | VM_MAYSHARE   |
-                                VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
-                                VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
-                                VM_MIXEDMAP | VM_SAO))
+               if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
                        return -EINVAL;
                *vm_flags &= ~VM_HUGEPAGE;
                *vm_flags |= VM_NOHUGEPAGE;
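Both madvise paths now test the single VM_NO_THP mask. As an observable consequence, MADV_HUGEPAGE on a shared mapping should still fail, since VM_SHARED and VM_MAYSHARE are in the mask; a hedged userspace sketch (illustrative, not part of the patch):

    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            size_t len = 2UL * 1024 * 1024;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            if (madvise(p, len, MADV_HUGEPAGE) != 0)
                    perror("madvise");      /* expect: Invalid argument */
            munmap(p, len);
            return 0;
    }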
@@ -1574,10 +1643,14 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
                 * page fault if needed.
                 */
                return 0;
-       if (vma->vm_file || vma->vm_ops)
+       if (vma->vm_ops)
                /* khugepaged not yet working on file or special mappings */
                return 0;
-       VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+       /*
+        * If is_linear_pfn_mapping() is true, is_pfn_mapping() must be
+        * true too; verify it here.
+        */
+       VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
@@ -1597,14 +1670,13 @@ void __khugepaged_exit(struct mm_struct *mm)
                list_del(&mm_slot->mm_node);
                free = 1;
        }
+       spin_unlock(&khugepaged_mm_lock);
 
        if (free) {
-               spin_unlock(&khugepaged_mm_lock);
                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
                free_mm_slot(mm_slot);
                mmdrop(mm);
        } else if (mm_slot) {
-               spin_unlock(&khugepaged_mm_lock);
                /*
                 * This is required to serialize against
                 * khugepaged_test_exit() (which is guaranteed to run
@@ -1615,8 +1687,7 @@ void __khugepaged_exit(struct mm_struct *mm)
                 */
                down_write(&mm->mmap_sem);
                up_write(&mm->mmap_sem);
-       } else
-               spin_unlock(&khugepaged_mm_lock);
+       }
 }
 
 static void release_pte_page(struct page *page)
@@ -1772,12 +1843,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 #ifndef CONFIG_NUMA
+       up_read(&mm->mmap_sem);
        VM_BUG_ON(!*hpage);
        new_page = *hpage;
-       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-               up_read(&mm->mmap_sem);
-               return;
-       }
 #else
        VM_BUG_ON(*hpage);
        /*
@@ -1792,22 +1860,26 @@ static void collapse_huge_page(struct mm_struct *mm,
         */
        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
                                      node, __GFP_OTHER_NODE);
+
+       /*
+        * After allocating the hugepage, release the mmap_sem read lock in
+        * preparation for taking it in write mode.
+        */
+       up_read(&mm->mmap_sem);
        if (unlikely(!new_page)) {
-               up_read(&mm->mmap_sem);
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                *hpage = ERR_PTR(-ENOMEM);
                return;
        }
+#endif
+
        count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-               up_read(&mm->mmap_sem);
+#ifdef CONFIG_NUMA
                put_page(new_page);
+#endif
                return;
        }
-#endif
-
-       /* after allocating the hugepage upgrade to mmap_sem write mode */
-       up_read(&mm->mmap_sem);
 
        /*
         * Prevent all access to pagetables with the exception of
@@ -1828,12 +1900,15 @@ static void collapse_huge_page(struct mm_struct *mm,
            (vma->vm_flags & VM_NOHUGEPAGE))
                goto out;
 
-       /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
-       if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+       if (!vma->anon_vma || vma->vm_ops)
                goto out;
        if (is_vma_temporary_stack(vma))
                goto out;
-       VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+       /*
+        * If is_linear_pfn_mapping() is true, is_pfn_mapping() must be
+        * true too; verify it here.
+        */
+       VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
@@ -1905,9 +1980,8 @@ static void collapse_huge_page(struct mm_struct *mm,
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        set_pmd_at(mm, address, pmd, _pmd);
-       update_mmu_cache(vma, address, entry);
+       update_mmu_cache(vma, address, _pmd);
        prepare_pmd_huge_pte(pgtable, mm);
-       mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
@@ -2002,7 +2076,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
        struct mm_struct *mm = mm_slot->mm;
 
-       VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
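The NR_CPUS != 1 guard is needed because uniprocessor kernels compile spinlocks away, so spin_is_locked() is constant false and the unguarded assertion would fire on every call. Roughly, simplified from the UP spinlock headers:

    /* UP: nothing can be contended, so nothing ever reads as locked */
    #define arch_spin_is_locked(lock)       ((void)(lock), 0)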
@@ -2023,6 +2097,8 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                                            struct page **hpage)
+       __releases(&khugepaged_mm_lock)
+       __acquires(&khugepaged_mm_lock)
 {
        struct mm_slot *mm_slot;
        struct mm_struct *mm;
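The __releases()/__acquires() pair documents that khugepaged_scan_mm_slot() is entered with khugepaged_mm_lock held, drops it while scanning, and re-takes it before returning. These are sparse-only lock-context annotations, invisible to the compiler; approximately as defined in include/linux/compiler.h:

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif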
@@ -2030,7 +2106,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
        int progress = 0;
 
        VM_BUG_ON(!pages);
-       VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
        if (khugepaged_scan.mm_slot)
                mm_slot = khugepaged_scan.mm_slot;
@@ -2066,13 +2142,16 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                        progress++;
                        continue;
                }
-               /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
-               if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+               if (!vma->anon_vma || vma->vm_ops)
                        goto skip;
                if (is_vma_temporary_stack(vma))
                        goto skip;
-
-               VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+               /*
+                * If is_linear_pfn_mapping() is true, is_pfn_mapping()
+                * must be true too; verify it here.
+                */
+               VM_BUG_ON(is_linear_pfn_mapping(vma) ||
+                         vma->vm_flags & VM_NO_THP);
 
                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                hend = vma->vm_end & HPAGE_PMD_MASK;
@@ -2192,12 +2271,8 @@ static void khugepaged_do_scan(struct page **hpage)
 
 static void khugepaged_alloc_sleep(void)
 {
-       DEFINE_WAIT(wait);
-       add_wait_queue(&khugepaged_wait, &wait);
-       schedule_timeout_interruptible(
-               msecs_to_jiffies(
-                       khugepaged_alloc_sleep_millisecs));
-       remove_wait_queue(&khugepaged_wait, &wait);
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
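With a constant-false condition, wait_event_freezable_timeout() simply sleeps for the full timeout, but unlike the removed open-coded wait it cooperates with the freezer, so khugepaged no longer blocks suspend while sleeping. Approximately the macro's shape in this era's include/linux/freezer.h (a hedged simplification):

    #define wait_event_freezable_timeout(wq, condition, timeout)       \
    ({                                                                 \
            long __retval = timeout;                                   \
            do {                                                       \
                    __retval = wait_event_interruptible_timeout(wq,    \
                                    condition || freezing(current),    \
                                    __retval);                         \
            } while (try_to_freeze());                                 \
            __retval;                                                  \
    })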
 
 #ifndef CONFIG_NUMA
@@ -2228,11 +2303,8 @@ static void khugepaged_loop(void)
        while (likely(khugepaged_enabled())) {
 #ifndef CONFIG_NUMA
                hpage = khugepaged_alloc_hugepage();
-               if (unlikely(!hpage)) {
-                       count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+               if (unlikely(!hpage))
                        break;
-               }
-               count_vm_event(THP_COLLAPSE_ALLOC);
 #else
                if (IS_ERR(hpage)) {
                        khugepaged_alloc_sleep();
@@ -2249,14 +2321,10 @@ static void khugepaged_loop(void)
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
-                       DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
-                       add_wait_queue(&khugepaged_wait, &wait);
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(
-                                       khugepaged_scan_sleep_millisecs));
-                       remove_wait_queue(&khugepaged_wait, &wait);
+                       wait_event_freezable_timeout(khugepaged_wait, false,
+                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());