Update to 3.4-final.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b61d2db..ae8f708 100644
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -50,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
  */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
+static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
+{
+       bool free = (spool->count == 0) && (spool->used_hpages == 0);
+
+       spin_unlock(&spool->lock);
+
+       /* If no pages are used, and no other handles to the subpool
+        * remain, free the subpool. */
+       if (free)
+               kfree(spool);
+}
+
+struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
+{
+       struct hugepage_subpool *spool;
+
+       spool = kmalloc(sizeof(*spool), GFP_KERNEL);
+       if (!spool)
+               return NULL;
+
+       spin_lock_init(&spool->lock);
+       spool->count = 1;
+       spool->max_hpages = nr_blocks;
+       spool->used_hpages = 0;
+
+       return spool;
+}
+
+void hugepage_put_subpool(struct hugepage_subpool *spool)
+{
+       spin_lock(&spool->lock);
+       BUG_ON(!spool->count);
+       spool->count--;
+       unlock_or_release_subpool(spool);
+}
+
+static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
+                                     long delta)
+{
+       int ret = 0;
+
+       if (!spool)
+               return 0;
+
+       spin_lock(&spool->lock);
+       if ((spool->used_hpages + delta) <= spool->max_hpages) {
+               spool->used_hpages += delta;
+       } else {
+               ret = -ENOMEM;
+       }
+       spin_unlock(&spool->lock);
+
+       return ret;
+}
+
+static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
+                                      long delta)
+{
+       if (!spool)
+               return;
+
+       spin_lock(&spool->lock);
+       spool->used_hpages -= delta;
+       /* If hugetlbfs_put_super couldn't free spool due to
+        * an outstanding quota reference, free it now. */
+       unlock_or_release_subpool(spool);
+}
+
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
+{
+       return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
+static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
+{
+       return subpool_inode(vma->vm_file->f_dentry->d_inode);
+}
+
 /*
  * Region tracking -- allows tracking of reservations and instantiated pages
  *                    across the pages in a mapping.
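
For orientation: the hunk above replaces the old per-mapping filesystem quota with a per-superblock "subpool". hugepage_new_subpool()/hugepage_put_subpool() form the external interface (used by hugetlbfs at mount/umount time), while hugepage_subpool_get_pages()/hugepage_subpool_put_pages() are static helpers charged and uncharged from alloc_huge_page() and free_huge_page() later in this patch. A minimal sketch of the accounting flow, not part of the patch and using a hypothetical single subpool, looks like this:

    /* Illustrative sketch only -- not part of this patch. */
    static struct hugepage_subpool *example_spool;

    static int example_setup(long max_hpages)
    {
            /* Typically one subpool per hugetlbfs superblock ("size=" mount option). */
            example_spool = hugepage_new_subpool(max_hpages);
            return example_spool ? 0 : -ENOMEM;
    }

    static int example_charge_one_page(void)
    {
            /* Returns -ENOMEM once used_hpages would exceed max_hpages. */
            return hugepage_subpool_get_pages(example_spool, 1);
    }

    static void example_uncharge_one_page(void)
    {
            hugepage_subpool_put_pages(example_spool, 1);
    }

    static void example_teardown(void)
    {
            /*
             * Drops the creator's reference; the structure is only freed once
             * used_hpages also reaches zero (see unlock_or_release_subpool()).
             */
            hugepage_put_subpool(example_spool);
    }
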
@@ -59,10 +140,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- *     down_write(&mm->mmap_sem);
+ *     down_write(&mm->mmap_sem);
  * or
- *     down_read(&mm->mmap_sem);
- *     mutex_lock(&hugetlb_instantiation_mutex);
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
        struct list_head link;
@@ -143,7 +224,7 @@ static long region_chg(struct list_head *head, long f, long t)
                if (rg->from > t)
                        return chg;
 
-               /* We overlap with this area, if it extends futher than
+               /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
@@ -220,6 +301,12 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
                        (vma->vm_pgoff >> huge_page_order(h));
 }
 
+pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
+                                    unsigned long address)
+{
+       return vma_hugecache_offset(hstate_vma(vma), vma, address);
+}
+
 /*
  * Return the size of the pages allocated when backing a VMA. In the majority
  * cases this will be same size as used by the page table entries.
@@ -385,67 +472,37 @@ static int vma_has_reserves(struct vm_area_struct *vma)
        return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-                       unsigned long addr, unsigned long sz)
-{
-       int i;
-       struct page *p = page;
-
-       might_sleep();
-       for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-               cond_resched();
-               clear_user_highpage(p, addr + i * PAGE_SIZE);
-       }
-}
-static void clear_huge_page(struct page *page,
-                       unsigned long addr, unsigned long sz)
+static void copy_gigantic_page(struct page *dst, struct page *src)
 {
        int i;
-
-       if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-               clear_gigantic_page(page, addr, sz);
-               return;
-       }
-
-       might_sleep();
-       for (i = 0; i < sz/PAGE_SIZE; i++) {
-               cond_resched();
-               clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-       }
-}
-
-static void copy_gigantic_page(struct page *dst, struct page *src,
-                          unsigned long addr, struct vm_area_struct *vma)
-{
-       int i;
-       struct hstate *h = hstate_vma(vma);
+       struct hstate *h = page_hstate(src);
        struct page *dst_base = dst;
        struct page *src_base = src;
-       might_sleep();
+
        for (i = 0; i < pages_per_huge_page(h); ) {
                cond_resched();
-               copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+               copy_highpage(dst, src);
 
                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
 }
-static void copy_huge_page(struct page *dst, struct page *src,
-                          unsigned long addr, struct vm_area_struct *vma)
+
+void copy_huge_page(struct page *dst, struct page *src)
 {
        int i;
-       struct hstate *h = hstate_vma(vma);
+       struct hstate *h = page_hstate(src);
 
        if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-               copy_gigantic_page(dst, src, addr, vma);
+               copy_gigantic_page(dst, src);
                return;
        }
 
        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); i++) {
                cond_resched();
-               copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
+               copy_highpage(dst + i, src + i);
        }
 }
 
@@ -457,19 +514,34 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        h->free_huge_pages_node[nid]++;
 }
 
+static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
+{
+       struct page *page;
+
+       if (list_empty(&h->hugepage_freelists[nid]))
+               return NULL;
+       page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
+       list_del(&page->lru);
+       set_page_refcounted(page);
+       h->free_huge_pages--;
+       h->free_huge_pages_node[nid]--;
+       return page;
+}
+
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
 {
-       int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
+       unsigned int cpuset_mems_cookie;
 
-       get_mems_allowed();
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol, &nodemask);
        /*
@@ -483,29 +555,28 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
-               goto err;;
+               goto err;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
-               nid = zone_to_nid(zone);
-               if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
-                   !list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-
-                       if (!avoid_reserve)
-                               decrement_hugepage_resv_vma(h, vma);
-
-                       break;
+               if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
+                       page = dequeue_huge_page_node(h, zone_to_nid(zone));
+                       if (page) {
+                               if (!avoid_reserve)
+                                       decrement_hugepage_resv_vma(h, vma);
+                               break;
+                       }
                }
        }
-err:
+
        mpol_cond_put(mpol);
-       put_mems_allowed();
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
        return page;
+
+err:
+       mpol_cond_put(mpol);
+       return NULL;
 }
 
 static void update_and_free_page(struct hstate *h, struct page *page)
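
Aside: the reworked dequeue path above uses the cpuset_mems_cookie idiom -- sample a cookie with get_mems_allowed(), try the allocation, and retry if put_mems_allowed() reports that the cpuset's allowed nodes changed while we came away empty-handed. A stripped-down sketch of the pattern (try_dequeue() is a hypothetical stand-in for the real work):

    /* Sketch of the seqcount-style cpuset retry; try_dequeue() is hypothetical. */
    static struct page *cookie_retry_example(void)
    {
            struct page *page;
            unsigned int cpuset_mems_cookie;

            do {
                    cpuset_mems_cookie = get_mems_allowed();
                    page = try_dequeue();
            } while (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page));

            return page;
    }
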
@@ -517,9 +588,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-                               1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-                               1 << PG_private | 1<< PG_writeback);
+               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+                               1 << PG_referenced | 1 << PG_dirty |
+                               1 << PG_active | 1 << PG_reserved |
+                               1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
@@ -546,12 +618,13 @@ static void free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct address_space *mapping;
+       struct hugepage_subpool *spool =
+               (struct hugepage_subpool *)page_private(page);
 
-       mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
+       BUG_ON(page_mapcount(page));
        INIT_LIST_HEAD(&page->lru);
 
        spin_lock(&hugetlb_lock);
@@ -563,8 +636,7 @@ static void free_huge_page(struct page *page)
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
-       if (mapping)
-               hugetlb_put_quota(mapping, 1);
+       hugepage_subpool_put_pages(spool, 1);
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
@@ -588,6 +660,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -604,6 +677,7 @@ int PageHuge(struct page *page)
 
        return dtor == free_huge_page;
 }
+EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
@@ -758,11 +832,10 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
        return ret;
 }
 
-static struct page *alloc_buddy_huge_page(struct hstate *h,
-                       struct vm_area_struct *vma, unsigned long address)
+static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 {
        struct page *page;
-       unsigned int nid;
+       unsigned int r_nid;
 
        if (h->order >= MAX_ORDER)
                return NULL;
@@ -800,30 +873,29 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
        }
        spin_unlock(&hugetlb_lock);
 
-       page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
-                                       __GFP_REPEAT|__GFP_NOWARN,
-                                       huge_page_order(h));
+       if (nid == NUMA_NO_NODE)
+               page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+                                  __GFP_REPEAT|__GFP_NOWARN,
+                                  huge_page_order(h));
+       else
+               page = alloc_pages_exact_node(nid,
+                       htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+                       __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
 
        if (page && arch_prepare_hugepage(page)) {
                __free_pages(page, huge_page_order(h));
-               return NULL;
+               page = NULL;
        }
 
        spin_lock(&hugetlb_lock);
        if (page) {
-               /*
-                * This page is now managed by the hugetlb allocator and has
-                * no users -- drop the buddy allocator's reference.
-                */
-               put_page_testzero(page);
-               VM_BUG_ON(page_count(page));
-               nid = page_to_nid(page);
+               r_nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
                 * We incremented the global counters already
                 */
-               h->nr_huge_pages_node[nid]++;
-               h->surplus_huge_pages_node[nid]++;
+               h->nr_huge_pages_node[r_nid]++;
+               h->surplus_huge_pages_node[r_nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                h->nr_huge_pages--;
@@ -836,7 +908,26 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 }
 
 /*
- * Increase the hugetlb pool such that it can accomodate a reservation
+ * This allocation function is useful in the context where vma is irrelevant.
+ * E.g. soft-offlining uses this function because it only cares about the
+ * physical address of the error page.
+ */
+struct page *alloc_huge_page_node(struct hstate *h, int nid)
+{
+       struct page *page;
+
+       spin_lock(&hugetlb_lock);
+       page = dequeue_huge_page_node(h, nid);
+       spin_unlock(&hugetlb_lock);
+
+       if (!page)
+               page = alloc_buddy_huge_page(h, nid);
+
+       return page;
+}
+
+/*
+ * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
@@ -845,6 +936,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;
+       bool alloc_ok = true;
 
        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
@@ -859,21 +951,14 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
-               page = alloc_buddy_huge_page(h, NULL, 0);
+               page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
-                       /*
-                        * We were not able to allocate enough pages to
-                        * satisfy the entire reservation so we free what
-                        * we've allocated so far.
-                        */
-                       spin_lock(&hugetlb_lock);
-                       needed = 0;
-                       goto free;
+                       alloc_ok = false;
+                       break;
                }
-
                list_add(&page->lru, &surplus_list);
        }
-       allocated += needed;
+       allocated += i;
 
        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
@@ -882,12 +967,19 @@ retry:
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
-       if (needed > 0)
-               goto retry;
-
+       if (needed > 0) {
+               if (alloc_ok)
+                       goto retry;
+               /*
+                * We were not able to allocate enough pages to
+                * satisfy the entire reservation so we free what
+                * we've allocated so far.
+                */
+               goto free;
+       }
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
-        * needed to accomodate the reservation.  Add the appropriate number
+        * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
@@ -896,31 +988,31 @@ retry:
        needed += allocated;
        h->resv_huge_pages += delta;
        ret = 0;
-free:
+
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
                list_del(&page->lru);
+               /*
+                * This page is now managed by the hugetlb allocator and has
+                * no users -- drop the buddy allocator's reference.
+                */
+               put_page_testzero(page);
+               VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+free:
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
        if (!list_empty(&surplus_list)) {
-               spin_unlock(&hugetlb_lock);
                list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                        list_del(&page->lru);
-                       /*
-                        * The page has a reference count of zero already, so
-                        * call free_huge_page directly instead of using
-                        * put_page.  This must be done with hugetlb_lock
-                        * unlocked which is safe because free_huge_page takes
-                        * hugetlb_lock before deciding how to free the page.
-                        */
-                       free_huge_page(page);
+                       put_page(page);
                }
-               spin_lock(&hugetlb_lock);
        }
+       spin_lock(&hugetlb_lock);
 
        return ret;
 }
@@ -962,11 +1054,12 @@ static void return_unused_surplus_pages(struct hstate *h,
 /*
  * Determine if the huge page at addr within the vma has an associated
  * reservation.  Where it does not we will need to logically increase
- * reservation and actually increase quota before an allocation can occur.
- * Where any new reservation would be required the reservation change is
- * prepared, but not committed.  Once the page has been quota'd allocated
- * an instantiated the change should be committed via vma_commit_reservation.
- * No action is required on failure.
+ * reservation and actually increase subpool usage before an allocation
+ * can occur.  Where any new reservation would be required the
+ * reservation change is prepared, but not committed.  Once the page
+ * has been allocated from the subpool and instantiated the change should
+ * be committed via vma_commit_reservation.  No action is required on
+ * failure.
  */
 static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
@@ -1015,40 +1108,39 @@ static void vma_commit_reservation(struct hstate *h,
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
 {
+       struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
-       struct address_space *mapping = vma->vm_file->f_mapping;
-       struct inode *inode = mapping->host;
        long chg;
 
        /*
-        * Processes that did not create the mapping will have no reserves and
-        * will not have accounted against quota. Check that the quota can be
-        * made before satisfying the allocation
-        * MAP_NORESERVE mappings may also need pages and quota allocated
-        * if no reserve mapping overlaps.
+        * Processes that did not create the mapping will have no
+        * reserves and will not have accounted against subpool
+        * limit. Check that the subpool limit can be made before
+        * satisfying the allocation. MAP_NORESERVE mappings may also
+        * need pages and subpool limit allocated if no reserve
+        * mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
-               return ERR_PTR(chg);
+               return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
-               if (hugetlb_get_quota(inode->i_mapping, chg))
-                       return ERR_PTR(-ENOSPC);
+               if (hugepage_subpool_get_pages(spool, chg))
+                       return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
        spin_unlock(&hugetlb_lock);
 
        if (!page) {
-               page = alloc_buddy_huge_page(h, vma, addr);
+               page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
-                       hugetlb_put_quota(inode->i_mapping, chg);
+                       hugepage_subpool_put_pages(spool, chg);
                        return ERR_PTR(-VM_FAULT_SIGBUS);
                }
        }
 
-       set_page_refcounted(page);
-       set_page_private(page, (unsigned long) mapping);
+       set_page_private(page, (unsigned long)spool);
 
        vma_commit_reservation(h, vma, addr);
 
@@ -1103,12 +1195,28 @@ static void __init gather_bootmem_prealloc(void)
        struct huge_bootmem_page *m;
 
        list_for_each_entry(m, &huge_boot_pages, list) {
-               struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
+               struct page *page;
+
+#ifdef CONFIG_HIGHMEM
+               page = pfn_to_page(m->phys >> PAGE_SHIFT);
+               free_bootmem_late((unsigned long)m,
+                                 sizeof(struct huge_bootmem_page));
+#else
+               page = virt_to_page(m);
+#endif
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
+               /*
+                * If we had gigantic hugepages allocated at boot time, we need
+                * to restore the 'stolen' pages to totalram_pages in order to
+                * fix confusing memory reports from free(1) and other
+                * side-effects, like CommitLimit going negative.
+                */
+               if (h->order > (MAX_ORDER - 1))
+                       totalram_pages += 1 << h->order;
        }
 }
 
@@ -1361,6 +1469,7 @@ static ssize_t nr_hugepages_show_common(struct kobject *kobj,
 
        return sprintf(buf, "%lu\n", nr_huge_pages);
 }
+
 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                        struct kobject *kobj, struct kobj_attribute *attr,
                        const char *buf, size_t len)
@@ -1373,9 +1482,14 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 
        err = strict_strtoul(buf, 10, &count);
        if (err)
-               return 0;
+               goto out;
 
        h = kobj_to_hstate(kobj, &nid);
+       if (h->order >= MAX_ORDER) {
+               err = -EINVAL;
+               goto out;
+       }
+
        if (nid == NUMA_NO_NODE) {
                /*
                 * global hstate attribute
@@ -1401,6 +1515,9 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                NODEMASK_FREE(nodes_allowed);
 
        return len;
+out:
+       NODEMASK_FREE(nodes_allowed);
+       return err;
 }
 
 static ssize_t nr_hugepages_show(struct kobject *kobj,
@@ -1443,6 +1560,7 @@ static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
        struct hstate *h = kobj_to_hstate(kobj, NULL);
        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
 }
+
 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
 {
@@ -1450,9 +1568,12 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj, NULL);
 
+       if (h->order >= MAX_ORDER)
+               return -EINVAL;
+
        err = strict_strtoul(buf, 10, &input);
        if (err)
-               return 0;
+               return err;
 
        spin_lock(&hugetlb_lock);
        h->nr_overcommit_huge_pages = input;
@@ -1560,9 +1681,9 @@ static void __init hugetlb_sysfs_init(void)
 
 /*
  * node_hstate/s - associate per node hstate attributes, via their kobjects,
- * with node sysdevs in node_devices[] using a parallel array.  The array
- * index of a node sysdev or _hstate == node id.
- * This is here to avoid any static dependency of the node sysdev driver, in
+ * with node devices in node_devices[] using a parallel array.  The array
+ * index of a node device or _hstate == node id.
+ * This is here to avoid any static dependency of the node device driver, in
  * the base kernel, on the hugetlb module.
  */
 struct node_hstate {
@@ -1572,7 +1693,7 @@ struct node_hstate {
 struct node_hstate node_hstates[MAX_NUMNODES];
 
 /*
- * A subset of global hstate attributes for node sysdevs
+ * A subset of global hstate attributes for node devices
  */
 static struct attribute *per_node_hstate_attrs[] = {
        &nr_hugepages_attr.attr,
@@ -1586,7 +1707,7 @@ static struct attribute_group per_node_hstate_attr_group = {
 };
 
 /*
- * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  * Returns node id via non-NULL nidp.
  */
 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
@@ -1609,13 +1730,13 @@ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
 }
 
 /*
- * Unregister hstate attributes from a single node sysdev.
+ * Unregister hstate attributes from a single node device.
  * No-op if no hstate attributes attached.
  */
 void hugetlb_unregister_node(struct node *node)
 {
        struct hstate *h;
-       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       struct node_hstate *nhs = &node_hstates[node->dev.id];
 
        if (!nhs->hugepages_kobj)
                return;         /* no hstate attributes */
@@ -1631,7 +1752,7 @@ void hugetlb_unregister_node(struct node *node)
 }
 
 /*
- * hugetlb module exit:  unregister hstate attributes from node sysdevs
+ * hugetlb module exit:  unregister hstate attributes from node devices
  * that have them.
  */
 static void hugetlb_unregister_all_nodes(void)
@@ -1639,7 +1760,7 @@ static void hugetlb_unregister_all_nodes(void)
        int nid;
 
        /*
-        * disable node sysdev registrations.
+        * disable node device registrations.
         */
        register_hugetlbfs_with_node(NULL, NULL);
 
@@ -1651,20 +1772,20 @@ static void hugetlb_unregister_all_nodes(void)
 }
 
 /*
- * Register hstate attributes for a single node sysdev.
+ * Register hstate attributes for a single node device.
  * No-op if attributes already registered.
  */
 void hugetlb_register_node(struct node *node)
 {
        struct hstate *h;
-       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       struct node_hstate *nhs = &node_hstates[node->dev.id];
        int err;
 
        if (nhs->hugepages_kobj)
                return;         /* already allocated */
 
        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
-                                                       &node->sysdev.kobj);
+                                                       &node->dev.kobj);
        if (!nhs->hugepages_kobj)
                return;
 
@@ -1675,7 +1796,7 @@ void hugetlb_register_node(struct node *node)
                if (err) {
                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
                                        " for node %d\n",
-                                               h->name, node->sysdev.id);
+                                               h->name, node->dev.id);
                        hugetlb_unregister_node(node);
                        break;
                }
@@ -1684,8 +1805,8 @@ void hugetlb_register_node(struct node *node)
 
 /*
  * hugetlb init time:  register hstate attributes for all registered node
- * sysdevs of nodes that have memory.  All on-line nodes should have
- * registered their associated sysdev by this time.
+ * devices of nodes that have memory.  All on-line nodes should have
+ * registered their associated device by this time.
  */
 static void hugetlb_register_all_nodes(void)
 {
@@ -1693,12 +1814,12 @@ static void hugetlb_register_all_nodes(void)
 
        for_each_node_state(nid, N_HIGH_MEMORY) {
                struct node *node = &node_devices[nid];
-               if (node->sysdev.id == nid)
+               if (node->dev.id == nid)
                        hugetlb_register_node(node);
        }
 
        /*
-        * Let the node sysdev driver know we're here so it can
+        * Let the node device driver know we're here so it can
         * [un]register hstate attributes on node hotplug.
         */
        register_hugetlbfs_with_node(hugetlb_register_node,
@@ -1855,13 +1976,18 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 {
        struct hstate *h = &default_hstate;
        unsigned long tmp;
+       int ret;
 
-       if (!write)
-               tmp = h->max_huge_pages;
+       tmp = h->max_huge_pages;
+
+       if (write && h->order >= MAX_ORDER)
+               return -EINVAL;
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
-       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
 
        if (write) {
                NODEMASK_ALLOC(nodemask_t, nodes_allowed,
@@ -1876,8 +2002,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
                if (nodes_allowed != &node_states[N_HIGH_MEMORY])
                        NODEMASK_FREE(nodes_allowed);
        }
-
-       return 0;
+out:
+       return ret;
 }
 
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
@@ -1915,21 +2041,26 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 {
        struct hstate *h = &default_hstate;
        unsigned long tmp;
+       int ret;
+
+       tmp = h->nr_overcommit_huge_pages;
 
-       if (!write)
-               tmp = h->nr_overcommit_huge_pages;
+       if (write && h->order >= MAX_ORDER)
+               return -EINVAL;
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
-       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
 
        if (write) {
                spin_lock(&hugetlb_lock);
                h->nr_overcommit_huge_pages = tmp;
                spin_unlock(&hugetlb_lock);
        }
-
-       return 0;
+out:
+       return ret;
 }
 
 #endif /* CONFIG_SYSCTL */
@@ -2018,7 +2149,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
         * This new VMA should share its siblings reservation map if present.
         * The VMA will only ever have a valid reservation map pointer where
         * it is being copied for another still existing VMA.  As that VMA
-        * has a reference to the reservation map it cannot dissappear until
+        * has a reference to the reservation map it cannot disappear until
         * after this open call completes.  It is therefore safe to take a
         * new reference here without additional locking.
         */
@@ -2030,6 +2161,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
        struct hstate *h = hstate_vma(vma);
        struct resv_map *reservations = vma_resv_map(vma);
+       struct hugepage_subpool *spool = subpool_vma(vma);
        unsigned long reserve;
        unsigned long start;
        unsigned long end;
@@ -2045,7 +2177,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 
                if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
-                       hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
+                       hugepage_subpool_put_pages(spool, reserve);
                }
        }
 }
@@ -2091,9 +2223,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
-       }
 }
 
 
@@ -2129,6 +2260,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
+                       page_dup_rmap(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
@@ -2140,6 +2272,32 @@ nomem:
        return -ENOMEM;
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, struct page *ref_page)
 {
@@ -2153,7 +2311,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long sz = huge_page_size(h);
 
        /*
-        * A page gathering list, protected by per file i_mmap_lock. The
+        * A page gathering list, protected by per file i_mmap_mutex. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
@@ -2173,16 +2331,23 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
 
+               pte = huge_ptep_get(ptep);
+               if (huge_pte_none(pte))
+                       continue;
+
+               /*
+                * HWPoisoned hugepage is already unmapped and dropped reference
+                */
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+                       continue;
+
+               page = pte_page(pte);
                /*
                 * If a reference page is supplied, it is because a specific
                 * page is being unmapped, not a range. Ensure the page we
                 * are about to unmap is the actual page of interest.
                 */
                if (ref_page) {
-                       pte = huge_ptep_get(ptep);
-                       if (huge_pte_none(pte))
-                               continue;
-                       page = pte_page(pte);
                        if (page != ref_page)
                                continue;
 
@@ -2195,18 +2360,19 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                }
 
                pte = huge_ptep_get_and_clear(mm, address, ptep);
-               if (huge_pte_none(pte))
-                       continue;
-
-               page = pte_page(pte);
                if (pte_dirty(pte))
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
+
+               /* Bail out after unmapping reference page if supplied */
+               if (ref_page)
+                       break;
        }
-       spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
+               page_remove_rmap(page);
                list_del(&page->lru);
                put_page(page);
        }
@@ -2215,9 +2381,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        __unmap_hugepage_range(vma, start, end, ref_page);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
@@ -2240,16 +2406,15 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * from page cache lookup which is in HPAGE_SIZE units.
         */
        address = address & huge_page_mask(h);
-       pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
-               + (vma->vm_pgoff >> PAGE_SHIFT);
-       mapping = (struct address_space *)page_private(page);
+       pgoff = vma_hugecache_offset(h, vma, address);
+       mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
 
        /*
         * Take the mapping lock for the duration of the table walk. As
         * this mapping should be shared between all the VMAs,
         * __unmap_hugepage_range() is called as the lock is already held
         */
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -2267,11 +2432,17 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                address, address + huge_page_size(h),
                                page);
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 
        return 1;
 }
 
+/*
+ * Hugetlb_cow() should be called with page lock of the original hugepage held.
+ * Called with hugetlb_instantiation_mutex held and pte_page locked so we
+ * cannot race with other handlers or page migration.
+ * Keep the pte_same checks anyway to make transition from the mutex easier.
+ */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte,
                        struct page *pagecache_page)
@@ -2286,8 +2457,10 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 retry_avoidcopy:
        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
-       avoidcopy = (page_count(old_page) == 1);
+       avoidcopy = (page_mapcount(old_page) == 1);
        if (avoidcopy) {
+               if (PageAnon(old_page))
+                       page_move_anon_rmap(old_page, vma, address);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
@@ -2325,10 +2498,16 @@ retry_avoidcopy:
                if (outside_reserve) {
                        BUG_ON(huge_pte_none(pte));
                        if (unmap_ref_private(mm, vma, old_page, address)) {
-                               BUG_ON(page_count(old_page) != 1);
                                BUG_ON(huge_pte_none(pte));
                                spin_lock(&mm->page_table_lock);
-                               goto retry_avoidcopy;
+                               ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+                               if (likely(pte_same(huge_ptep_get(ptep), pte)))
+                                       goto retry_avoidcopy;
+                               /*
+                                * race occurs while re-acquiring page_table_lock, and
+                                * our job is done.
+                                */
+                               return 0;
                        }
                        WARN_ON_ONCE(1);
                }
@@ -2338,7 +2517,20 @@ retry_avoidcopy:
                return -PTR_ERR(new_page);
        }
 
-       copy_huge_page(new_page, old_page, address, vma);
+       /*
+        * When the original hugepage is shared one, it does not have
+        * anon_vma prepared.
+        */
+       if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
+               /* Caller expects lock to be held */
+               spin_lock(&mm->page_table_lock);
+               return VM_FAULT_OOM;
+       }
+
+       copy_user_huge_page(new_page, old_page, address, vma,
+                           pages_per_huge_page(h));
        __SetPageUptodate(new_page);
 
        /*
@@ -2355,6 +2547,8 @@ retry_avoidcopy:
                huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
+               page_remove_rmap(old_page);
+               hugepage_add_new_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
                mmu_notifier_invalidate_range_end(mm,
@@ -2404,6 +2598,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
+       int anon_rmap = 0;
        pgoff_t idx;
        unsigned long size;
        struct page *page;
@@ -2413,7 +2608,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /*
         * Currently, we are forced to kill the process in the event the
         * original mapper has unmapped pages from the child due to a failed
-        * COW. Warn that such a situation has occured as it may not be obvious
+        * COW. Warn that such a situation has occurred as it may not be obvious
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
                printk(KERN_WARNING
@@ -2440,7 +2635,7 @@ retry:
                        ret = -PTR_ERR(page);
                        goto out;
                }
-               clear_huge_page(page, address, huge_page_size(h));
+               clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
 
                if (vma->vm_flags & VM_MAYSHARE) {
@@ -2460,7 +2655,22 @@ retry:
                        spin_unlock(&inode->i_lock);
                } else {
                        lock_page(page);
-                       page->mapping = HUGETLB_POISON;
+                       if (unlikely(anon_vma_prepare(vma))) {
+                               ret = VM_FAULT_OOM;
+                               goto backout_unlocked;
+                       }
+                       anon_rmap = 1;
+               }
+       } else {
+               /*
+                * If a memory error occurs between mmap() and fault, some processes
+                * don't have a hwpoisoned swap entry for the errored virtual address.
+                * So we need to block hugepage fault by PG_hwpoison bit check.
+                */
+               if (unlikely(PageHWPoison(page))) {
+                       ret = VM_FAULT_HWPOISON |
+                             VM_FAULT_SET_HINDEX(h - hstates);
+                       goto backout_unlocked;
                }
        }
 
@@ -2485,6 +2695,10 @@ retry:
        if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;
 
+       if (anon_rmap)
+               hugepage_add_new_anon_rmap(page, vma, address);
+       else
+               page_dup_rmap(page);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
@@ -2513,10 +2727,24 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_t *ptep;
        pte_t entry;
        int ret;
+       struct page *page = NULL;
        struct page *pagecache_page = NULL;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
+       address &= huge_page_mask(h);
+
+       ptep = huge_pte_offset(mm, address);
+       if (ptep) {
+               entry = huge_ptep_get(ptep);
+               if (unlikely(is_hugetlb_entry_migration(entry))) {
+                       migration_entry_wait(mm, (pmd_t *)ptep, address);
+                       return 0;
+               } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+                       return VM_FAULT_HWPOISON_LARGE |
+                              VM_FAULT_SET_HINDEX(h - hstates);
+       }
+
        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
        if (!ptep)
                return VM_FAULT_OOM;
@@ -2554,6 +2782,18 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                                                vma, address);
        }
 
+       /*
+        * hugetlb_cow() requires page locks of pte_page(entry) and
+        * pagecache_page, so here we need to take the former one
+        * when page != pagecache_page or !pagecache_page.
+        * Note that locking order is always pagecache_page -> page,
+        * so no worry about deadlock.
+        */
+       page = pte_page(entry);
+       get_page(page);
+       if (page != pagecache_page)
+               lock_page(page);
+
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
        if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
@@ -2580,6 +2820,9 @@ out_page_table_lock:
                unlock_page(pagecache_page);
                put_page(pagecache_page);
        }
+       if (page != pagecache_page)
+               unlock_page(page);
+       put_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
@@ -2691,7 +2934,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
@@ -2706,7 +2949,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
        flush_tlb_range(vma, start, end);
 }
@@ -2714,17 +2957,18 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
-                                       int acctflag)
+                                       vm_flags_t vm_flags)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
+       struct hugepage_subpool *spool = subpool_inode(inode);
 
        /*
         * Only apply hugepage reservation if asked. At fault time, an
         * attempt will be made for VM_NORESERVE to allocate a page
-        * and filesystem quota without using reserves
+        * without using reserves
         */
-       if (acctflag & VM_NORESERVE)
+       if (vm_flags & VM_NORESERVE)
                return 0;
 
        /*
@@ -2749,17 +2993,17 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (chg < 0)
                return chg;
 
-       /* There must be enough filesystem quota for the mapping */
-       if (hugetlb_get_quota(inode->i_mapping, chg))
+       /* There must be enough pages in the subpool for the mapping */
+       if (hugepage_subpool_get_pages(spool, chg))
                return -ENOSPC;
 
        /*
         * Check enough hugepages are available for the reservation.
-        * Hand back the quota if there are not
+        * Hand the pages back to the subpool if there are not
         */
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
-               hugetlb_put_quota(inode->i_mapping, chg);
+               hugepage_subpool_put_pages(spool, chg);
                return ret;
        }
 
@@ -2783,11 +3027,51 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
        struct hstate *h = hstate_inode(inode);
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
+       struct hugepage_subpool *spool = subpool_inode(inode);
 
        spin_lock(&inode->i_lock);
        inode->i_blocks -= (blocks_per_huge_page(h) * freed);
        spin_unlock(&inode->i_lock);
 
-       hugetlb_put_quota(inode->i_mapping, (chg - freed));
+       hugepage_subpool_put_pages(spool, (chg - freed));
        hugetlb_acct_memory(h, -(chg - freed));
 }
+
+#ifdef CONFIG_MEMORY_FAILURE
+
+/* Should be called with hugetlb_lock held */
+static int is_hugepage_on_freelist(struct page *hpage)
+{
+       struct page *page;
+       struct page *tmp;
+       struct hstate *h = page_hstate(hpage);
+       int nid = page_to_nid(hpage);
+
+       list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
+               if (page == hpage)
+                       return 1;
+       return 0;
+}
+
+/*
+ * This function is called from memory failure code.
+ * Assume the caller holds page lock of the head page.
+ */
+int dequeue_hwpoisoned_huge_page(struct page *hpage)
+{
+       struct hstate *h = page_hstate(hpage);
+       int nid = page_to_nid(hpage);
+       int ret = -EBUSY;
+
+       spin_lock(&hugetlb_lock);
+       if (is_hugepage_on_freelist(hpage)) {
+               list_del(&hpage->lru);
+               set_page_refcounted(hpage);
+               h->free_huge_pages--;
+               h->free_huge_pages_node[nid]--;
+               ret = 0;
+       }
+       spin_unlock(&hugetlb_lock);
+       return ret;
+}
+#endif
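
The two CONFIG_MEMORY_FAILURE helpers added at the end are consumed by mm/memory-failure.c rather than by hugetlb.c itself: alloc_huge_page_node() supplies a migration target for soft offlining, and dequeue_hwpoisoned_huge_page() pulls a poisoned free hugepage off the freelist so it cannot be reused. A rough sketch of the expected call sites (the caller-side example_* names are assumptions, not part of this patch):

    /* Illustrative sketch only; the example_* names are assumptions. */

    /* Soft offline: allocate a replacement hugepage near the failing one. */
    static struct page *example_new_hugepage(struct page *hpage)
    {
            return alloc_huge_page_node(page_hstate(hpage), page_to_nid(hpage));
    }

    /* Hard offline: isolate a free hugepage so it is never handed out again. */
    static int example_isolate_free_hugepage(struct page *hpage)
    {
            int ret;

            lock_page(hpage);       /* dequeue expects the head page to be locked */
            ret = dequeue_hwpoisoned_huge_page(hpage);
            unlock_page(hpage);
            return ret;
    }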