memcg,thp: fix res_counter:96 regression
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 88113ee..7685d4a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -89,7 +89,6 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
        MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
-       MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */
        MEM_CGROUP_STAT_NSTATS,
 };
 
@@ -298,6 +297,12 @@ struct mem_cgroup {
         */
        unsigned long   move_charge_at_immigrate;
        /*
+        * set > 0 if pages under this cgroup are moving to other cgroups.
+        */
+       atomic_t        moving_account;
+       /* taken only while moving_account > 0 */
+       spinlock_t      move_lock;
+       /*
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
@@ -1287,40 +1292,48 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
        return memcg->swappiness;
 }
 
-static void mem_cgroup_start_move(struct mem_cgroup *memcg)
-{
-       int cpu;
+/*
+ * memcg->moving_account is used for checking the possibility that some thread
+ * is calling move_account(). When a thread on CPU-A starts moving pages under
+ * a memcg, other threads should check memcg->moving_account under
+ * rcu_read_lock(), like this:
+ *
+ *         CPU-A                                    CPU-B
+ *                                              rcu_read_lock()
+ *         memcg->moving_account+1              if (memcg->moving_account)
+ *                                                   take heavy locks.
+ *         synchronize_rcu()                    update something.
+ *                                              rcu_read_unlock()
+ *         start move here.
+ */
 
-       get_online_cpus();
-       spin_lock(&memcg->pcp_counter_lock);
-       for_each_online_cpu(cpu)
-               per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
-       memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
-       spin_unlock(&memcg->pcp_counter_lock);
-       put_online_cpus();
+/* for quick checking without looking up memcg */
+atomic_t memcg_moving __read_mostly;
 
+static void mem_cgroup_start_move(struct mem_cgroup *memcg)
+{
+       atomic_inc(&memcg_moving);
+       atomic_inc(&memcg->moving_account);
        synchronize_rcu();
 }
 
 static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 {
-       int cpu;
-
-       if (!memcg)
-               return;
-       get_online_cpus();
-       spin_lock(&memcg->pcp_counter_lock);
-       for_each_online_cpu(cpu)
-               per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
-       memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
-       spin_unlock(&memcg->pcp_counter_lock);
-       put_online_cpus();
+       /*
+        * Now, mem_cgroup_clear_mc() may call this function with NULL.
+        * We check for NULL in the callee rather than in the caller.
+        */
+       if (memcg) {
+               atomic_dec(&memcg_moving);
+               atomic_dec(&memcg->moving_account);
+       }
 }
+
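For orientation, the CPU-A ("mover") side of the protocol in the comment above brackets the whole move with the two helpers added here. The following is an illustrative sketch only, not part of the patch; move_pages_example() is a hypothetical wrapper, while mem_cgroup_start_move()/mem_cgroup_end_move() are the real helpers.

static void move_pages_example(struct mem_cgroup *from)
{
	/*
	 * Bump the global and per-memcg counters, then synchronize_rcu():
	 * any reader that could still have seen moving_account == 0 has
	 * left its rcu_read_lock() section before the move really starts.
	 */
	mem_cgroup_start_move(from);

	/* ... move pages, e.g. via mem_cgroup_move_account() ... */

	/* Drop the counters; new readers take the lockless fast path again. */
	mem_cgroup_end_move(from);
}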
 /*
  * 2 routines for checking "mem" is under move_account() or not.
  *
- * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used
- *                       for avoiding race in accounting. If true,
+ * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
+ *                       is used for avoiding races in accounting.  If true,
  *                       pc->mem_cgroup may be overwritten.
  *
 * mem_cgroup_under_move() - checking whether a cgroup is mc.from or mc.to or
@@ -1328,10 +1341,10 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 *                       waiting under high memory pressure caused by "move".
  */
 
-static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
+static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
 {
        VM_BUG_ON(!rcu_read_lock_held());
-       return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
+       return atomic_read(&memcg->moving_account) > 0;
 }
 
 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
@@ -1372,6 +1385,24 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
        return false;
 }
 
+/*
+ * Take this lock when
+ * - code tries to modify a page's memcg while it's USED.
+ * - code tries to modify page state accounting in a memcg.
+ * see mem_cgroup_stolen(), too.
+ */
+static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
+                                 unsigned long *flags)
+{
+       spin_lock_irqsave(&memcg->move_lock, *flags);
+}
+
+static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
+                               unsigned long *flags)
+{
+       spin_unlock_irqrestore(&memcg->move_lock, *flags);
+}
+
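On the CPU-B ("reader") side the same protocol condenses to the pattern below. Again this is only an illustrative sketch; reader_side_example() is hypothetical, and the real user is __mem_cgroup_begin_update_page_stat() further down.

static void reader_side_example(struct mem_cgroup *memcg)
{
	unsigned long flags;

	rcu_read_lock();                /* delays any synchronize_rcu() on CPU-A */
	if (mem_cgroup_stolen(memcg)) { /* a mover may be active */
		move_lock_mem_cgroup(memcg, &flags);
		/* ... update per-memcg page state under the heavy lock ... */
		move_unlock_mem_cgroup(memcg, &flags);
	} else {
		/* ... lockless update: pc->mem_cgroup cannot change here ... */
	}
	rcu_read_unlock();
}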
 /**
  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
  * @memcg: The memory cgroup that went over limit
@@ -1882,41 +1913,66 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  * by flags.
  *
 * Considering "move", this is the only case where we see a race. To make the race
- * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are
- * possibility of race condition. If there is, we take a lock.
+ * small, we check memcg->moving_account and detect the possibility of a race.
+ * If there is, we take a lock.
  */
 
+void __mem_cgroup_begin_update_page_stat(struct page *page,
+                               bool *locked, unsigned long *flags)
+{
+       struct mem_cgroup *memcg;
+       struct page_cgroup *pc;
+
+       pc = lookup_page_cgroup(page);
+again:
+       memcg = pc->mem_cgroup;
+       if (unlikely(!memcg || !PageCgroupUsed(pc)))
+               return;
+       /*
+        * If this memory cgroup is not under account moving, we don't
+        * need to take move_lock_mem_cgroup(). Because we already hold
+        * rcu_read_lock(), any calls to move_account will be delayed until
+        * rcu_read_unlock() if mem_cgroup_stolen() == true.
+        */
+       if (!mem_cgroup_stolen(memcg))
+               return;
+
+       move_lock_mem_cgroup(memcg, flags);
+       if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
+               move_unlock_mem_cgroup(memcg, flags);
+               goto again;
+       }
+       *locked = true;
+}
+
+void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
+{
+       struct page_cgroup *pc = lookup_page_cgroup(page);
+
+       /*
+        * It's guaranteed that pc->mem_cgroup never changes while
+        * the lock is held, because any routine that modifies
+        * pc->mem_cgroup must take move_lock_mem_cgroup().
+        */
+       move_unlock_mem_cgroup(pc->mem_cgroup, flags);
+}
+
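A likely caller-side shape for the new begin/end pair, sketched under the assumption that the companion memcontrol.h change (not shown in this hunk) adds mem_cgroup_begin_update_page_stat()/mem_cgroup_end_update_page_stat() wrappers that check mem_cgroup_disabled(), take rcu_read_lock() and initialize *locked before calling the __ variants above. example_inc_file_mapped() itself is hypothetical.

static void example_inc_file_mapped(struct page *page)
{
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	/* ... the caller's own mapcount / zone-stat update goes here ... */
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
	mem_cgroup_end_update_page_stat(page, &locked, &flags);
}

The flags value is only meaningful when the begin helper set *locked, which is why mem_cgroup_update_page_stat() below no longer takes any lock itself.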
 void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx, int val)
 {
        struct mem_cgroup *memcg;
        struct page_cgroup *pc = lookup_page_cgroup(page);
-       bool need_unlock = false;
        unsigned long uninitialized_var(flags);
 
        if (mem_cgroup_disabled())
                return;
 
-       rcu_read_lock();
        memcg = pc->mem_cgroup;
        if (unlikely(!memcg || !PageCgroupUsed(pc)))
-               goto out;
-       /* pc->mem_cgroup is unstable ? */
-       if (unlikely(mem_cgroup_stealed(memcg))) {
-               /* take a lock against to access pc->mem_cgroup */
-               move_lock_page_cgroup(pc, &flags);
-               need_unlock = true;
-               memcg = pc->mem_cgroup;
-               if (!memcg || !PageCgroupUsed(pc))
-                       goto out;
-       }
+               return;
 
        switch (idx) {
        case MEMCG_NR_FILE_MAPPED:
-               if (val > 0)
-                       SetPageCgroupFileMapped(pc);
-               else if (!page_mapped(page))
-                       ClearPageCgroupFileMapped(pc);
                idx = MEM_CGROUP_STAT_FILE_MAPPED;
                break;
        default:
@@ -1924,13 +1980,7 @@ void mem_cgroup_update_page_stat(struct page *page,
        }
 
        this_cpu_add(memcg->stat->count[idx], val);
-
-out:
-       if (unlikely(need_unlock))
-               move_unlock_page_cgroup(pc, &flags);
-       rcu_read_unlock();
 }
-EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
@@ -2101,17 +2151,6 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
                per_cpu(memcg->stat->events[i], cpu) = 0;
                memcg->nocpu_base.events[i] += x;
        }
-       /* need to clear ON_MOVE value, works as a kind of lock. */
-       per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
-       spin_unlock(&memcg->pcp_counter_lock);
-}
-
-static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
-{
-       int idx = MEM_CGROUP_ON_MOVE;
-
-       spin_lock(&memcg->pcp_counter_lock);
-       per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
        spin_unlock(&memcg->pcp_counter_lock);
 }
 
@@ -2123,13 +2162,10 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
        struct memcg_stock_pcp *stock;
        struct mem_cgroup *iter;
 
-       if ((action == CPU_ONLINE)) {
-               for_each_mem_cgroup(iter)
-                       synchronize_mem_cgroup_on_move(iter, cpu);
+       if (action == CPU_ONLINE)
                return NOTIFY_OK;
-       }
 
-       if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
+       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;
 
        for_each_mem_cgroup(iter)
@@ -2440,10 +2476,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                       struct page *page,
                                       unsigned int nr_pages,
-                                      struct page_cgroup *pc,
                                       enum charge_type ctype,
                                       bool lrucare)
 {
+       struct page_cgroup *pc = lookup_page_cgroup(page);
        struct zone *uninitialized_var(zone);
        bool was_on_lru = false;
        bool anon;
@@ -2511,8 +2547,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
-                       (1 << PCG_MIGRATION))
+#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
 /*
  * Because tail pages are not marked as "used", set it. We're under
  * zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2583,9 +2618,9 @@ static int mem_cgroup_move_account(struct page *page,
        if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
                goto unlock;
 
-       move_lock_page_cgroup(pc, &flags);
+       move_lock_mem_cgroup(from, &flags);
 
-       if (PageCgroupFileMapped(pc)) {
+       if (!anon && page_mapped(page)) {
                /* Update mapped_file data for mem_cgroup */
                preempt_disable();
                __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
@@ -2607,7 +2642,7 @@ static int mem_cgroup_move_account(struct page *page,
         * guaranteed that "to" is never removed. So, we don't check rmdir
         * status here.
         */
-       move_unlock_page_cgroup(pc, &flags);
+       move_unlock_mem_cgroup(from, &flags);
        ret = 0;
 unlock:
        unlock_page_cgroup(pc);
@@ -2681,7 +2716,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 {
        struct mem_cgroup *memcg = NULL;
        unsigned int nr_pages = 1;
-       struct page_cgroup *pc;
        bool oom = true;
        int ret;
 
@@ -2695,11 +2729,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                oom = false;
        }
 
-       pc = lookup_page_cgroup(page);
        ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
        if (ret == -ENOMEM)
                return ret;
-       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
+       __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
        return 0;
 }
 
@@ -2796,16 +2829,13 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
                                        enum charge_type ctype)
 {
-       struct page_cgroup *pc;
-
        if (mem_cgroup_disabled())
                return;
        if (!memcg)
                return;
        cgroup_exclude_rmdir(&memcg->css);
 
-       pc = lookup_page_cgroup(page);
-       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
+       __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
        /*
         * Now swap is on-memory. This means this page may be
         * counted both as mem and swap....double count.
@@ -2950,6 +2980,11 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 
        switch (ctype) {
        case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+               /*
+                * Generally PageAnon tells if it's the anon statistics to be
+                * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
+                * used before the page has been marked PageAnon.
+                */
                anon = true;
                /* fallthrough */
        case MEM_CGROUP_CHARGE_TYPE_DROP:
@@ -3258,14 +3293,13 @@ int mem_cgroup_prepare_migration(struct page *page,
         * page. In the case new page is migrated but not remapped, new page's
         * mapcount will be finally 0 and we call uncharge in end_migration().
         */
-       pc = lookup_page_cgroup(newpage);
        if (PageAnon(page))
                ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
        else if (page_is_file_cache(page))
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
        return ret;
 }
 
@@ -3352,7 +3386,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
         * the newpage may be on LRU(or pagevec for LRU) already. We lock
         * LRU while we overwrite pc->mem_cgroup.
         */
-       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -3723,7 +3757,7 @@ move_account:
                        goto try_to_free;
                cond_resched();
        /* "ret" should also be checked to ensure all lists are empty. */
-       } while (memcg->res.usage > 0 || ret);
+       } while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
 out:
        css_put(&memcg->css);
        return ret;
@@ -3738,7 +3772,7 @@ try_to_free:
        lru_add_drain_all();
        /* try to free all pages in this cgroup */
        shrink = 1;
-       while (nr_retries && memcg->res.usage > 0) {
+       while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
                int progress;
 
                if (signal_pending(current)) {
@@ -3862,7 +3896,6 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
                break;
        default:
                BUG();
-               break;
        }
        return val;
 }
@@ -4427,12 +4460,6 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
        else
                BUG();
 
-       /*
-        * Something went wrong if we trying to unregister a threshold
-        * if we don't have thresholds
-        */
-       BUG_ON(!thresholds);
-
        if (!thresholds->primary)
                goto unlock;
 
@@ -4480,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 swap_buffers:
        /* Swap primary and spare array */
        thresholds->spare = thresholds->primary;
+       /* If all events are unregistered, free the spare array */
+       if (!new) {
+               kfree(thresholds->spare);
+               thresholds->spare = NULL;
+       }
+
        rcu_assign_pointer(thresholds->primary, new);
 
        /* To be sure that nobody uses thresholds */
@@ -4982,6 +5015,7 @@ mem_cgroup_create(struct cgroup *cont)
        atomic_set(&memcg->refcnt, 1);
        memcg->move_charge_at_immigrate = 0;
        mutex_init(&memcg->thresholds_lock);
+       spin_lock_init(&memcg->move_lock);
        return &memcg->css;
 free_out:
        __mem_cgroup_free(memcg);
@@ -5076,7 +5110,7 @@ one_by_one:
 }
 
 /**
- * is_target_pte_for_mc - check a pte whether it is valid for move charge
+ * get_mctgt_type - get target type of moving charge
  * @vma: the vma the pte to be checked belongs
  * @addr: the address corresponding to the pte to be checked
  * @ptent: the pte to be checked
@@ -5099,7 +5133,7 @@ union mc_target {
 };
 
 enum mc_target_type {
-       MC_TARGET_NONE, /* not used */
+       MC_TARGET_NONE = 0,
        MC_TARGET_PAGE,
        MC_TARGET_SWAP,
 };
@@ -5180,12 +5214,12 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
        return page;
 }
 
-static int is_target_pte_for_mc(struct vm_area_struct *vma,
+static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
                unsigned long addr, pte_t ptent, union mc_target *target)
 {
        struct page *page = NULL;
        struct page_cgroup *pc;
-       int ret = 0;
+       enum mc_target_type ret = MC_TARGET_NONE;
        swp_entry_t ent = { .val = 0 };
 
        if (pte_present(ptent))
@@ -5196,7 +5230,7 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
                page = mc_handle_file_pte(vma, addr, ptent, &ent);
 
        if (!page && !ent.val)
-               return 0;
+               return ret;
        if (page) {
                pc = lookup_page_cgroup(page);
                /*
@@ -5222,6 +5256,41 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
        return ret;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * We don't consider swapping or file mapped pages because THP does not
+ * support them for now.
+ * Caller should make sure that pmd_trans_huge(pmd) is true.
+ */
+static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
+               unsigned long addr, pmd_t pmd, union mc_target *target)
+{
+       struct page *page = NULL;
+       struct page_cgroup *pc;
+       enum mc_target_type ret = MC_TARGET_NONE;
+
+       page = pmd_page(pmd);
+       VM_BUG_ON(!page || !PageHead(page));
+       if (!move_anon())
+               return ret;
+       pc = lookup_page_cgroup(page);
+       if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+               ret = MC_TARGET_PAGE;
+               if (target) {
+                       get_page(page);
+                       target->page = page;
+               }
+       }
+       return ret;
+}
+#else
+static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
+               unsigned long addr, pmd_t pmd, union mc_target *target)
+{
+       return MC_TARGET_NONE;
+}
+#endif
+
 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
                                        unsigned long addr, unsigned long end,
                                        struct mm_walk *walk)
@@ -5230,13 +5299,18 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
        pte_t *pte;
        spinlock_t *ptl;
 
-       split_huge_page_pmd(walk->mm, pmd);
-       if (pmd_trans_unstable(pmd))
+       if (pmd_trans_huge_lock(pmd, vma) == 1) {
+               if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
+                       mc.precharge += HPAGE_PMD_NR;
+               spin_unlock(&vma->vm_mm->page_table_lock);
                return 0;
+       }
 
+       if (pmd_trans_unstable(pmd))
+               return 0;
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
-               if (is_target_pte_for_mc(vma, addr, *pte, NULL))
+               if (get_mctgt_type(vma, addr, *pte, NULL))
                        mc.precharge++; /* increment precharge temporarily */
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
@@ -5391,25 +5465,57 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
        struct vm_area_struct *vma = walk->private;
        pte_t *pte;
        spinlock_t *ptl;
+       enum mc_target_type target_type;
+       union mc_target target;
+       struct page *page;
+       struct page_cgroup *pc;
+
+       /*
+        * We don't take compound_lock() here, but there is no race with thp
+        * splitting because:
+        *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
+        *    under splitting, which means there's no concurrent thp split,
+        *  - if another thread runs into split_huge_page() just after we
+        *    entered this if-block, it must wait for the page table lock to be
+        *    released in __split_huge_page_splitting(), where the main part of
+        *    the thp split has not been executed yet.
+        */
+       if (pmd_trans_huge_lock(pmd, vma) == 1) {
+               if (mc.precharge < HPAGE_PMD_NR) {
+                       spin_unlock(&vma->vm_mm->page_table_lock);
+                       return 0;
+               }
+               target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
+               if (target_type == MC_TARGET_PAGE) {
+                       page = target.page;
+                       if (!isolate_lru_page(page)) {
+                               pc = lookup_page_cgroup(page);
+                               if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
+                                                            pc, mc.from, mc.to,
+                                                            false)) {
+                                       mc.precharge -= HPAGE_PMD_NR;
+                                       mc.moved_charge += HPAGE_PMD_NR;
+                               }
+                               putback_lru_page(page);
+                       }
+                       put_page(page);
+               }
+               spin_unlock(&vma->vm_mm->page_table_lock);
+               return 0;
+       }
 
-       split_huge_page_pmd(walk->mm, pmd);
        if (pmd_trans_unstable(pmd))
                return 0;
 retry:
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; addr += PAGE_SIZE) {
                pte_t ptent = *(pte++);
-               union mc_target target;
-               int type;
-               struct page *page;
-               struct page_cgroup *pc;
                swp_entry_t ent;
 
                if (!mc.precharge)
                        break;
 
-               type = is_target_pte_for_mc(vma, addr, ptent, &target);
-               switch (type) {
+               switch (get_mctgt_type(vma, addr, ptent, &target)) {
                case MC_TARGET_PAGE:
                        page = target.page;
                        if (isolate_lru_page(page))
@@ -5422,7 +5528,7 @@ retry:
                                mc.moved_charge++;
                        }
                        putback_lru_page(page);
-put:                   /* is_target_pte_for_mc() gets the page */
+put:                   /* get_mctgt_type() gets the page */
                        put_page(page);
                        break;
                case MC_TARGET_SWAP: