#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <xen/features.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/hypervisor.h>
#include <asm/mmu_context.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

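/*
 * Pages that become part of a pinned pagetable must not be writably
 * mapped anywhere, so kernel pte pages are switched to read-only in the
 * linear mapping right after allocation (skipped when the hypervisor
 * advertises writable page tables).
 */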
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(PGALLOC_GFP);

        if (pte)
                make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
        return pte;
}

static void _pte_free(struct page *page, unsigned int order)
{
        BUG_ON(order);
        __pte_free(page);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (pte) {
                pgtable_page_ctor(pte);
                SetPageForeign(pte, _pte_free);
                init_page_count(pte);
        }
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);

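/*
 * Destructor for pte pages handed out by pte_alloc_one{,_kernel}().  A
 * page that still belongs to a pinned pagetable is read-only in the
 * linear mapping; restore write access before returning it to the page
 * allocator.
 */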
void __pte_free(pgtable_t pte)
{
        if (!PageHighMem(pte)) {
                if (PagePinned(pte)) {
                        unsigned long pfn = page_to_pfn(pte);

                        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
                                                         pfn_pte(pfn, PAGE_KERNEL),
                                                         0))
                                BUG();
                        ClearPagePinned(pte);
                }
        }
#ifdef CONFIG_HIGHPTE
        else if (PagePinned(pte))
                ClearPagePinned(pte);
#endif

        ClearPageForeign(pte);
        init_page_count(pte);

        pgtable_page_dtor(pte);
        __free_page(pte);
}

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
static void _pmd_free(struct page *page, unsigned int order)
{
        BUG_ON(order);
        __pmd_free(page);
}

pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pmd;

        pmd = alloc_pages(PGALLOC_GFP, 0);
        if (!pmd)
                return NULL;
        SetPageForeign(pmd, _pmd_free);
        init_page_count(pmd);
        return page_address(pmd);
}

void __pmd_free(pgtable_t pmd)
{
        if (PagePinned(pmd)) {
                unsigned long pfn = page_to_pfn(pmd);

                if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
                                                 pfn_pte(pfn, PAGE_KERNEL),
                                                 0))
                        BUG();
                ClearPagePinned(pmd);
        }

        ClearPageForeign(pmd);
        init_page_count(pmd);
        __free_page(pmd);
}

void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

static void _pin_lock(struct mm_struct *mm, int lock)
{
        if (lock)
                spin_lock(&mm->page_table_lock);
#if USE_SPLIT_PTLOCKS
        /* While mm->page_table_lock protects us against insertions and
         * removals of higher level page table pages, it doesn't protect
         * against updates of pte-s. Such updates, however, require the
         * pte pages to be in a consistent state (unpinned+writable or
         * pinned+readonly). The pinning and attribute changes, however,
         * cannot be done atomically, which is why such updates must be
         * prevented from happening concurrently.
         * Note that no pte lock can ever elsewhere be acquired nesting
         * with an already acquired one in the same mm, or with the mm's
         * page_table_lock already acquired, as that would break in the
         * non-split case (where all these are actually resolving to the
         * one page_table_lock). Thus acquiring all of them here is not
         * going to result in dead locks, and the order of acquires
         * doesn't matter.
         */
        {
                pgd_t *pgd = mm->pgd;
                unsigned int g;

                for (g = 0; g <= ((TASK_SIZE_MAX-1) / PGDIR_SIZE); g++, pgd++) {
                        pud_t *pud;
                        unsigned int u;

                        if (pgd_none(*pgd))
                                continue;
                        pud = pud_offset(pgd, 0);
                        for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                                pmd_t *pmd;
                                unsigned int m;

                                if (pud_none(*pud))
                                        continue;
                                pmd = pmd_offset(pud, 0);
                                for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                        spinlock_t *ptl;

                                        if (pmd_none(*pmd))
                                                continue;
                                        ptl = pte_lockptr(0, pmd);
                                        if (lock)
                                                spin_lock(ptl);
                                        else
                                                spin_unlock(ptl);
                                }
                        }
                }
        }
#endif
        if (!lock)
                spin_unlock(&mm->page_table_lock);
}

#define pin_lock(mm) _pin_lock(mm, 1)
#define pin_unlock(mm) _pin_lock(mm, 0)

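/*
 * Protection changes on pagetable pages are issued as batched
 * update_va_mapping multicalls, at most PIN_BATCH at a time, staged in
 * a per-CPU buffer of multicall entries.
 */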
#define PIN_BATCH sizeof(void *)
static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);

static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
                                             unsigned int cpu, unsigned int seq)
{
        unsigned long pfn = page_to_pfn(page);

        if (pgprot_val(flags) & _PAGE_RW)
                ClearPagePinned(page);
        else
                SetPagePinned(page);
        if (PageHighMem(page))
                return seq;
        MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
                                (unsigned long)__va(pfn << PAGE_SHIFT),
                                pfn_pte(pfn, flags), 0);
        if (unlikely(++seq == PIN_BATCH)) {
                if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
                                                        PIN_BATCH, NULL)))
                        BUG();
                seq = 0;
        }

        return seq;
}

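/*
 * Walk every pagetable page reachable from the user range of pgd_base
 * and remap it with the given protection (PAGE_KERNEL_RO when pinning,
 * PAGE_KERNEL when unpinning), finishing with the pgd page(s)
 * themselves plus a TLB flush.
 */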
static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
{
        pgd_t *pgd = pgd_base;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int g, u, m;
        unsigned int cpu, seq;
        multicall_entry_t *mcl;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        cpu = get_cpu();

        /*
         * Cannot iterate up to USER_PTRS_PER_PGD on x86-64 as these pagetables
         * may not be the 'current' task's pagetables (e.g., current may be
         * 32-bit, but the pagetables may be for a 64-bit task).
         * Subtracting 1 from TASK_SIZE_MAX means the loop limit is correct
         * regardless of whether TASK_SIZE_MAX is a multiple of PGDIR_SIZE.
         */
        for (g = 0, seq = 0; g <= ((TASK_SIZE_MAX-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        seq = pgd_walk_set_prot(virt_to_page(pud), flags, cpu, seq);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                seq = pgd_walk_set_prot(virt_to_page(pmd), flags, cpu, seq);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                seq = pgd_walk_set_prot(pmd_page(*pmd), flags, cpu, seq);
                        }
                }
        }

#ifdef CONFIG_X86_PAE
        for (; g < PTRS_PER_PGD; g++, pgd++) {
                BUG_ON(pgd_none(*pgd));
                pud = pud_offset(pgd, 0);
                BUG_ON(pud_none(*pud));
                pmd = pmd_offset(pud, 0);
                seq = pgd_walk_set_prot(virt_to_page(pmd), flags, cpu, seq);
        }
#endif

        mcl = per_cpu(pb_mcl, cpu);

        if (unlikely(seq > PIN_BATCH - 2)) {
                if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
                        BUG();
                seq = 0;
        }
        pgd = __user_pgd(pgd_base);
        if (pgd) {
                MULTI_update_va_mapping(mcl + seq,
                                        (unsigned long)pgd,
                                        pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, flags),
                                        0);
                MULTI_update_va_mapping(mcl + seq + 1,
                                        (unsigned long)pgd_base,
                                        pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
                                        UVMF_TLB_FLUSH);
                if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
                        BUG();
        } else if (likely(seq != 0)) {
                MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
                                        (unsigned long)pgd_base,
                                        pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
                                        UVMF_TLB_FLUSH);
                if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
                                                        seq + 1, NULL)))
                        BUG();
        } else if (HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
                        pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
                        UVMF_TLB_FLUSH))
                BUG();

        put_cpu();
}

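/*
 * The boot pagetables are already known to the hypervisor, so mark
 * every pagetable page of init_mm as pinned; later updates and frees
 * then treat them as live, read-only pagetables.
 */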
void __init xen_init_pgd_pin(void)
{
        pgd_t *pgd = init_mm.pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned int g, u, m;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        SetPagePinned(virt_to_page(pgd));
        for (g = 0; g < PTRS_PER_PGD; g++, pgd++) {
#ifndef CONFIG_X86_PAE
                if (g >= pgd_index(HYPERVISOR_VIRT_START)
                    && g <= pgd_index(HYPERVISOR_VIRT_END - 1))
                        continue;
#endif
                if (!pgd_present(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        SetPagePinned(virt_to_page(pud));
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (!pud_present(*pud) || pud_large(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                SetPagePinned(virt_to_page(pmd));
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
#ifdef CONFIG_X86_PAE
                                if (g == pgd_index(HYPERVISOR_VIRT_START)
                                    && m >= pmd_index(HYPERVISOR_VIRT_START))
                                        continue;
#endif
                                if (!pmd_present(*pmd) || pmd_large(*pmd))
                                        continue;
                                SetPagePinned(pmd_page(*pmd));
                        }
                }
        }
#ifdef CONFIG_X86_64
        SetPagePinned(virt_to_page(level3_user_pgt));
#endif
}

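/*
 * Pinning makes every pagetable page of an mm read-only and registers
 * the pgd with the hypervisor as a validated base table; unpinning
 * reverses both steps so the pagetables can be written directly again.
 */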
static void __pgd_pin(pgd_t *pgd)
{
        pgd_walk(pgd, PAGE_KERNEL_RO);
        kmap_flush_unused();
        xen_pgd_pin(pgd);
        SetPagePinned(virt_to_page(pgd));
}

static void __pgd_unpin(pgd_t *pgd)
{
        xen_pgd_unpin(pgd);
        pgd_walk(pgd, PAGE_KERNEL);
        ClearPagePinned(virt_to_page(pgd));
}

static void pgd_test_and_unpin(pgd_t *pgd)
{
        if (PagePinned(virt_to_page(pgd)))
                __pgd_unpin(pgd);
}

void mm_pin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        pin_lock(mm);
        __pgd_pin(mm->pgd);
        pin_unlock(mm);
}

void mm_unpin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        pin_lock(mm);
        __pgd_unpin(mm->pgd);
        pin_unlock(mm);
}

void mm_pin_all(void)
{
        struct page *page;

        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        /*
         * Allow uninterrupted access to the pgd_list. Also protects
         * __pgd_pin() by ensuring preemption is disabled.
         * All other CPUs must be at a safe point (e.g., in stop_machine
         * or offlined entirely).
         */
        BUG_ON(!irqs_disabled());
        spin_lock(&pgd_lock);
        list_for_each_entry(page, &pgd_list, lru) {
                if (!PagePinned(page))
                        __pgd_pin((pgd_t *)page_address(page));
        }
        spin_unlock(&pgd_lock);
}

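/*
 * Pin the pagetables of a newly duplicated mm (fork) so they are
 * already in the pinned, read-only state by the time the child starts
 * running on them.
 */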
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        if (!PagePinned(virt_to_page(mm->pgd)))
                mm_pin(mm);
}

/*
 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() *much*
 * faster this way, as no hypercalls are needed for the page table updates.
 */
static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm)
        __releases(tsk->alloc_lock)
{
        if (tsk->active_mm == mm) {
                tsk->active_mm = &init_mm;
                atomic_inc(&init_mm.mm_count);

                switch_mm(mm, &init_mm, tsk);

                if (atomic_dec_and_test(&mm->mm_count))
                        BUG();
        }

        task_unlock(tsk);
}

static void _leave_active_mm(void *mm)
{
        struct task_struct *tsk = current;

        if (spin_trylock(&tsk->alloc_lock))
                leave_active_mm(tsk, mm);
}

void arch_exit_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);
        leave_active_mm(tsk, mm);

        preempt_disable();
        smp_call_function_many(mm_cpumask(mm), _leave_active_mm, mm, 1);
        preempt_enable();

        if (PagePinned(virt_to_page(mm->pgd))
            && atomic_read(&mm->mm_count) == 1
            && !mm->context.has_foreign_mappings)
                mm_unpin(mm);
}

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
        virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_test_and_unpin(pgd);

        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            PAGETABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

#ifdef CONFIG_X86_64
        /* set level3_user_pgt for vsyscall area */
        __user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
                __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
#endif

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

static void pgd_dtor(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD) {
                spin_lock(&pgd_lock);
                pgd_list_del(pgd);
                spin_unlock(&pgd_lock);
        }

        pgd_test_and_unpin(pgd);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        pud_t pud = __pud(__pa(pmd) | _PAGE_PRESENT);

        paravirt_alloc_pmd(mm, page_to_pfn(virt_to_page(pmd)));

        if (likely(!PagePinned(virt_to_page(pudp)))) {
                *pudp = pud;
                return;
        }

        set_pud(pudp, pud);

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}

#else   /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[], struct mm_struct *mm, bool contig)
{
        int i;

#ifdef CONFIG_X86_PAE
        if (contig)
                xen_destroy_contiguous_region((unsigned long)mm->pgd, 0);
#endif

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i])
                        pmd_free(mm, pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[], struct mm_struct *mm)
{
        int i;
        bool failed = false;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = pmd_alloc_one(mm, i << PUD_SHIFT);
                if (pmd == NULL)
                        failed = true;
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(pmds, mm, false);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (__pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = xen_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }

#ifdef CONFIG_X86_PAE
        if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
                xen_destroy_contiguous_region((unsigned long)pgdp, 0);
#endif
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        unsigned long addr;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        pud = pud_offset(pgd, 0);
        for (addr = i = 0; i < PREALLOCATED_PMDS;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd,
                               (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                /* It is safe to poke machine addresses of pmds under the pgd_lock. */
                pud_populate(mm, pud, pmd);
        }
}

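/*
 * On x86-64 each pgd is paired with a second, "user" pgd; its address
 * is stashed in the page_private() field of the kernel pgd's page so it
 * can be freed together with it.
 */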
static inline pgd_t *user_pgd_alloc(pgd_t *pgd)
{
#ifdef CONFIG_X86_64
        if (pgd) {
                pgd_t *upgd = (void *)__get_free_page(PGALLOC_GFP);

                if (upgd)
                        set_page_private(virt_to_page(pgd),
                                         (unsigned long)upgd);
                else {
                        free_page((unsigned long)pgd);
                        pgd = NULL;
                }
        }
#endif
        return pgd;
}

static inline void user_pgd_free(pgd_t *pgd)
{
#ifdef CONFIG_X86_64
        free_page(page_private(virt_to_page(pgd)));
#endif
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];

        pgd = user_pgd_alloc((void *)__get_free_page(PGALLOC_GFP));

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(pmds, mm) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

#ifdef CONFIG_X86_PAE
        /* Protect against save/restore: move below 4GB under pgd_lock. */
        if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)
            && xen_create_contiguous_region((unsigned long)pgd, 0, 32)) {
                spin_unlock(&pgd_lock);
                goto out_free_pmds;
        }
#endif

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_pmds:
        free_pmds(pmds, mm, !xen_feature(XENFEAT_pae_pgdir_above_4gb));
out_free_pgd:
        user_pgd_free(pgd);
        free_page((unsigned long)pgd);
out:
        return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        /*
         * After this the pgd should not be pinned for the duration of this
         * function's execution. We should never sleep and thus never race:
         *  1. User pmds will not become write-protected under our feet due
         *     to a concurrent mm_pin_all().
         *  2. The machine addresses in PGD entries will not become invalid
         *     due to a concurrent save/restore.
         */
        pgd_test_and_unpin(pgd);

        pgd_mop_up_pmds(mm, pgd);
        paravirt_pgd_free(mm, pgd);
        user_pgd_free(pgd);
        free_page((unsigned long)pgd);
}

/* blktap and gntdev need this, as otherwise they would implicitly (and
 * needlessly, as they never use it) reference init_mm. */
pte_t xen_ptep_get_and_clear_full(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t *ptep, int full)
{
        return ptep_get_and_clear_full(vma ? vma->vm_mm : &init_mm,
                                       addr, ptep, full);
}
EXPORT_SYMBOL_GPL(xen_ptep_get_and_clear_full);

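/*
 * For the current mm the access-flag update is folded into a single
 * update_va_mapping hypercall that also invalidates the TLB entry on
 * all CPUs using this mm; for other mms fall back to a plain pte update
 * followed by an explicit flush.
 */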
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                if (likely(vma->vm_mm == current->mm)) {
                        if (HYPERVISOR_update_va_mapping(address,
                                entry,
                                uvm_multi(mm_cpumask(vma->vm_mm))|UVMF_INVLPG))
                                BUG();
                } else {
                        xen_l1_entry_update(ptep, entry);
                        flush_tlb_page(vma, address);
                }
        }

        return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                pmd_update_defer(vma->vm_mm, address, pmdp);
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }

        return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        if (ret)
                pmd_update(vma->vm_mm, addr, pmdp);

        return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        int young = pte_young(pte);

        pte = pte_mkold(pte);
        if (PagePinned(virt_to_page(vma->vm_mm->pgd)))
                ptep_set_access_flags(vma, address, ptep, pte, young);
        else if (young)
                ptep->pte_low = pte.pte_low;

        return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp)
{
        int set;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
                                (unsigned long *)pmdp);
        if (set) {
                pmd_update(vma->vm_mm, address, pmdp);
                /* need tlb flush only to serialize against gup-fast */
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

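/*
 * Fixmap entries are installed through Xen-aware helpers: vsyscall
 * pages go into the user-mode level3 pagetable, a few fixmaps carrying
 * machine addresses are written with pfn_pte_ma(), and everything else
 * goes through set_pte_vaddr().
 */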
void xen_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);
        pte_t pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        switch (idx) {
#ifdef CONFIG_X86_64
        extern pte_t level1_fixmap_pgt[PTRS_PER_PTE];

        case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
                pte = pfn_pte(phys >> PAGE_SHIFT, flags);
                set_pte_vaddr_pud(level3_user_pgt, address, pte);
                break;
        case FIX_EARLYCON_MEM_BASE:
        case FIX_SHARED_INFO:
        case FIX_ISAMAP_END ... FIX_ISAMAP_BEGIN:
                xen_l1_entry_update(level1_fixmap_pgt + pte_index(address),
                                    pfn_pte_ma(phys >> PAGE_SHIFT, flags));
                fixmaps_set++;
                return;
#else
        case FIX_WP_TEST:
        case FIX_VDSO:
                pte = pfn_pte(phys >> PAGE_SHIFT, flags);
                break;
#endif
        default:
                pte = pfn_pte_ma(phys >> PAGE_SHIFT, flags);
                break;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}