#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#include <xen/features.h>
#include <asm/hypervisor.h>

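/*
 * Default size of the vmalloc area reservation (128 MB); it can be changed
 * with the "vmalloc=" early parameter handled below.
 */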
unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (pte_val(pteval))
                set_pte_at(&init_mm, vaddr, pte, pteval);
        else
                pte_clear(&init_mm, vaddr, pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);

        /* Let the hypervisor update the mapping and invalidate it on all CPUs. */
        if (HYPERVISOR_update_va_mapping(vaddr, pteval,
                                         UVMF_INVLPG|UVMF_ALL))
                BUG();
}
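
/*
 * Typical use (illustrative): install one kernel mapping with
 * set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL)); passing a zero pte
 * clears the slot instead.
 */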

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return;
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return;
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}
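
/*
 * On Xen, the hypervisor owns the top of the virtual address space, so the
 * fixmap ends one page below HYPERVISOR_VIRT_START rather than at the very
 * top of the address space.
 */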
unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - PAGE_SIZE);
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;

        /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
        __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
        return 0;
}
early_param("vmalloc", parse_vmalloc);
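
/*
 * Example (illustrative): booting with "vmalloc=192m" stores 192 MB plus
 * VMALLOC_OFFSET (the guard hole between lowmem and the vmalloc area) in
 * __VMALLOC_RESERVE.
 */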

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
        unsigned long address;

        if (!arg)
                return -EINVAL;

        address = memparse(arg, &arg);
        reserve_top_address(address);
        return 0;
}
early_param("reservetop", parse_reservetop);
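
/*
 * Example (illustrative): "reservetop=16m" makes reserve_top_address() keep
 * the top 16 MB of virtual address space unused by lowering __FIXADDR_TOP,
 * and with it the whole fixmap.
 */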

/*
 * Make a page in the direct (lowmem) mapping read-only by rewriting its
 * linear-mapping PTE through the hypervisor. The call is skipped when the
 * named Xen feature makes it unnecessary.
 */
void make_lowmem_page_readonly(void *va, unsigned int feature)
{
        pte_t *pte;
        unsigned int level;
        int rc;

        if (xen_feature(feature))
                return;

        pte = lookup_address((unsigned long)va, &level);
        BUG_ON(!pte || level != PG_LEVEL_4K || !pte_present(*pte));
        rc = HYPERVISOR_update_va_mapping(
                (unsigned long)va, pte_wrprotect(*pte), 0);
        BUG_ON(rc);
}

void make_lowmem_page_writable(void *va, unsigned int feature)
{
        pte_t *pte;
        unsigned int level;
        int rc;

        if (xen_feature(feature))
                return;

        pte = lookup_address((unsigned long)va, &level);
        BUG_ON(!pte || level != PG_LEVEL_4K || !pte_present(*pte));
        rc = HYPERVISOR_update_va_mapping(
                (unsigned long)va, pte_mkwrite(*pte), UVMF_INVLPG);
        BUG_ON(rc);
}
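
/*
 * Illustrative use (not from this file): page-table pages handed to the
 * hypervisor must not be writable by the guest, so callers typically do
 * make_lowmem_page_readonly(pt, XENFEAT_writable_page_tables) on the page's
 * kernel virtual address 'pt' before pinning it, and the matching
 * make_lowmem_page_writable() after it is unpinned.
 */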