#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

/*
 * kmap() may sleep while waiting for a free slot in the kmap pool, so it
 * must not be called from atomic context.  Lowmem pages are permanently
 * mapped and are returned directly.
 */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	/* The kmap pool is not usable from interrupt context. */
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
	/*arch_flush_lazy_mmu_mode();*/

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
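
/*
 * Usage sketch (illustrative only; the helper below is hypothetical): the
 * typical pattern around the atomic kmap API described above is map, touch
 * the page briefly, unmap, without sleeping in between.
 */
#if 0
static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* pagefaults are now disabled */

	clear_page(vaddr);			/* short, non-sleeping critical section */
	kunmap_atomic(vaddr);			/* unmap and re-enable pagefaults */
}
#endif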

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
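
/*
 * Usage sketch (illustrative only; the helper below is hypothetical): read
 * one byte from a frame that has no struct page, e.g. memory outside the
 * kernel's mem_map, given only its pfn.
 */
#if 0
static unsigned char example_peek_pfn(unsigned long pfn, unsigned int offset)
{
	unsigned char *vaddr = kmap_atomic_pfn(pfn);
	unsigned char val = vaddr[offset % PAGE_SIZE];

	kunmap_atomic(vaddr);
	return val;
}
#endif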

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		/*arch_flush_lazy_mmu_mode();*/
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

void clear_highpage(struct page *page)
{
	void *kaddr;

	/*
	 * If the hypervisor offers highmem assist, let it clear the frame
	 * directly; otherwise fall back to a transient kernel mapping.
	 */
	if (likely(xen_feature(XENFEAT_highmem_assist))
	    && PageHighMem(page)) {
		struct mmuext_op meo;

		meo.cmd = MMUEXT_CLEAR_PAGE;
		meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
		if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
			return;
	}

	kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_highpage);

void copy_highpage(struct page *to, struct page *from)
{
	void *vfrom, *vto;

	if (likely(xen_feature(XENFEAT_highmem_assist))
	    && (PageHighMem(from) || PageHighMem(to))) {
		unsigned long from_pfn = page_to_pfn(from);
		unsigned long to_pfn = page_to_pfn(to);
		struct mmuext_op meo;

		meo.cmd = MMUEXT_COPY_PAGE;
		meo.arg1.mfn = pfn_to_mfn(to_pfn);
		meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
		/*
		 * Only use the hypercall when both machine frames translate
		 * back to the expected pfns, i.e. both pages are ordinary
		 * local frames.
		 */
		if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
		    && mfn_to_pfn(meo.arg1.mfn) == to_pfn
		    && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
			return;
	}

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom);
	kunmap_atomic(vto);
}
EXPORT_SYMBOL(copy_highpage);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				zone_end_pfn);

		/* XEN: init high-mem pages outside initial allocation. */
		if (zone_start_pfn < xen_start_info->nr_pages)
			zone_start_pfn = xen_start_info->nr_pages;
		for (; zone_start_pfn < zone_end_pfn; zone_start_pfn++) {
			ClearPageReserved(pfn_to_page(zone_start_pfn));
			init_page_count(pfn_to_page(zone_start_pfn));
		}
	}
	totalram_pages += totalhigh_pages;
}