- 2.6.17 port work: build breaks, but the patch set is relatively stable
[linux-flexiantxendom0-3.2.10.git] arch/i386/mm/pageattr.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

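/*
 * cpa_lock serializes attribute changes; df_list collects the page-table
 * pages made redundant by reverting to a large page, so that
 * global_flush_tlb() can free them after the TLB flush.
 */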
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);

pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}
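
/*
 * Illustrative sketch (not part of the original file): because
 * lookup_address() returns the pmd entry itself for a large page, a
 * caller can tell the two cases apart with _PAGE_PSE:
 *
 *        pte_t *pte = lookup_address(addr);
 *        if (pte && (pte_val(*pte) & _PAGE_PSE))
 *                handle_large_page();        // hypothetical helper
 */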

static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                        pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

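        /* GFP_KERNEL may sleep, so drop the irq-disabling cpa_lock
         * around the allocation. */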
        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}

static void flush_kernel_map(void *dummy)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
        /* Flush everything to work around errata in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();
}

static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
#ifndef HAVE_SHARED_KERNEL_PMD
        if (PTRS_PER_PMD > 1)
#else
        if (HAVE_SHARED_KERNEL_PMD)
#endif
                return;

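        /*
         * Without a shared kernel pmd, every pgd on pgd_list carries its
         * own copy of the kernel mappings, so the update must be
         * replayed in each of them.
         */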
        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}

static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        pgprot_t ref_prot;
                        struct page *split;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        ClearPagePrivate(kpte_page);
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}

static inline void flush_map(void)
{
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this (an illustrative usage
 * sketch follows the function body below).
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}
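
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * making one page read-only in the linear map, then flushing as the
 * comment above requires. "addr" stands for some kernel virtual
 * address:
 *
 *        change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
 *        global_flush_tlb();
 */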

void global_flush_tlb(void)
{
        LIST_HEAD(l);
        struct page *pg, *next;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_splice_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        list_for_each_entry_safe(pg, next, &l, lru)
                __free_page(pg);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
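/*
 * With CONFIG_DEBUG_PAGEALLOC the allocator unmaps freed pages from the
 * kernel linear mapping to catch use-after-free: __pgprot(0) below
 * clears _PAGE_PRESENT for the range.
 */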
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable)
                mutex_debug_check_no_locks_freed(page_address(page),
                                                 numpages * PAGE_SIZE);

        /* The return value is ignored - the calls cannot fail,
         * because large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /* We should perform an IPI and flush all TLBs,
         * but that can deadlock -> flush only the current CPU.
         */
        __flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);