/* arch/parisc/mm/kmap.c — imported from linux-flexiantxendom0-3.2.10.git */
1 /*
2 ** Stolen mostly from arch/parisc/kernel/pci-dma.c
3 */
4
5 #include <linux/types.h>
6 #include <linux/mm.h>
7 #include <linux/string.h>
8 #include <linux/pci.h>
9
10 #include <linux/malloc.h>
11 #include <linux/vmalloc.h>
12
13 #include <asm/uaccess.h>
14 #include <asm/pgalloc.h>
15
16 #include <asm/io.h>
17 #include <asm/page.h>           /* get_order */
18
19 #undef flush_cache_all
20 #define flush_cache_all flush_all_caches
21
/*
 * Callback applied to each PTE visited by the iterate_* helpers below;
 * 'arg' is an opaque value passed through unchanged from iterate_pages().
 */
typedef void (*pte_iterator_t) (pte_t * pte, unsigned long arg);
23
#if 0
/* XXX This routine could be used with iterate_page() to replace
 * unmap_uncached_page() and save a little code space but I didn't
 * do that since I'm not certain whether this is the right path. -PB
 */
/*
 * Clear *pte and free the page it mapped.  NOTE(review): compiled out
 * by the #if 0 above — kept only as a reference implementation.
 */
static void unmap_cached_pte(pte_t * pte, unsigned long arg)
{
	pte_t page = *pte;
	pte_clear(pte);
	if (!pte_none(page)) {
		if (pte_present(page)) {
			/* Only free pages that lie within mem_map[]. */
			unsigned long map_nr = pte_pagenr(page);
			if (map_nr < max_mapnr)
				__free_page(mem_map + map_nr);
		} else {
			/* Kernel page tables should never hold swap entries. */
			printk(KERN_CRIT
			       "Whee.. Swapped out page in kernel page table\n");
		}
	}
}
#endif
45
46 /* These two routines should probably check a few things... */
47 static void set_uncached(pte_t * pte, unsigned long arg)
48 {
49         pte_val(*pte) |= _PAGE_NO_CACHE;
50 }
51
52 static void set_cached(pte_t * pte, unsigned long arg)
53 {
54         pte_val(*pte) &= ~_PAGE_NO_CACHE;
55 }
56
/*
 * Apply 'op' to every PTE mapping [address, address + size) under *pmd.
 * The range is clamped to this PMD; the caller is responsible for
 * advancing to the next PMD entry.
 */
static inline void iterate_pte(pmd_t * pmd, unsigned long address,
			       unsigned long size, pte_iterator_t op,
			       unsigned long arg)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report and clear it instead of walking it. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	/* Reduce to the offset within this PMD so the clamp below is simple. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		op(pte, arg);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
82
/*
 * Walk the PMDs under *dir covering [address, address + size), handing
 * each sub-range to iterate_pte().  The range is clamped to this PGD
 * entry; the caller advances to the next PGD entry.
 */
static inline void iterate_pmd(pgd_t * dir, unsigned long address,
			       unsigned long size, pte_iterator_t op,
			       unsigned long arg)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		/* Corrupt entry: report and clear it instead of walking it. */
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	/* Reduce to the offset within this PGD so the clamp below is simple. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		iterate_pte(pmd, address, end - address, op, arg);
		/* Round up to the start of the next PMD. */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
108
109 static void iterate_pages(unsigned long address, unsigned long size,
110                           pte_iterator_t op, unsigned long arg)
111 {
112         pgd_t *dir;
113         unsigned long end = address + size;
114
115         dir = pgd_offset_k(address);
116         flush_cache_all();
117         do {
118                 iterate_pmd(dir, address, end - address, op, arg);
119                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
120                 dir++;
121         } while (address && (address < end));
122         flush_tlb_all();
123 }
124
125 void
126 kernel_set_cachemode(unsigned long vaddr, unsigned long size, int what)
127 {
128         switch (what) {
129         case IOMAP_FULL_CACHING:
130                 iterate_pages(vaddr, size, set_cached, 0);
131                 flush_tlb_range(&init_mm, vaddr, size);
132                 break;
133         case IOMAP_NOCACHE_SER:
134                 iterate_pages(vaddr, size, set_uncached, 0);
135                 flush_tlb_range(&init_mm, vaddr, size);
136                 break;
137         default:
138                 printk(KERN_CRIT
139                        "kernel_set_cachemode mode %d not understood\n",
140                        what);
141                 break;
142         }
143 }