Linux-2.6.12-rc2
[linux-flexiantxendom0-natty.git] / include / asm-m68k / cacheflush.h
1 #ifndef _M68K_CACHEFLUSH_H
2 #define _M68K_CACHEFLUSH_H
3
4 #include <linux/mm.h>
5
6 /*
7  * Cache handling functions
8  */
9
/*
 * flush_icache() - invalidate the entire CPU instruction cache.
 *
 * '040/'060: "cinva %ic" invalidates the whole icache; the leading
 * nop serializes the pipeline before the cache operation.
 * '020/'030: read CACR, OR in the FLUSH_I ("clear instruction cache")
 * bit and write it back — the CACR write clears the icache.
 */
#define flush_icache()                                          \
({                                                              \
        if (CPU_IS_040_OR_060)                                  \
                __asm__ __volatile__("nop\n\t"                  \
                                     ".chip 68040\n\t"          \
                                     "cinva %%ic\n\t"           \
                                     ".chip 68k" : );           \
        else {                                                  \
                unsigned long _tmp;                             \
                __asm__ __volatile__("movec %%cacr,%0\n\t"      \
                                     "orw %1,%0\n\t"            \
                                     "movec %0,%%cacr"          \
                                     : "=&d" (_tmp)             \
                                     : "id" (FLUSH_I)); \
        }                                                       \
})
26
/*
 * cache_clear() - invalidate the cache for the specified memory range.
 * The range starts at physical address @paddr and covers @len bytes.
 * Implemented in arch code (not in this header).
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * cache_push() - push any dirty cache lines in the specified memory
 * range back to RAM.  The range starts at physical address @paddr and
 * covers @len bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * cache_push_v() - push and invalidate pages in the specified *user
 * virtual* memory range: @vaddr .. @vaddr + @len.
 */
extern void cache_push_v(unsigned long vaddr, int len);
45
/* CACR bits used by the '020/'030 flush sequences below.
   Presumably bit 3 (0x008) is "clear instruction cache" and bit 11
   (0x800) is "clear data cache" ('030) — confirm against the M68030
   User's Manual CACR description. */
#define FLUSH_I_AND_D   (0x00000808)
#define FLUSH_I         (0x00000008)
49
/* Flush everything.  This is needed whenever the virtual mapping of
   the current process changes.
   '040/'060: "cpusha %dc" pushes the whole data cache ("%dc" is a
   literal mnemonic operand — no asm operands here, so single '%').
   '020/'030: set both CACR clear bits (FLUSH_I_AND_D). */
#define __flush_cache_all()                                     \
({                                                              \
        if (CPU_IS_040_OR_060)                                  \
                __asm__ __volatile__("nop\n\t"                  \
                                     ".chip 68040\n\t"          \
                                     "cpusha %dc\n\t"           \
                                     ".chip 68k");              \
        else {                                                  \
                unsigned long _tmp;                             \
                __asm__ __volatile__("movec %%cacr,%0\n\t"      \
                                     "orw %1,%0\n\t"            \
                                     "movec %0,%%cacr"          \
                                     : "=&d" (_tmp)             \
                                     : "di" (FLUSH_I_AND_D));   \
        }                                                       \
})
68
/* Like the '020/'030 leg of __flush_cache_all(), but a complete no-op
   on any other CPU type: clear both caches via the CACR. */
#define __flush_cache_030()                                     \
({                                                              \
        if (CPU_IS_020_OR_030) {                                \
                unsigned long _tmp;                             \
                __asm__ __volatile__("movec %%cacr,%0\n\t"      \
                                     "orw %1,%0\n\t"            \
                                     "movec %0,%%cacr"          \
                                     : "=&d" (_tmp)             \
                                     : "di" (FLUSH_I_AND_D));   \
        }                                                       \
})
80
#define flush_cache_all() __flush_cache_all()

/* No range-granular flush available here: vmap/vunmap of kernel
   mappings falls back to flushing everything. */
#define flush_cache_vmap(start, end)            flush_cache_all()
#define flush_cache_vunmap(start, end)          flush_cache_all()
85
86 static inline void flush_cache_mm(struct mm_struct *mm)
87 {
88         if (mm == current->mm)
89                 __flush_cache_030();
90 }
91
/* flush_cache_range/flush_cache_page: flush caches for a user address
   range/page.  Only the current mm can hold live cache entries, so any
   other address space is a no-op; the addresses themselves are unused
   because the '030 can only clear its caches wholesale. */
94 static inline void flush_cache_range(struct vm_area_struct *vma,
95                                      unsigned long start,
96                                      unsigned long end)
97 {
98         if (vma->vm_mm == current->mm)
99                 __flush_cache_030();
100 }
101
102 static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
103 {
104         if (vma->vm_mm == current->mm)
105                 __flush_cache_030();
106 }
107
108
/* Push the page at kernel virtual address @vaddr to RAM and clear the
   icache for it.
   RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
        if (CPU_IS_040_OR_060) {
                /* "cpushp %bc,(pa)": push and invalidate both caches
                   for the physical page containing @vaddr. */
                __asm__ __volatile__("nop\n\t"
                                     ".chip 68040\n\t"
                                     "cpushp %%bc,(%0)\n\t"
                                     ".chip 68k"
                                     : : "a" (__pa(vaddr)));
        } else {
                /* '020/'030: no page-granular operation — clear the
                   whole instruction cache via the CACR instead;
                   @vaddr is ignored on this path. */
                unsigned long _tmp;
                __asm__ __volatile__("movec %%cacr,%0\n\t"
                                     "orw %1,%0\n\t"
                                     "movec %0,%%cacr"
                                     : "=&d" (_tmp)
                                     : "di" (FLUSH_I));
        }
}
128
/* Page-level flush hooks: both the dcache and icache variants fall
   back to pushing the whole page to RAM via __flush_page_to_ram(). */
#define flush_dcache_page(page)         __flush_page_to_ram(page_address(page))
/* No per-mapping dcache aliasing state to protect on m68k. */
#define flush_dcache_mmap_lock(mapping)         do { } while (0)
#define flush_dcache_mmap_unlock(mapping)       do { } while (0)
#define flush_icache_page(vma, page)    __flush_page_to_ram(page_address(page))
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
134
/* Copy data into/out of a user page: flush the cache for that page
   first, then do a plain memcpy.
   NOTE(review): copy_to_user_page() does not invalidate the icache
   after writing — presumably callers that install code are expected
   to call flush_icache_range() themselves; confirm before relying on
   this for executable pages. */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                flush_cache_page(vma, vaddr, page_to_pfn(page));\
                memcpy(dst, src, len);                          \
        } while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                flush_cache_page(vma, vaddr, page_to_pfn(page));\
                memcpy(dst, src, len);                          \
        } while (0)
146
/* Flush/invalidate the icache for the kernel virtual address range
   @address .. @endaddr (implemented in arch code, not this header). */
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
148
149 #endif /* _M68K_CACHEFLUSH_H */