arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

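/*
 * Example (a hedged sketch, not part of this file): platform setup code
 * can publish the bus offset through the set_dma_offset()/get_dma_offset()
 * helpers from <asm/dma-mapping.h> before drivers start mapping.  The
 * myplat_dma_dev_setup() name and the 2GB offset below are hypothetical:
 *
 *      static void myplat_dma_dev_setup(struct device *dev)
 *      {
 *              set_dma_offset(dev, 0x80000000ull);
 *      }
 */
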
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

        return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

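/*
 * Usage sketch (hypothetical driver code): consumers never call the
 * dma_direct_* entry points directly; they go through the generic
 * dma_alloc_coherent()/dma_free_coherent() wrappers, which dispatch
 * here via get_dma_ops(dev):
 *
 *      void *buf;
 *      dma_addr_t bus;
 *
 *      buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, PAGE_SIZE, buf, bus);
 */
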
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

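/*
 * Usage sketch (hypothetical): drivers reach dma_direct_map_sg() through
 * the generic dma_map_sg() wrapper.  Note that dma_unmap_sg() must be
 * passed the original nents, not the count returned by dma_map_sg():
 *
 *      int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      if (n == 0)
 *              return -EIO;
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
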
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /*
         * The direct mapping allocates no per-mapping resources, so
         * there is nothing to tear down here.
         */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}

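/*
 * Worked example for the check above (assuming a zero device offset):
 * with 4GB of RAM, memblock_end_of_DRAM() is 0x100000000, so the highest
 * bus address is 0xffffffff and a 32-bit mask
 * (DMA_BIT_MASK(32) == 0xffffffff) is just sufficient.
 */
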
static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

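/*
 * The two steps above build an all-ones mask wide enough to address end:
 * fls64() finds the top set bit, and mask += mask - 1 fills in every bit
 * below it.  Worked example (zero offset): with end == 0x100000000,
 * fls64(end) is 33, so mask becomes 1ULL << 32 and then 0x1ffffffff,
 * i.e. a 33-bit mask.
 */
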
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

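/*
 * Usage sketch (hypothetical): single mappings arrive here through the
 * generic dma_map_single()/dma_map_page() wrappers.  Drivers should
 * always check the result with dma_mapping_error():
 *
 *      dma_addr_t bus = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */
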
static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /* As with unmap_sg above, the direct mapping has nothing to undo. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
        .alloc                          = dma_direct_alloc_coherent,
        .free                           = dma_direct_free_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
        .map_page                       = dma_direct_map_page,
        .unmap_page                     = dma_direct_unmap_page,
        .get_required_mask              = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

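/*
 * Hook-up sketch (hypothetical): the generic wrappers find this table
 * through get_dma_ops(dev), so platform or bus code that wants the
 * direct path simply installs it on the device:
 *
 *      set_dma_ops(&pdev->dev, &dma_direct_ops);
 */
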
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

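/*
 * Usage sketch (hypothetical driver probe): a driver declares how many
 * address bits its device can drive before performing any mappings:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */
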
u64 dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t handle, size_t size)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);
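
/*
 * Usage sketch (hypothetical): a character driver exporting a coherent
 * buffer to userspace from its ->mmap() file operation.  The mydev
 * structure and its fields are illustrative only:
 *
 *      static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct mydev *md = file->private_data;
 *
 *              return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
 *                                       md->handle,
 *                                       vma->vm_end - vma->vm_start);
 *      }
 */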