/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>
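
/*
 * Bookkeeping for memory declared for a device with
 * dma_declare_coherent_memory(): an uncached mapping of the region plus
 * a bitmap with one bit per page, tracking which pages are handed out.
 */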
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};
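
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: device to allocate for, or NULL
 * @size: size of the buffer in bytes
 * @dma_handle: filled in with the bus address of the buffer
 * @gfp: allocation flags
 *
 * If @dev has memory declared via dma_declare_coherent_memory(), the
 * buffer is carved out of that region.  Otherwise pages come from the
 * page allocator, are flushed from the cache, and are returned through
 * an uncached ioremap() alias so CPU and device see consistent data.
 */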
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
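
/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * file; "mydev" and MY_BUF_SIZE are placeholder names):
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&mydev->dev, MY_BUF_SIZE, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...hand 'dma' to the device, access 'buf' from the CPU...
 *	dma_free_coherent(&mydev->dev, MY_BUF_SIZE, buf, dma);
 */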
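
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent()
 * @dev: device the memory was allocated for, or NULL
 * @size: size of the original allocation
 * @vaddr: virtual address returned by dma_alloc_coherent()
 * @dma_handle: bus address returned by dma_alloc_coherent()
 *
 * If @vaddr lies inside @dev's declared memory region, the pages are
 * simply released back to the region's bitmap.  Otherwise the uncached
 * alias is unmapped and the underlying pages are returned to the page
 * allocator.
 */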
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem && vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
	} else {
		WARN_ON(irqs_disabled());	/* for portability */
		BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
		free_pages((unsigned long)phys_to_virt(dma_handle), order);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);
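
/**
 * dma_declare_coherent_memory - declare a memory region for coherent use
 * @dev: device to attach the region to
 * @bus_addr: physical address of the region on the bus
 * @device_addr: address of the region as seen by @dev
 * @size: size of the region in bytes
 * @flags: DMA_MEMORY_MAP and/or DMA_MEMORY_IO, optionally ORed with
 *	DMA_MEMORY_EXCLUSIVE
 *
 * Maps the region uncached and sets up per-page bitmap bookkeeping so
 * that subsequent dma_alloc_coherent() calls for @dev are satisfied
 * from it.  Returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on
 * failure.
 */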
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap_nocache(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind */
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
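
/*
 * Usage sketch (hypothetical platform code; the device, addresses and
 * size below are placeholders): a board file might reserve a chunk of
 * on-chip RAM for a device like so:
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, 0xabcd0000, 0xabcd0000,
 *					0x10000, DMA_MEMORY_MAP) !=
 *	    DMA_MEMORY_MAP)
 *		return -ENOMEM;
 */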
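
/**
 * dma_release_declared_memory - undo dma_declare_coherent_memory()
 * @dev: device the region was declared for
 *
 * Unmaps the region and frees the bitmap.  The caller must ensure that
 * no allocations from the region are still in use.
 */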
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
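
/**
 * dma_mark_declared_memory_occupied - reserve part of a declared region
 * @dev: device the region was declared for
 * @device_addr: device address of the chunk to reserve
 * @size: size of the chunk in bytes
 *
 * Marks the pages covering the chunk as allocated in the region's
 * bitmap so that dma_alloc_coherent() will not hand them out.  Returns
 * the kernel virtual address of the chunk, or an ERR_PTR() value.
 */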
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	/* Cover a partial page at the start of the chunk. */
	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	/* get_order() takes a size in bytes, not a page count. */
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
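
/**
 * dma_cache_sync - make the CPU cache consistent for a DMA buffer
 * @dev: device the buffer is used with (unused here)
 * @vaddr: cached kernel virtual address of the buffer
 * @size: size of the buffer in bytes
 * @direction: DMA_FROM_DEVICE, DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 *
 * Writes back and/or invalidates the cache lines covering the buffer,
 * operating on its P1 (cached) alias; SH-5 uses the address as given.
 */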
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);