/*
 * Dynamic DMA mapping support.
 *
 * This implementation is for IA-64 platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 *
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 */

#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pci.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>

#define OFFSET(val, align)	((unsigned long) ((val) & ((align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line controllable.
 */
#define IO_TLB_SHIFT	11
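
/*
 * For reference: with IO_TLB_SHIFT == 11 each slab is 1 << 11 = 2KB, so the
 * default of 1024 slabs (io_tlb_nslabs below) reserves a 2MB bounce pool, and
 * IO_TLB_SEGSIZE == 128 caps a single mapping at 128 * 2KB = 256KB.
 */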

/*
 * Used to do a quick range check in swiotlb_unmap_single and swiotlb_sync_single, to see
 * if the memory was in fact allocated by this API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB slabs between io_tlb_start and io_tlb_end (rounded up to a
 * multiple of IO_TLB_SEGSIZE).  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs = 1024;

/*
 * This is a free list describing the number of free entries available from each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
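
/*
 * Example: right after swiotlb_init() the first segment of the free list reads
 * { 128, 127, ..., 1 }; io_tlb_list[i] is the number of contiguous free slabs
 * from slot i up to the end of its IO_TLB_SEGSIZE-aligned segment.
 */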

/*
 * We need to save away the original address corresponding to a mapped entry for the sync
 * operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static spinlock_t io_tlb_lock = SPIN_LOCK_UNLOCKED;

static int __init
setup_io_tlb_npages (char *str)
{
        io_tlb_nslabs = simple_strtoul(str, NULL, 0) << (PAGE_SHIFT - IO_TLB_SHIFT);

        /* avoid tail segment of size < IO_TLB_SEGSIZE */
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
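
/*
 * The boot parameter is given in pages.  For example, with 16KB pages
 * (PAGE_SHIFT == 14), "swiotlb=1024" yields 1024 << (14 - 11) = 8192 slabs,
 * i.e. a 16MB bounce pool.
 */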

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data structures for
 * the software IO TLB used to implement the PCI DMA API.
 */
void
swiotlb_init (void)
{
        int i;

        /* Get IO TLB memory from the low pages. */
        io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

        printk(KERN_INFO "Placing software IO TLB between 0x%p - 0x%p\n",
               (void *) io_tlb_start, (void *) io_tlb_end);
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;

        /*
         * For mappings greater than a page size, we limit the stride (and hence alignment)
         * to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > (1 << PAGE_SHIFT))
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        if (!nslots)
                BUG();
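
        /*
         * Example: a 5000-byte request needs ALIGN(5000, 2048) >> 11 = 3 slabs.
         * Requests larger than a page are only ever placed at page-aligned slots
         * (the stride above), so the bounce buffer they get is page aligned.
         */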

        /*
         * Find suitable number of IO TLB entries size that will fit this request and
         * allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                wrap = index = ALIGN(io_tlb_index, stride);

                if (index >= io_tlb_nslabs)
                        wrap = index = 0;

                do {
                        /*
                         * If we find a slot that indicates we have 'nslots' number of
                         * contiguous buffers, we allocate the buffers from that slot and
                         * mark the entries as '0' indicating unavailable.
                         */
                        if (io_tlb_list[index] >= nslots) {
                                int count = 0;

                                for (i = index; i < index + nslots; i++)
                                        io_tlb_list[i] = 0;
                                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1)
                                       && io_tlb_list[i]; i--)
                                        io_tlb_list[i] = ++count;
                                dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                                /*
                                 * Update the indices to avoid searching in the next round.
                                 */
                                io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                                ? (index + nslots) : 0);

                                goto found;
                        }
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                } while (index != wrap);

                /*
                 * XXX What is a suitable recovery mechanism here?  We cannot
                 * sleep because we are called from within interrupts!
                 */
                panic("map_single: could not allocate software IO TLB (%ld bytes)", size);
        }
  found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.  This is
         * needed when we sync the memory.  Then we sync the buffer if needed.
         */
        io_tlb_orig_addr[index] = buffer;
        if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL)
                memcpy(dma_addr, buffer, size);

        return dma_addr;
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
{
        unsigned long flags;
        int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL))
                /*
                 * bounce... copy the data back into the original buffer and delete the
                 * bounce buffer.
                 */
                memcpy(buffer, dma_addr, size);

        /*
         * Return the buffer to the free list by setting the corresponding entries to
         * indicate the number of contiguous entries available.  While returning the
         * entries to the free list, we merge the entries with slots below and above the
         * pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                int count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                             io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the slots with
                 * succeeding slots.
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots, if
                 * available (non zero).
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
                       io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}
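
/*
 * Worked example of the merge above: freeing 3 slabs at index 4 while slabs
 * 7..9 are already free (io_tlb_list[7] == 3) sets io_tlb_list[6..4] to 4, 5, 6
 * in step 1, and step 2 keeps counting upward through any free slabs below
 * index 4 within the same segment.
 */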

static void
sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        /*
         * bounce... copy the data back into/from the original buffer
         * XXX How do you handle PCI_DMA_BIDIRECTIONAL here ?
         */
        if (direction == PCI_DMA_FROMDEVICE)
                memcpy(buffer, dma_addr, size);
        else if (direction == PCI_DMA_TODEVICE)
                memcpy(dma_addr, buffer, size);
        else
                BUG();
}

void *
swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
        unsigned long pci_addr;
        int gfp = GFP_ATOMIC;
        void *ret;

        /*
         * Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA
         * mask says.
         */
        gfp |= GFP_DMA;	/* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
        ret = (void *) __get_free_pages(gfp, get_order(size));
        if (!ret)
                return NULL;

        memset(ret, 0, size);
        pci_addr = virt_to_phys(ret);
        if (hwdev && (pci_addr & ~hwdev->dma_mask) != 0)
                panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device");
        *dma_handle = pci_addr;
        return ret;
}

void
swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The PCI address
 * to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until either
 * swiotlb_unmap_single or swiotlb_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
        unsigned long pci_addr = virt_to_phys(ptr);

        if (direction == PCI_DMA_NONE)
                BUG();
        /* Check if the PCI device can DMA to ptr... if so, just return ptr. */
        if ((pci_addr & ~hwdev->dma_mask) == 0)
                /*
                 * Device is capable of DMA'ing to the buffer... just return the
                 * PCI address of ptr.
                 */
                return pci_addr;

        /* Get a bounce buffer: */
        pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction));

        /* Ensure that the address returned is DMA'ble: */
        if ((pci_addr & ~hwdev->dma_mask) != 0)
                panic("map_single: bounce buffer is not DMA'ble");

        return pci_addr;
}
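
/*
 * Illustrative use from a driver (a sketch, not code from this file): map a
 * buffer for a device read, let the device DMA from the returned bus address,
 * then unmap it:
 *
 *	dma_addr_t bus = swiotlb_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	... start the device DMA using 'bus' ...
 *	swiotlb_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
 *
 * 'pdev', 'buf' and 'len' stand in for the driver's own device and buffer.
 */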

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must match what
 * was provided for in a previous swiotlb_map_single call.  All other usages are
 * undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see whatever the
 * device wrote there.
 */
void
swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
{
        char *dma_addr = phys_to_virt(pci_addr);

        if (direction == PCI_DMA_NONE)
                BUG();
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                unmap_single(hwdev, dma_addr, size, direction);
        else if (direction == PCI_DMA_FROMDEVICE)
                mark_clean(dma_addr, size);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation after a
 * transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer using the cpu,
 * yet do not wish to tear down the PCI dma mapping, you must call this function before
 * doing so.  At the next point you give the PCI dma address back to the card, the device
 * again owns the buffer.
 */
void
swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
{
        char *dma_addr = phys_to_virt(pci_addr);

        if (direction == PCI_DMA_NONE)
                BUG();
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                sync_single(hwdev, dma_addr, size, direction);
        else if (direction == PCI_DMA_FROMDEVICE)
                mark_clean(dma_addr, size);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
 * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter
 * gather list elements are each tagged with the appropriate dma address and length.  They
 * are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
 */
int
swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
        void *addr;
        unsigned long pci_addr;
        int i;

        if (direction == PCI_DMA_NONE)
                BUG();

        for (i = 0; i < nelems; i++, sg++) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                pci_addr = virt_to_phys(addr);
                if ((pci_addr & ~hwdev->dma_mask) != 0)
                        sg->dma_address = (dma_addr_t)
                                map_single(hwdev, addr, sg->length, direction);
                else
                        sg->dma_address = pci_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
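
/*
 * Driver-side sketch (illustrative, not from this file): after swiotlb_map_sg()
 * returns, each element's bus address and length are read back through the
 * sg_dma_{address,length}(SG) accessors mentioned above, which correspond to
 * the dma_address and dma_length fields set in the loop.
 */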

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules concerning calls
 * here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
        int i;

        if (direction == PCI_DMA_NONE)
                BUG();

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
                else if (direction == PCI_DMA_FROMDEVICE)
                        mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations after a
 * transfer.
 *
 * The same as swiotlb_sync_single but for a scatter-gather list, same rules and
 * usage.
 */
void
swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
{
        int i;

        if (direction == PCI_DMA_NONE)
                BUG();

        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
}

/*
 * Return whether the given PCI device DMA address mask can be supported properly.  For
 * example, if your device can only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
int
swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask)
{
        return 1;
}

EXPORT_SYMBOL(swiotlb_init);
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single);
EXPORT_SYMBOL(swiotlb_sync_sg);
EXPORT_SYMBOL(swiotlb_alloc_consistent);
EXPORT_SYMBOL(swiotlb_free_consistent);
EXPORT_SYMBOL(swiotlb_pci_dma_supported);