 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
 * 08/12/11 beckyb      Add highmem support
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <asm/gnttab_dma.h>
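/*
 * OFFSET(val, align) gives val's offset within an align-sized, power-of-two
 * block; it is used below as OFFSET(i, IO_TLB_SEGSIZE) to find a slab's
 * position inside its segment.
 */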
#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this API.
static char *io_tlb_start, *io_tlb_end;

 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
static unsigned long io_tlb_nslabs;

 * When the IOMMU overflows we return a fallback buffer.  This sets the size.
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

 * This is a free list describing the number of free entries available from
 * each index.
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
static phys_addr_t *io_tlb_orig_addr;

 * Protect the above data structures in the map and unmap calls.
static DEFINE_SPINLOCK(io_tlb_lock);

static unsigned int dma_bits;
static unsigned int __initdata max_dma_bits = 32;
setup_dma_bits(char *str)
{
        max_dma_bits = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("dma_bits=", setup_dma_bits);
setup_io_tlb_npages(char *str)
{
        /* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
        io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
                        (20 - IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);

        /*
         * NB. 'force' enables the swiotlb, but doesn't force its use for
         * every DMA like it does on native Linux. 'off' forcibly disables
         * use of the swiotlb.
         */
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        else if (!strcmp(str, "off"))
                swiotlb_force = -1;

        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
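/*
 * Usage examples (illustrative, assuming the parsing above): "swiotlb=64"
 * asks for a 64 MB aperture, "swiotlb=force" enables the swiotlb even
 * outside domain 0, "swiotlb=off" disables it, and "dma_bits=30" caps the
 * address width tried when allocating machine-contiguous segments.
 */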
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
{
        return phys_to_dma(hwdev, virt_to_phys(address));
}
void swiotlb_print_info(void)
{
        unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        printk(KERN_INFO "Software IO TLB enabled:\n"
               " Aperture: %lu megabytes\n"
               " Address size: %u bits\n"
               " Kernel range: %p - %p\n",
               bytes >> 20, dma_bits,
               io_tlb_start, io_tlb_end);
}
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the PCI DMA API.
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
        unsigned long i, bytes;
        int rc;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /*
         * Get IO TLB memory from the low pages
         */
        io_tlb_start = alloc_bootmem_pages(bytes);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer!\n");
        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
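        /*
         * Start with the narrowest address width that can hold one
         * IO_TLB_SEGSIZE segment and retry with progressively wider widths
         * (up to max_dma_bits) whenever Xen cannot supply a machine-contiguous
         * region below the current limit.
         */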
        for (i = 0; i < io_tlb_nslabs; i += IO_TLB_SEGSIZE) {
                do {
                        rc = xen_create_contiguous_region(
                                (unsigned long)io_tlb_start + (i << IO_TLB_SHIFT),
                                get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
                                dma_bits);
                } while (rc && dma_bits++ < max_dma_bits);
                panic("No suitable physical memory available for SWIOTLB buffer!\n"
                      "Use dom0_mem Xen boot parameter to reserve\n"
                      "some DMA memory (e.g., dom0_mem=-128M).\n");
                        free_bootmem(__pa(io_tlb_start + i), bytes - i);
                        for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
                                unsigned int bits = fls64(virt_to_bus(io_tlb_start + i - 1));
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
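        /*
         * After this loop the free list repeats the pattern IO_TLB_SEGSIZE,
         * IO_TLB_SEGSIZE-1, ..., 2, 1 within each segment, i.e.
         * io_tlb_list[i] counts how many slabs are free from slab i up to
         * the end of its segment.
         */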
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem(io_tlb_overflow);
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");

        do {
                rc = xen_create_contiguous_region(
                        (unsigned long)io_tlb_overflow_buffer,
                        get_order(io_tlb_overflow),
                        dma_bits);
        } while (rc && dma_bits++ < max_dma_bits);
        if (rc)
                panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");

        if (verbose)
                swiotlb_print_info();
}
swiotlb_init(int verbose)
{
        unsigned long ram_end;
        size_t defsz = 64 << 20; /* 64MB default size */

        if (swiotlb_force == 1) {
        } else if ((swiotlb_force != -1) &&
                   is_running_on_xen() &&
                   is_initial_xendomain()) {
                /* Domain 0 always has a swiotlb. */
                ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
                if (ram_end <= 0x1ffff)
                        defsz = 2 << 20; /* 2MB on <512MB systems. */
                else if (ram_end <= 0x3ffff)
                        defsz = 4 << 20; /* 4MB on <1GB systems. */
                else if (ram_end <= 0x7ffff)
                        defsz = 8 << 20; /* 8MB on <2GB systems. */
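                /*
                 * ram_end is the highest RAM page frame reported by
                 * XENMEM_maximum_ram_page; with 4 KiB frames, 0x20000 frames
                 * correspond to 512 MB, so the thresholds above pick smaller
                 * apertures on small-memory systems.
                 */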
        swiotlb_init_with_default_size(defsz, verbose);

                printk(KERN_INFO "Software IO TLB disabled\n");
}
static inline int range_needs_mapping(phys_addr_t pa, size_t size)
{
        return range_straddles_page_boundary(pa, size);
}

static int is_swiotlb_buffer(dma_addr_t addr)
{
        unsigned long pfn = mfn_to_local_pfn(PFN_DOWN(addr));
        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;

        return paddr >= virt_to_phys(io_tlb_start) &&
               paddr < virt_to_phys(io_tlb_end);
}
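/*
 * In is_swiotlb_buffer() above the incoming address is a bus (machine)
 * address; under Xen it is translated back to a local pseudo-physical frame
 * with mfn_to_local_pfn() before being compared against the aperture, since
 * the aperture is identified by its pseudo-physical range.
 */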
 * Bounce: copy the swiotlb buffer back to the original dma location
 *
 * We use __copy_to_user_inatomic to transfer to the host buffer because the
 * buffer may be mapped read-only (e.g., in the blkback driver) but lower-level
 * drivers map the buffer for DMA_BIDIRECTIONAL access.  This causes an
 * unnecessary copy from the aperture to the host buffer, and a page fault.
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                           enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(phys);

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = phys & ~PAGE_MASK;

                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn),
                                             KM_BOUNCE_READ);
                        if (dir == DMA_TO_DEVICE)
                                memcpy(dma_addr, buffer + offset, sz);
                        else if (__copy_to_user_inatomic(buffer + offset,
                        kunmap_atomic(buffer, KM_BOUNCE_READ);
                        local_irq_restore(flags);
        } else {
                if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, phys_to_virt(phys), size);
                else if (__copy_to_user_inatomic(phys_to_virt(phys),
 * Allocates bounce buffer and returns its kernel virtual address.
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
        unsigned int nslots, stride, index, wrap;
        unsigned long offset_slots;
        unsigned long max_slots;

        mask = dma_get_seg_boundary(hwdev);
        offset_slots = -IO_TLB_SEGSIZE;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
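        /*
         * Example: a device with no segment boundary restriction reports
         * mask == ~0UL, so mask + 1 wraps to 0 and the second arm is taken;
         * otherwise max_slots is simply the boundary size expressed in slabs.
         */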
        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;
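        /*
         * With the usual 2 KB slabs (IO_TLB_SHIFT == 11) and 4 KB pages this
         * means multi-page requests are searched at page-aligned slots (a
         * stride of two slabs), while smaller requests can start at any slab.
         */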
        /*
         * Find a suitable number of IO TLB entries that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;

                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        if (index >= io_tlb_nslabs)
                                index = 0;

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        return NULL;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index + i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                 io_tlb_list[index + nslots] : 0);

        /*
         * Step 1: return the slots to the free list, merging the
         * slots with succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--)
                io_tlb_list[i] = ++count;
        /*
         * Step 2: merge the returned slots with the preceding slots,
         * if available (non-zero)
         */
        for (i = index - 1;
             (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i];
             i--)
                io_tlb_list[i] = ++count;

        spin_unlock_irqrestore(&io_tlb_lock, flags);
}
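/*
 * Worked example of the merge above: suppose slots 5..7 of a segment are
 * freed while io_tlb_list[8] is already 2 (slots 8 and 9 free).  Step 1
 * walks 7, 6, 5 assigning 3, 4, 5; step 2 then renumbers any free slots
 * immediately below slot 5 in the same segment, so every entry again counts
 * the free run that starts at it.
 */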
sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

        BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
        swiotlb_bounce(phys, dma_addr, size, dir);
}
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation.  This is very bad.
         * Unfortunately the drivers cannot handle this failure properly
         * unless they check for pci_dma_mapping_error (most don't).
         * When the mapping is small enough, return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");

        if (size <= io_tlb_overflow || !do_panic)
                return;

        if (dir == DMA_BIDIRECTIONAL)
                panic("DMA: Random memory could be DMA accessed\n");
        if (dir == DMA_FROM_DEVICE)
                panic("DMA: Random memory could be DMA written\n");
        if (dir == DMA_TO_DEVICE)
                panic("DMA: Random memory could be DMA read\n");
}
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * PCI address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        phys_addr_t phys = page_to_pseudophys(page) + offset;
        dma_addr_t dev_addr = gnttab_dma_map_page(page) + offset;
        void *map;

        BUG_ON(dir == DMA_NONE);

        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_needs_mapping(phys, size))
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        gnttab_dma_unmap_page(dev_addr);
        map = map_single(dev, phys, size, dir);
        if (!map) {
                swiotlb_full(dev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = swiotlb_virt_to_bus(dev, map);
        return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
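/*
 * Typical caller pattern (illustrative sketch, not code from this file): a
 * driver maps a page, checks for the overflow sentinel, and unmaps when the
 * transfer completes:
 *
 *      dma_addr_t dma = swiotlb_map_page(dev, page, 0, len,
 *                                        DMA_TO_DEVICE, NULL);
 *      if (swiotlb_dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ...
 *      swiotlb_unmap_page(dev, dma, len, DMA_TO_DEVICE, NULL);
 */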
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, int dir)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(dev_addr)) {
                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                return;
        }

        gnttab_dma_unmap_page(dev_addr);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        struct dma_attrs *attrs)
{
        unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
 * call this function before doing so.  At the next point you give the PCI dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(dev_addr))
                sync_single(hwdev, phys_to_virt(paddr), size, dir);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(dev_addr))
                sync_single(hwdev, phys_to_virt(paddr), size, dir);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir)
{
        swiotlb_sync_single_for_cpu(hwdev, dev_addr + offset, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(hwdev, dev_addr + offset, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                dma_addr_t dev_addr = gnttab_dma_map_page(sg_page(sg))
                                      + sg->offset;
                phys_addr_t paddr = page_to_pseudophys(sg_page(sg))
                                    + sg->offset;

                if (range_needs_mapping(paddr, sg->length) ||
                    !dma_capable(hwdev, dev_addr, sg->length)) {
                        void *map;

                        gnttab_dma_unmap_page(dev_addr);
                        map = map_single(hwdev, paddr,
                                         sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                       attrs);
                                sgl[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);
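/*
 * Note on the failure path above: when a bounce buffer cannot be obtained,
 * the entries mapped so far are unwound via swiotlb_unmap_sg_attrs(),
 * sgl[0].dma_length is zeroed, and 0 is returned so the caller can tell
 * that nothing in the list remains mapped.
 */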
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
               int dir)
{
        return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 int dir)
{
        return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sgl,
                        int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                swiotlb_sync_single_for_cpu(hwdev, sg->dma_address,
                                            sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                swiotlb_sync_single_for_device(hwdev, sg->dma_address,
                                               sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
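/*
 * swiotlb_map_page() substitutes the overflow buffer when no bounce slot is
 * available, so a returned address equal to the overflow buffer's bus
 * address is the error indication tested for below.
 */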
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
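 *
 * In this swiotlb implementation the test below requires the mask to cover
 * the full dma_bits-wide range the aperture was made contiguous under; e.g.
 * if dma_bits ended up at 31, a standard 32-bit mask passes while a 24-bit
 * ISA-style mask does not.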
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return (mask >= ((1UL << dma_bits) - 1));
}
EXPORT_SYMBOL(swiotlb_dma_supported);