#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>
#include <linux/swiotlb.h>

#include <xen/xen-ops.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
#include <asm/xen/page.h>
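/*
 * Non-zero when 64-bit "double address cycle" (DAC) PCI addressing should
 * not be used: set by "iommu=nodac" and by the bridge quirks at the end of
 * this file. A negative value means DAC was explicitly allowed with
 * "iommu=usedac", so the quirks must leave it alone.
 */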
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver dma translation.
 */
int iommu_pass_through __read_mostly;
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768
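/*
 * Typical driver usage (directly or via pci_set_dma_mask()) is to try a
 * wide mask first and fall back to 32 bits, e.g.:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *          dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */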
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;

        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
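/*
 * Reserve a block below 4G early in boot; pci_iommu_alloc() frees it again
 * before IOMMU detection runs, so that a hardware IOMMU (the GART aperture
 * in particular) can still find a large contiguous range under 4G.
 */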
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * See allocate_aperture() in aperture_64.c for the reason 512M
         * is used as the allocation goal here.
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                 512ULL<<20);
        /*
         * Kmemleak should not scan this block as it may not be mapped via the
         * kernel direct mapping.
         */
        kmemleak_ignore(dma32_bootmem_ptr);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
#endif
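/*
 * With Xen, streaming DMA always goes through the swiotlb bounce buffers;
 * coherent allocations use the generic routines below, which exchange the
 * backing pages for a machine-contiguous region.
 */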
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc_coherent = dma_generic_alloc_coherent,
        .free_coherent = dma_generic_free_coherent,
        .mapping_error = swiotlb_dma_mapping_error,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
        .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .dma_supported = swiotlb_dma_supported,
};
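/*
 * Early IOMMU detection: each detect routine may claim the platform and
 * set iommu_detected; actual initialization of whichever implementation
 * won happens later, from pci_iommu_init().
 */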
void __init pci_iommu_alloc(void)
{
#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
#endif

        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        if (swiotlb) {
                printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
                dma_ops = &swiotlb_dma_ops;
        }
}
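/*
 * Under Xen, pages that are contiguous in pseudo-physical address space
 * are not necessarily machine-contiguous, so after allocating we ask the
 * hypervisor to exchange the backing frames for a machine-contiguous
 * range that fits below the device's DMA mask.
 */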
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
#ifndef CONFIG_XEN
        dma_addr_t addr;
#else
        void *memory;
#endif
        unsigned int order = get_order(size);

        dma_mask = dma_alloc_coherent_mask(dev, flag);

#ifndef CONFIG_XEN
        flag |= __GFP_ZERO;
again:
#else
        flag &= ~(__GFP_DMA | __GFP_DMA32);
#endif
        page = alloc_pages_node(dev_to_node(dev), flag, order);
        if (!page)
                return NULL;

#ifndef CONFIG_XEN
        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, order);

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
#else
        memory = page_address(page);
        if (xen_create_contiguous_region((unsigned long)memory, order,
                                         fls64(dma_mask)) != 0) {
                __free_pages(page, order);
                return NULL;
        }

        *dma_addr = virt_to_bus(memory);
        return memset(memory, 0, size);
#endif
}
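/*
 * Release the machine-contiguous region back to the hypervisor before
 * freeing the pages themselves.
 */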
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_addr)
{
        unsigned int order = get_order(size);
        unsigned long va = (unsigned long)vaddr;

        xen_destroy_contiguous_region(va, order);
        free_pages(va, order);
}
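/*
 * "iommu=" takes a comma-separated list of the options parsed below,
 * e.g. "iommu=soft,nomerge".
 */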
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
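/*
 * Xen: a buffer that crosses a page boundary is only safe to map as one
 * DMA range if the underlying machine frames (mfns) are contiguous;
 * contiguity of the pseudo-physical frames (pfns) is not sufficient.
 */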
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

int range_straddles_page_boundary(paddr_t p, size_t size)
{
        unsigned long pfn = p >> PAGE_SHIFT;
        unsigned int offset = p & ~PAGE_MASK;

        return ((offset + size > PAGE_SIZE) &&
                !check_pages_physically_contiguous(pfn, offset, size));
}
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif

        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();

        amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);

/*
 * MCP51 PCI bridge corrupts data for DAC. Disable it. Reported in
 */
static __devinit void mcp51_no_dac(struct pci_dev *dev)
{
        if (forbid_dac == 0) {
                dev_info(&dev->dev,
                         "PCI: MCP51 PCI bridge detected. Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x026f, mcp51_no_dac);
#endif /* CONFIG_PCI */