Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] arch/x86/kernel/pci-nommu-xen.c
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <xen/gnttab.h>

#include <asm/iommu.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/swiotlb.h>
#include <asm/tlbflush.h>
#include <asm/gnttab_dma.h>
#include <asm/bug.h>

/*
 * With no IOMMU there is no way to fix up an unreachable buffer, so a
 * failed check is fatal: report it and tell the user to enable bounce
 * buffering with the swiotlb=force boot parameter.
 */
#define IOMMU_BUG_ON(test)                              \
do {                                                    \
        if (unlikely(test)) {                           \
                printk(KERN_ALERT "Fatal DMA error! "   \
                       "Please use 'swiotlb=force'\n"); \
                BUG();                                  \
        }                                               \
} while (0)

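/*
 * Illustrative trigger (not from the original source): the check fires,
 * for example, when a device limited to 32-bit DMA is handed a
 * grant-mapped buffer whose machine address lies above 4 GiB:
 *
 *	dma_addr_t dma = gnttab_dma_map_page(page) + offset;
 *	IOMMU_BUG_ON(!dma_capable(dev, dma, size));
 *
 * Nothing here can remap or bounce the buffer, so the mapping routines
 * below panic rather than hand the device an address it cannot reach.
 */
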
/*
 * Map a scatterlist for DMA. Each entry's bus address comes from the
 * grant-table DMA layer; with no IOMMU, every entry must already be
 * reachable by the device and must not straddle a page boundary.
 */
static int
gnttab_map_sg(struct device *hwdev, struct scatterlist *sgl, int nents,
              enum dma_data_direction dir, struct dma_attrs *attrs)
{
        unsigned int i;
        struct scatterlist *sg;

        WARN_ON(nents == 0 || sgl->length == 0);

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg_page(sg));
                sg->dma_address =
                        gnttab_dma_map_page(sg_page(sg)) + sg->offset;
                sg->dma_length  = sg->length;
                IOMMU_BUG_ON(!dma_capable(
                        hwdev, sg->dma_address, sg->length));
                IOMMU_BUG_ON(range_straddles_page_boundary(
                        page_to_pseudophys(sg_page(sg)) + sg->offset,
                        sg->length));
        }

        return nents;
}

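/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver reaches gnttab_map_sg()/gnttab_unmap_sg() through the generic
 * DMA API; "dev", "sgl" and "nents" stand in for the driver's own
 * device and scatterlist:
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -EIO;
 *	...program the device using sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note these ops never return a partial mapping: they map all nents
 * entries or BUG() on an unmappable one.
 */
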
/* Tear down the grant-table mappings established by gnttab_map_sg(). */
static void
gnttab_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, struct dma_attrs *attrs)
{
        unsigned int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i)
                gnttab_dma_unmap_page(sg->dma_address);
}

/*
 * Map a single page for DMA. The grant-table DMA layer supplies the
 * machine address; the same reachability and page-boundary checks as
 * in gnttab_map_sg() apply.
 */
static dma_addr_t
gnttab_map_page(struct device *dev, struct page *page, unsigned long offset,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        dma_addr_t dma;

        WARN_ON(size == 0);

        dma = gnttab_dma_map_page(page) + offset;
        IOMMU_BUG_ON(range_straddles_page_boundary(page_to_pseudophys(page) +
                                                   offset, size));
        IOMMU_BUG_ON(!dma_capable(dev, dma, size));

        return dma;
}

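/*
 * Usage sketch (hypothetical, not part of this file): single-buffer
 * streaming DMA goes through gnttab_map_page() via dma_map_single();
 * "dev", "buf" and "len" are placeholders:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...start the transfer using "handle"...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * No dma_mapping_error() check is shown because these ops BUG() on
 * failure instead of returning an error address.
 */
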
/* Release the grant-table mapping of a single page. */
static void
gnttab_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                  enum dma_data_direction dir, struct dma_attrs *attrs)
{
        gnttab_dma_unmap_page(dma_addr);
}

/*
 * DMA is cache-coherent on x86, so "sync for device" only needs to
 * make sure CPU writes have reached memory before the device reads it.
 */
static void nommu_sync_single_for_device(struct device *dev,
                        dma_addr_t addr, size_t size,
                        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static void nommu_sync_sg_for_device(struct device *dev,
                        struct scatterlist *sg, int nelems,
                        enum dma_data_direction dir)
{
        flush_write_buffers();
}

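/*
 * Usage sketch (hypothetical): a driver that reuses a mapped buffer
 * fills it from the CPU and syncs before kicking the device:
 *
 *	memcpy(buf, data, len);
 *	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
 *	...tell the device to start reading...
 */
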
/*
 * Without address translation there is nothing to veto: claim support
 * for any DMA mask and rely on the per-mapping checks above.
 */
static int nommu_dma_supported(struct device *hwdev, u64 mask)
{
        return 1;
}

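/*
 * Usage sketch (hypothetical): nommu_dma_supported() is what answers a
 * driver's mask negotiation:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	(never taken with these ops)
 *
 * Any mask is accepted here; an out-of-range buffer is caught later at
 * map time by IOMMU_BUG_ON() instead.
 */
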
/* The "no IOMMU" DMA ops table used under Xen. */
struct dma_map_ops nommu_dma_ops = {
        .alloc_coherent         = dma_generic_alloc_coherent,
        .free_coherent          = dma_generic_free_coherent,
        .map_page               = gnttab_map_page,
        .unmap_page             = gnttab_unmap_page,
        .map_sg                 = gnttab_map_sg,
        .unmap_sg               = gnttab_unmap_sg,
        .sync_single_for_device = nommu_sync_single_for_device,
        .sync_sg_for_device     = nommu_sync_sg_for_device,
        .dma_supported          = nommu_dma_supported,
};
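
/*
 * How this table gets wired up (sketch, based on the stock x86 code in
 * arch/x86/kernel/pci-dma.c of this kernel generation; verify against
 * the tree at hand):
 *
 *	struct dma_map_ops *dma_ops = &nommu_dma_ops;
 *
 * dma_ops is the default until an IOMMU (or swiotlb) initialisation
 * routine installs its own table, so on Xen without a hardware IOMMU
 * every DMA API call lands in the functions above.
 */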