/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-mapping.txt for
 * a description of how these routines should be used.
 */
12 #include <linux/module.h>
13 #include <asm/sn/pci/pci_bus_cvlink.h>
18 pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
19 void free_pciio_dmamap(pcibr_dmamap_t);
20 static struct pcibr_dmamap_s *find_sn_dma_map(dma_addr_t, unsigned char);
21 void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
26 extern vertex_hdl_t busnum_to_pcibr_vhdl[];
27 extern nasid_t busnum_to_nid[];
28 extern void * busnum_to_atedmamaps[];
31 * get_free_pciio_dmamap - find and allocate an ATE
32 * @pci_bus: PCI bus to get an entry for
34 * Finds and allocates an ATE on the PCI bus specified
38 get_free_pciio_dmamap(vertex_hdl_t pci_bus)
41 struct pcibr_dmamap_s *sn_dma_map = NULL;
44 * Darn, we need to get the maps allocated for this bus.
46 for (i = 0; i < MAX_PCI_XWIDGET; i++) {
47 if (busnum_to_pcibr_vhdl[i] == pci_bus) {
48 sn_dma_map = busnum_to_atedmamaps[i];
53 * Now get a free dmamap entry from this list.
55 for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
56 if (!sn_dma_map->bd_dma_addr) {
57 sn_dma_map->bd_dma_addr = -1;
58 return( (pciio_dmamap_t) sn_dma_map );
66 * free_pciio_dmamap - free an ATE
67 * @dma_map: ATE to free
69 * Frees the ATE specified by @dma_map.
72 free_pciio_dmamap(pcibr_dmamap_t dma_map)
74 dma_map->bd_dma_addr = 0;
78 * find_sn_dma_map - find an ATE associated with @dma_addr and @busnum
79 * @dma_addr: DMA address to look for
80 * @busnum: PCI bus to look on
82 * Finds the ATE associated with @dma_addr and @busnum.
84 static struct pcibr_dmamap_s *
85 find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
88 struct pcibr_dmamap_s *sn_dma_map = NULL;
91 sn_dma_map = busnum_to_atedmamaps[busnum];
93 for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
94 if (sn_dma_map->bd_dma_addr == dma_addr) {
103 * sn_pci_alloc_consistent - allocate memory for coherent DMA
104 * @hwdev: device to allocate for
105 * @size: size of the region
106 * @dma_handle: DMA (bus) address
108 * pci_alloc_consistent() returns a pointer to a memory region suitable for
109 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
110 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
112 * This interface is usually used for "command" streams (e.g. the command
113 * queue for a SCSI controller). See Documentation/DMA-mapping.txt for
116 * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
119 sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
123 struct sn_device_sysdata *device_sysdata;
124 unsigned long phys_addr;
125 pcibr_dmamap_t dma_map = 0;
128 * Get hwgraph vertex for the device
130 device_sysdata = SN_DEVICE_SYSDATA(hwdev);
131 vhdl = device_sysdata->vhdl;
134 * Allocate the memory.
135 * FIXME: We should be doing alloc_pages_node for the node closest
138 if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
141 memset(cpuaddr, 0x0, size);
143 /* physical addr. of the memory we just got */
144 phys_addr = __pa(cpuaddr);
147 * 64 bit address translations should never fail.
148 * 32 bit translations can fail if there are insufficient mapping
149 * resources and the direct map is already wired to a different
151 * 32 bit translations can also return a > 32 bit address, because
152 * pcibr_dmatrans_addr ignores a missing PCIIO_DMA_A64 flag on
155 if (hwdev->consistent_dma_mask == ~0UL)
156 *dma_handle = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
157 PCIIO_DMA_CMD | PCIIO_DMA_A64);
159 dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_CMD |
160 MINIMAL_ATE_FLAG(phys_addr, size));
162 *dma_handle = (dma_addr_t)
163 pcibr_dmamap_addr(dma_map, phys_addr, size);
164 dma_map->bd_dma_addr = *dma_handle;
167 *dma_handle = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
172 if (!*dma_handle || *dma_handle > hwdev->consistent_dma_mask) {
174 pcibr_dmamap_done(dma_map);
175 pcibr_dmamap_free(dma_map);
177 free_pages((unsigned long) cpuaddr, get_order(size));
185 * sn_pci_free_consistent - free memory associated with coherent DMAable region
186 * @hwdev: device to free for
187 * @size: size to free
188 * @vaddr: kernel virtual address to free
189 * @dma_handle: DMA address associated with this region
191 * Frees the memory allocated by pci_alloc_consistent(). Also known
192 * as platform_pci_free_consistent() by the IA64 machvec code.
195 sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
197 struct pcibr_dmamap_s *dma_map = NULL;
200 * Get the sn_dma_map entry.
202 if (IS_PCI32_MAPPED(dma_handle))
203 dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);
206 * and free it if necessary...
209 pcibr_dmamap_done(dma_map);
210 pcibr_dmamap_free(dma_map);
212 free_pages((unsigned long) vaddr, get_order(size));
216 * sn_pci_map_sg - map a scatter-gather list for DMA
217 * @hwdev: device to map for
218 * @sg: scatterlist to map
219 * @nents: number of entries
220 * @direction: direction of the DMA transaction
222 * Maps each entry of @sg for DMA. Also known as platform_pci_map_sg by the
226 sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
231 unsigned long phys_addr;
232 struct sn_device_sysdata *device_sysdata;
233 pcibr_dmamap_t dma_map;
234 struct scatterlist *saved_sg = sg;
236 /* can't go anywhere w/o a direction in life */
237 if (direction == PCI_DMA_NONE)
241 * Get the hwgraph vertex for the device
243 device_sysdata = SN_DEVICE_SYSDATA(hwdev);
244 vhdl = device_sysdata->vhdl;
247 * Setup a DMA address for each entry in the
250 for (i = 0; i < nents; i++, sg++) {
251 phys_addr = __pa((unsigned long)page_address(sg->page) + sg->offset);
254 * Handle 32-63 bit cards via direct mapping
256 if (IS_PCI32G(hwdev)) {
257 sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
258 sg->length, PCIIO_DMA_DATA);
259 sg->dma_length = sg->length;
261 * See if we got a direct map entry
263 if (sg->dma_address) {
270 * It is a 32 bit card and we cannot do direct mapping,
273 dma_map = pcibr_dmamap_alloc(vhdl, NULL, sg->length, PCIIO_DMA_DATA);
275 printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
276 "anymore 32 bit page map entries.\n");
278 * We will need to free all previously allocated entries.
281 sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
286 sg->dma_address = pcibr_dmamap_addr(dma_map, phys_addr, sg->length);
287 sg->dma_length = sg->length;
288 dma_map->bd_dma_addr = sg->dma_address;
296 * sn_pci_unmap_sg - unmap a scatter-gather list
297 * @hwdev: device to unmap
298 * @sg: scatterlist to unmap
299 * @nents: number of scatterlist entries
300 * @direction: DMA direction
302 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
303 * concerning calls here are the same as for pci_unmap_single() below. Also
304 * known as sn_pci_unmap_sg() by the IA64 machvec code.
307 sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
310 struct pcibr_dmamap_s *dma_map;
312 /* can't go anywhere w/o a direction in life */
313 if (direction == PCI_DMA_NONE)
316 for (i = 0; i < nents; i++, sg++){
318 if (IS_PCI32_MAPPED(sg->dma_address)) {
319 dma_map = find_sn_dma_map(sg->dma_address, hwdev->bus->number);
321 pcibr_dmamap_done(dma_map);
322 pcibr_dmamap_free(dma_map);
326 sg->dma_address = (dma_addr_t)NULL;
332 * sn_pci_map_single - map a single region for DMA
333 * @hwdev: device to map for
334 * @ptr: kernel virtual address of the region to map
335 * @size: size of the region
336 * @direction: DMA direction
338 * Map the region pointed to by @ptr for DMA and return the
339 * DMA address. Also known as platform_pci_map_single() by
340 * the IA64 machvec code.
342 * We map this to the one step pcibr_dmamap_trans interface rather than
343 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
344 * no way of saving the dmamap handle from the alloc to later free
345 * (which is pretty much unacceptable).
347 * TODO: simplify our interface;
348 * get rid of dev_desc and vhdl (seems redundant given a pci_dev);
349 * figure out how to save dmamap handle so can use two step.
352 sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
356 unsigned long phys_addr;
357 struct sn_device_sysdata *device_sysdata;
358 pcibr_dmamap_t dma_map = NULL;
360 if (direction == PCI_DMA_NONE)
363 /* SN cannot support DMA addresses smaller than 32 bits. */
364 if (IS_PCI32L(hwdev))
368 * find vertex for the device
370 device_sysdata = SN_DEVICE_SYSDATA(hwdev);
371 vhdl = device_sysdata->vhdl;
374 * Call our dmamap interface
377 phys_addr = __pa(ptr);
380 * Devices that support 32 bit to 63 bit DMA addresses get
381 * 32 bit DMA addresses.
383 * First try to get a 32 bit direct map register.
385 if (IS_PCI32G(hwdev)) {
386 dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
393 * It's a 32 bit card and we cannot do direct mapping so
394 * let's use the PMU instead.
397 dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_DATA |
398 MINIMAL_ATE_FLAG(phys_addr, size));
401 printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
402 "32 bit page map entries.\n");
406 dma_addr = (dma_addr_t) pcibr_dmamap_addr(dma_map, phys_addr, size);
407 dma_map->bd_dma_addr = dma_addr;
409 return ((dma_addr_t)dma_addr);
413 * sn_pci_unmap_single - unmap a region used for DMA
414 * @hwdev: device to unmap
415 * @dma_addr: DMA address to unmap
416 * @size: size of region
417 * @direction: DMA direction
419 * Unmaps the region pointed to by @dma_addr. Also known as
420 * platform_pci_unmap_single() by the IA64 machvec code.
423 sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
425 struct pcibr_dmamap_s *dma_map = NULL;
427 if (direction == PCI_DMA_NONE)
431 * Get the sn_dma_map entry.
433 if (IS_PCI32_MAPPED(dma_addr))
434 dma_map = find_sn_dma_map(dma_addr, hwdev->bus->number);
437 * and free it if necessary...
440 pcibr_dmamap_done(dma_map);
441 pcibr_dmamap_free(dma_map);
446 * sn_pci_dma_sync_single - make sure all DMAs have completed
447 * @hwdev: device to sync
448 * @dma_handle: DMA address to sync
449 * @size: size of region
450 * @direction: DMA direction
452 * This routine is supposed to sync the DMA region specified
453 * by @dma_handle into the 'coherence domain'. We do not need to do
454 * anything on our platform.
457 sn_pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
/**
 * sn_pci_dma_sync_sg - make sure all DMAs have completed
 * @hwdev: device to sync
 * @sg: scatterlist to sync
 * @nents: number of entries in the scatterlist
 * @direction: DMA direction
 *
 * This routine is supposed to sync the DMA regions specified
 * by @sg into the 'coherence domain'.  We do not need to do anything
 * on our platform.
 */
void
sn_pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	/* Coherence is maintained by hardware on SN; nothing to do. */
	return;
}
482 * sn_dma_supported - test a DMA mask
483 * @hwdev: device to test
484 * @mask: DMA mask to test
486 * Return whether the given PCI device DMA address mask can be supported
487 * properly. For example, if your device can only drive the low 24-bits
488 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
489 * this function. Of course, SN only supports devices that have 32 or more
490 * address bits when using the PMU. We could theoretically support <32 bit
491 * cards using direct mapping, but we'll worry about that later--on the off
492 * chance that someone actually wants to use such a card.
495 sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
497 if (mask < 0xffffffff)
503 * New generic DMA routines just wrap sn2 PCI routines until we
504 * support other bus types (if ever).
508 sn_dma_supported(struct device *dev, u64 mask)
510 BUG_ON(dev->bus != &pci_bus_type);
512 return sn_pci_dma_supported(to_pci_dev(dev), mask);
514 EXPORT_SYMBOL(sn_dma_supported);
517 sn_dma_set_mask(struct device *dev, u64 dma_mask)
519 BUG_ON(dev->bus != &pci_bus_type);
521 if (!sn_dma_supported(dev, dma_mask))
524 *dev->dma_mask = dma_mask;
527 EXPORT_SYMBOL(sn_dma_set_mask);
530 sn_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
533 BUG_ON(dev->bus != &pci_bus_type);
535 return sn_pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
537 EXPORT_SYMBOL(sn_dma_alloc_coherent);
540 sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
541 dma_addr_t dma_handle)
543 BUG_ON(dev->bus != &pci_bus_type);
545 sn_pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
547 EXPORT_SYMBOL(sn_dma_free_coherent);
550 sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
553 BUG_ON(dev->bus != &pci_bus_type);
555 return sn_pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
557 EXPORT_SYMBOL(sn_dma_map_single);
560 sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
563 BUG_ON(dev->bus != &pci_bus_type);
565 sn_pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
567 EXPORT_SYMBOL(sn_dma_unmap_single);
570 sn_dma_map_page(struct device *dev, struct page *page,
571 unsigned long offset, size_t size,
574 BUG_ON(dev->bus != &pci_bus_type);
576 return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
578 EXPORT_SYMBOL(sn_dma_map_page);
581 sn_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
584 BUG_ON(dev->bus != &pci_bus_type);
586 pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
588 EXPORT_SYMBOL(sn_dma_unmap_page);
591 sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
594 BUG_ON(dev->bus != &pci_bus_type);
596 return sn_pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
598 EXPORT_SYMBOL(sn_dma_map_sg);
601 sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
604 BUG_ON(dev->bus != &pci_bus_type);
606 sn_pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
608 EXPORT_SYMBOL(sn_dma_unmap_sg);
611 sn_dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
614 BUG_ON(dev->bus != &pci_bus_type);
616 sn_pci_dma_sync_single(to_pci_dev(dev), dma_handle, size, (int)direction);
618 EXPORT_SYMBOL(sn_dma_sync_single);
621 sn_dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
624 BUG_ON(dev->bus != &pci_bus_type);
626 sn_pci_dma_sync_sg(to_pci_dev(dev), sg, nelems, (int)direction);
628 EXPORT_SYMBOL(sn_dma_sync_sg);
/* Export the sn2 PCI entry points for modular drivers. */
EXPORT_SYMBOL(sn_pci_unmap_single);
EXPORT_SYMBOL(sn_pci_map_single);
EXPORT_SYMBOL(sn_pci_dma_sync_single);
EXPORT_SYMBOL(sn_pci_map_sg);
EXPORT_SYMBOL(sn_pci_unmap_sg);
EXPORT_SYMBOL(sn_pci_alloc_consistent);
EXPORT_SYMBOL(sn_pci_free_consistent);
EXPORT_SYMBOL(sn_pci_dma_supported);