2 ** IA64 System Bus Adapter (SBA) I/O MMU manager
4 ** (c) Copyright 2002-2003 Alex Williamson
5 ** (c) Copyright 2002-2003 Grant Grundler
6 ** (c) Copyright 2002-2003 Hewlett-Packard Company
8 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
11 ** This program is free software; you can redistribute it and/or modify
12 ** it under the terms of the GNU General Public License as published by
13 ** the Free Software Foundation; either version 2 of the License, or
14 ** (at your option) any later version.
17 ** This module initializes the IOC (I/O Controller) found on HP
18 ** McKinley machines and their successors.
22 #include <linux/config.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
30 #include <linux/string.h>
31 #include <linux/pci.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/acpi.h>
35 #include <linux/efi.h>
37 #include <asm/delay.h> /* ia64_get_itc() */
39 #include <asm/page.h> /* PAGE_OFFSET */
41 #include <asm/system.h> /* wmb() */
43 #include <asm/acpi-ext.h>
48 ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
49 ** not defined, all DMA will be 32bit and go through the TLB.
51 #define ALLOW_IOV_BYPASS
54 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
55 ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
56 ** disconnect on 4k boundaries and prevent such issues. If the device is
57 ** particularly agressive, this option will keep the entire pdir valid such
58 ** that prefetching will hit a valid address. This could severely impact
59 ** error containment, and is therefore off by default. The page that is
60 ** used for spill-over is poisoned, so that should help debugging somewhat.
62 #undef FULL_VALID_PDIR
64 #define ENABLE_MARK_CLEAN
67 ** The number of debug flags is a clue - this code is fragile.
71 #undef DEBUG_SBA_RUN_SG
72 #undef DEBUG_SBA_RESOURCE
73 #undef ASSERT_PDIR_SANITY
74 #undef DEBUG_LARGE_SG_ENTRIES
77 #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
78 #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
81 #define SBA_INLINE __inline__
82 /* #define SBA_INLINE */
85 #define DBG_INIT(x...) printk(x)
87 #define DBG_INIT(x...)
91 #define DBG_RUN(x...) printk(x)
96 #ifdef DEBUG_SBA_RUN_SG
97 #define DBG_RUN_SG(x...) printk(x)
99 #define DBG_RUN_SG(x...)
103 #ifdef DEBUG_SBA_RESOURCE
104 #define DBG_RES(x...) printk(x)
106 #define DBG_RES(x...)
110 #define DBG_BYPASS(x...) printk(x)
112 #define DBG_BYPASS(x...)
115 #ifdef ASSERT_PDIR_SANITY
116 #define ASSERT(expr) \
118 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
126 ** The number of pdir entries to "free" before issuing
127 ** a read to PCOM register to flush out PCOM writes.
128 ** Interacts with allocation granularity (ie 4 or 8 entries
129 ** allocated and free'd/purged at a time might make this
130 ** less interesting).
132 #define DELAYED_RESOURCE_CNT 16
134 #define DEFAULT_DMA_HINT_REG 0
136 #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
137 #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
138 #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
140 #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
142 #define IOC_FUNC_ID 0x000
143 #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
144 #define IOC_IBASE 0x300 /* IO TLB */
145 #define IOC_IMASK 0x308
146 #define IOC_PCOM 0x310
147 #define IOC_TCNFG 0x318
148 #define IOC_PDIR_BASE 0x320
150 /* AGP GART driver looks for this */
151 #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
154 ** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
155 ** It's safer (avoid memory corruption) to keep DMA page mappings
156 ** equivalently sized to VM PAGE_SIZE.
158 ** We really can't avoid generating a new mapping for each
159 ** page since the Virtual Coherence Index has to be generated
160 ** and updated for each page.
162 ** IOVP_SIZE could only be greater than PAGE_SIZE if we are
163 ** confident the drivers really only touch the next physical
164 ** page iff that driver instance owns it.
166 #define IOVP_SIZE PAGE_SIZE
167 #define IOVP_SHIFT PAGE_SHIFT
168 #define IOVP_MASK PAGE_MASK
171 void *ioc_hpa; /* I/O MMU base address */
172 char *res_map; /* resource map, bit == pdir entry */
173 u64 *pdir_base; /* physical base address */
174 unsigned long ibase; /* pdir IOV Space base */
175 unsigned long imask; /* pdir IOV Space mask */
177 unsigned long *res_hint; /* next avail IOVP - circular search */
179 unsigned long hint_mask_pdir; /* bits used for DMA hints */
180 unsigned int res_bitshift; /* from the RIGHT! */
181 unsigned int res_size; /* size of resource map in bytes */
182 unsigned int hint_shift_pdir;
183 unsigned long dma_mask;
184 #if DELAYED_RESOURCE_CNT > 0
186 struct sba_dma_pair {
189 } saved[DELAYED_RESOURCE_CNT];
192 #ifdef CONFIG_PROC_FS
193 #define SBA_SEARCH_SAMPLE 0x100
194 unsigned long avg_search[SBA_SEARCH_SAMPLE];
195 unsigned long avg_idx; /* current index into avg_search */
196 unsigned long used_pages;
197 unsigned long msingle_calls;
198 unsigned long msingle_pages;
199 unsigned long msg_calls;
200 unsigned long msg_pages;
201 unsigned long usingle_calls;
202 unsigned long usingle_pages;
203 unsigned long usg_calls;
204 unsigned long usg_pages;
205 #ifdef ALLOW_IOV_BYPASS
206 unsigned long msingle_bypass;
207 unsigned long usingle_bypass;
208 unsigned long msg_bypass;
212 /* Stuff we don't need in performance path */
213 struct ioc *next; /* list of IOC's in system */
214 acpi_handle handle; /* for multiple IOC's */
216 unsigned int func_id;
217 unsigned int rev; /* HW revision of chip */
219 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
220 struct pci_dev *sac_only_dev;
223 static struct ioc *ioc_list;
224 static int reserve_sba_gart = 1;
226 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
227 #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
229 #define sba_sg_address(sg) ((sg)->address ? (sg)->address : \
230 page_address((sg)->page) + (sg)->offset)
233 #ifdef FULL_VALID_PDIR
234 static u64 prefetch_spill_page;
238 # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
239 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
241 # define GET_IOC(dev) NULL
245 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
246 ** (or rather not merge) DMA's into managable chunks.
247 ** On parisc, this is more of the software/tuning constraint
248 ** rather than the HW. I/O MMU allocation alogorithms can be
249 ** faster with smaller size is (to some degree).
251 #define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
253 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
255 /************************************
256 ** SBA register read and write support
258 ** BE WARNED: register writes are posted.
259 ** (ie follow writes which must reach HW with a read)
262 #define READ_REG(addr) __raw_readq(addr)
263 #define WRITE_REG(val, addr) __raw_writeq(val, addr)
265 #ifdef DEBUG_SBA_INIT
268 * sba_dump_tlb - debugging only - print IOMMU operating parameters
269 * @hpa: base address of the IOMMU
271 * Print the size/location of the IO MMU PDIR.
274 sba_dump_tlb(char *hpa)
276 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
277 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
278 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
279 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
280 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
286 #ifdef ASSERT_PDIR_SANITY
289 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
290 * @ioc: IO MMU structure which owns the pdir we are interested in.
291 * @msg: text to print ont the output line.
294 * Print one entry of the IO MMU PDIR in human readable form.
297 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
299 /* start printing from lowest pde in rval */
300 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
301 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
304 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
305 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
308 while (rcnt < BITS_PER_LONG) {
309 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
310 (rcnt == (pide & (BITS_PER_LONG - 1)))
312 rcnt, ptr, (unsigned long long) *ptr );
316 printk(KERN_DEBUG "%s", msg);
321 * sba_check_pdir - debugging only - consistency checker
322 * @ioc: IO MMU structure which owns the pdir we are interested in.
323 * @msg: text to print ont the output line.
325 * Verify the resource map and pdir state is consistent
328 sba_check_pdir(struct ioc *ioc, char *msg)
330 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
331 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
332 u64 *pptr = ioc->pdir_base; /* pdir ptr */
335 while (rptr < rptr_end) {
337 int rcnt; /* number of bits we might check */
343 /* Get last byte and highest bit from that */
344 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
345 if ((rval & 0x1) ^ pde)
348 ** BUMMER! -- res_map != pdir --
349 ** Dump rval and matching pdir entries
351 sba_dump_pdir_entry(ioc, msg, pide);
355 rval >>= 1; /* try the next bit */
359 rptr++; /* look at next word of res_map */
361 /* It'd be nice if we always got here :^) */
367 * sba_dump_sg - debugging only - print Scatter-Gather list
368 * @ioc: IO MMU structure which owns the pdir we are interested in.
369 * @startsg: head of the SG list
370 * @nents: number of entries in SG list
372 * print the SG list so we can verify it's correct by hand.
375 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
377 while (nents-- > 0) {
378 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
379 startsg->dma_address, startsg->dma_length,
380 sba_sg_address(startsg));
386 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
388 struct scatterlist *the_sg = startsg;
389 int the_nents = nents;
391 while (the_nents-- > 0) {
392 if (sba_sg_address(the_sg) == 0x0UL)
393 sba_dump_sg(NULL, startsg, nents);
398 #endif /* ASSERT_PDIR_SANITY */
403 /**************************************************************
405 * I/O Pdir Resource Management
407 * Bits set in the resource map are in use.
408 * Each bit can represent a number of pages.
409 * LSbs represent lower addresses (IOVA's).
411 ***************************************************************/
412 #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
414 /* Convert from IOVP to IOVA and vice versa. */
415 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | \
416 ((hint_reg)<<(ioc->hint_shift_pdir)))
417 #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))
419 /* FIXME : review these macros to verify correctness and usage */
420 #define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
422 #define RESMAP_MASK(n) ~(~0UL << (n))
423 #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
427 * sba_search_bitmap - find free space in IO PDIR resource bitmap
428 * @ioc: IO MMU structure which owns the pdir we are interested in.
429 * @bits_wanted: number of entries we need.
431 * Find consecutive free bits in resource bitmap.
432 * Each bit represents one entry in the IO Pdir.
433 * Cool perf optimization: search for log2(size) bits at a time.
435 static SBA_INLINE unsigned long
436 sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
438 unsigned long *res_ptr = ioc->res_hint;
439 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
440 unsigned long pide = ~0UL;
442 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
443 ASSERT(res_ptr < res_end);
444 if (bits_wanted > (BITS_PER_LONG/2)) {
445 /* Search word at a time - no mask needed */
446 for(; res_ptr < res_end; ++res_ptr) {
448 *res_ptr = RESMAP_MASK(bits_wanted);
449 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
450 pide <<= 3; /* convert to bit address */
454 /* point to the next word on next pass */
456 ioc->res_bitshift = 0;
459 ** Search the resource bit map on well-aligned values.
460 ** "o" is the alignment.
461 ** We need the alignment to invalidate I/O TLB using
462 ** SBA HW features in the unmap path.
464 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
465 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
468 if (bitshiftcnt >= BITS_PER_LONG) {
472 mask = RESMAP_MASK(bits_wanted) << bitshiftcnt;
474 DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
475 while(res_ptr < res_end)
477 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
479 if(0 == ((*res_ptr) & mask)) {
480 *res_ptr |= mask; /* mark resources busy! */
481 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
482 pide <<= 3; /* convert to bit address */
489 mask = RESMAP_MASK(bits_wanted);
494 /* look in the same word on the next pass */
495 ioc->res_bitshift = bitshiftcnt + bits_wanted;
499 if (res_end <= res_ptr) {
500 ioc->res_hint = (unsigned long *) ioc->res_map;
501 ioc->res_bitshift = 0;
503 ioc->res_hint = res_ptr;
510 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
511 * @ioc: IO MMU structure which owns the pdir we are interested in.
512 * @size: number of bytes to create a mapping for
514 * Given a size, find consecutive unmarked and then mark those bits in the
518 sba_alloc_range(struct ioc *ioc, size_t size)
520 unsigned int pages_needed = size >> IOVP_SHIFT;
521 #ifdef CONFIG_PROC_FS
522 unsigned long itc_start = ia64_get_itc();
526 ASSERT(pages_needed);
527 ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
528 ASSERT(pages_needed <= BITS_PER_LONG);
529 ASSERT(0 == (size & ~IOVP_MASK));
532 ** "seek and ye shall find"...praying never hurts either...
535 pide = sba_search_bitmap(ioc, pages_needed);
536 if (pide >= (ioc->res_size << 3)) {
537 pide = sba_search_bitmap(ioc, pages_needed);
538 if (pide >= (ioc->res_size << 3))
539 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
543 #ifdef ASSERT_PDIR_SANITY
544 /* verify the first enable bit is clear */
545 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
546 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
550 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
551 __FUNCTION__, size, pages_needed, pide,
552 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
555 #ifdef CONFIG_PROC_FS
557 unsigned long itc_end = ia64_get_itc();
558 unsigned long tmp = itc_end - itc_start;
559 /* check for roll over */
560 itc_start = (itc_end < itc_start) ? -(tmp) : (tmp);
562 ioc->avg_search[ioc->avg_idx++] = itc_start;
563 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
565 ioc->used_pages += pages_needed;
573 * sba_free_range - unmark bits in IO PDIR resource bitmap
574 * @ioc: IO MMU structure which owns the pdir we are interested in.
575 * @iova: IO virtual address which was previously allocated.
576 * @size: number of bytes to create a mapping for
578 * clear bits in the ioc's resource map
580 static SBA_INLINE void
581 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
583 unsigned long iovp = SBA_IOVP(ioc, iova);
584 unsigned int pide = PDIR_INDEX(iovp);
585 unsigned int ridx = pide >> 3; /* convert bit to byte address */
586 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
588 int bits_not_wanted = size >> IOVP_SHIFT;
590 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
591 unsigned long m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
593 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
594 __FUNCTION__, (uint) iova, size,
595 bits_not_wanted, m, pide, res_ptr, *res_ptr);
597 #ifdef CONFIG_PROC_FS
598 ioc->used_pages -= bits_not_wanted;
602 ASSERT(bits_not_wanted);
603 ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE);
604 ASSERT(bits_not_wanted <= BITS_PER_LONG);
605 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
610 /**************************************************************
612 * "Dynamic DMA Mapping" support (aka "Coherent I/O")
614 ***************************************************************/
616 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
620 * sba_io_pdir_entry - fill in one IO PDIR entry
621 * @pdir_ptr: pointer to IO PDIR entry
622 * @vba: Virtual CPU address of buffer to map
624 * SBA Mapping Routine
626 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
627 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
628 * Each IO Pdir entry consists of 8 bytes as shown below
632 * +-+---------------------+----------------------------------+----+--------+
633 * |V| U | PPN[39:12] | U | FF |
634 * +-+---------------------+----------------------------------+----+--------+
638 * PPN == Physical Page Number
640 * The physical address fields are filled with the results of virt_to_phys()
645 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
646 | 0x8000000000000000ULL)
649 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
651 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
655 #ifdef ENABLE_MARK_CLEAN
657 * Since DMA is i-cache coherent, any (complete) pages that were written via
658 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
659 * flush them when they get mapped into an executable vm-area.
662 mark_clean (void *addr, size_t size)
664 unsigned long pg_addr, end;
666 pg_addr = PAGE_ALIGN((unsigned long) addr);
667 end = (unsigned long) addr + size;
668 while (pg_addr + PAGE_SIZE <= end) {
669 struct page *page = virt_to_page((void *)pg_addr);
670 set_bit(PG_arch_1, &page->flags);
671 pg_addr += PAGE_SIZE;
677 * sba_mark_invalid - invalidate one or more IO PDIR entries
678 * @ioc: IO MMU structure which owns the pdir we are interested in.
679 * @iova: IO Virtual Address mapped earlier
680 * @byte_cnt: number of bytes this mapping covers.
682 * Marking the IO PDIR entry(ies) as Invalid and invalidate
683 * corresponding IO TLB entry. The PCOM (Purge Command Register)
684 * is to purge stale entries in the IO TLB when unmapping entries.
686 * The PCOM register supports purging of multiple pages, with a minium
687 * of 1 page and a maximum of 2GB. Hardware requires the address be
688 * aligned to the size of the range being purged. The size of the range
689 * must be a power of 2. The "Cool perf optimization" in the
690 * allocation routine helps keep that true.
692 static SBA_INLINE void
693 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
695 u32 iovp = (u32) SBA_IOVP(ioc,iova);
697 int off = PDIR_INDEX(iovp);
699 /* Must be non-zero and rounded up */
700 ASSERT(byte_cnt > 0);
701 ASSERT(0 == (byte_cnt & ~IOVP_MASK));
703 #ifdef ASSERT_PDIR_SANITY
704 /* Assert first pdir entry is set */
705 if (!(ioc->pdir_base[off] >> 60)) {
706 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
710 if (byte_cnt <= IOVP_SIZE)
712 ASSERT(off < ioc->pdir_size);
714 iovp |= IOVP_SHIFT; /* set "size" field for PCOM */
716 #ifndef FULL_VALID_PDIR
718 ** clear I/O PDIR entry "valid" bit
719 ** Do NOT clear the rest - save it for debugging.
720 ** We should only clear bits that have previously
723 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
726 ** If we want to maintain the PDIR as valid, put in
727 ** the spill page so devices prefetching won't
728 ** cause a hard fail.
730 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
733 u32 t = get_order(byte_cnt) + PAGE_SHIFT;
736 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
739 /* verify this pdir entry is enabled */
740 ASSERT(ioc->pdir_base[off] >> 63);
741 #ifndef FULL_VALID_PDIR
742 /* clear I/O Pdir entry "valid" bit first */
743 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
745 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
748 byte_cnt -= IOVP_SIZE;
749 } while (byte_cnt > 0);
752 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
756 * sba_map_single - map one buffer and return IOVA for DMA
757 * @dev: instance of PCI owned by the driver that's asking.
758 * @addr: driver buffer to map.
759 * @size: number of bytes to map in driver buffer.
762 * See Documentation/DMA-mapping.txt
765 sba_map_single(struct device *dev, void *addr, size_t size, int dir)
773 #ifdef ALLOW_IOV_BYPASS
774 unsigned long pci_addr = virt_to_phys(addr);
780 #ifdef ALLOW_IOV_BYPASS
782 ** Check if the PCI device can DMA to ptr... if so, just return ptr
784 if (dev && dev->dma_mask && (pci_addr & ~*dev->dma_mask) == 0) {
786 ** Device is bit capable of DMA'ing to the buffer...
787 ** just return the PCI address of ptr
789 #ifdef CONFIG_PROC_FS
790 spin_lock_irqsave(&ioc->res_lock, flags);
791 ioc->msingle_bypass++;
792 spin_unlock_irqrestore(&ioc->res_lock, flags);
794 DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
795 *dev->dma_mask, pci_addr);
801 ASSERT(size <= DMA_CHUNK_SIZE);
803 /* save offset bits */
804 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
806 /* round up to nearest IOVP_SIZE */
807 size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
809 spin_lock_irqsave(&ioc->res_lock, flags);
810 #ifdef ASSERT_PDIR_SANITY
811 if (sba_check_pdir(ioc,"Check before sba_map_single()"))
812 panic("Sanity check failed");
815 #ifdef CONFIG_PROC_FS
816 ioc->msingle_calls++;
817 ioc->msingle_pages += size >> IOVP_SHIFT;
819 pide = sba_alloc_range(ioc, size);
820 iovp = (dma_addr_t) pide << IOVP_SHIFT;
822 DBG_RUN("%s() 0x%p -> 0x%lx\n",
823 __FUNCTION__, addr, (long) iovp | offset);
825 pdir_start = &(ioc->pdir_base[pide]);
828 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
829 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
831 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
837 /* force pdir update */
840 /* form complete address */
841 #ifdef ASSERT_PDIR_SANITY
842 sba_check_pdir(ioc,"Check after sba_map_single()");
844 spin_unlock_irqrestore(&ioc->res_lock, flags);
845 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
849 * sba_unmap_single - unmap one IOVA and free resources
850 * @dev: instance of PCI owned by the driver that's asking.
851 * @iova: IOVA of driver buffer previously mapped.
852 * @size: number of bytes mapped in driver buffer.
855 * See Documentation/DMA-mapping.txt
857 void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
860 #if DELAYED_RESOURCE_CNT > 0
861 struct sba_dma_pair *d;
869 #ifdef ALLOW_IOV_BYPASS
870 if ((iova & ioc->imask) != ioc->ibase) {
872 ** Address does not fall w/in IOVA, must be bypassing
874 #ifdef CONFIG_PROC_FS
875 spin_lock_irqsave(&ioc->res_lock, flags);
876 ioc->usingle_bypass++;
877 spin_unlock_irqrestore(&ioc->res_lock, flags);
879 DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
881 #ifdef ENABLE_MARK_CLEAN
882 if (dir == DMA_FROM_DEVICE) {
883 mark_clean(phys_to_virt(iova), size);
889 offset = iova & ~IOVP_MASK;
891 DBG_RUN("%s() iovp 0x%lx/%x\n",
892 __FUNCTION__, (long) iova, size);
894 iova ^= offset; /* clear offset bits */
896 size = ROUNDUP(size, IOVP_SIZE);
898 spin_lock_irqsave(&ioc->res_lock, flags);
899 #ifdef CONFIG_PROC_FS
900 ioc->usingle_calls++;
901 ioc->usingle_pages += size >> IOVP_SHIFT;
904 #if DELAYED_RESOURCE_CNT > 0
905 d = &(ioc->saved[ioc->saved_cnt]);
908 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
909 int cnt = ioc->saved_cnt;
911 sba_mark_invalid(ioc, d->iova, d->size);
912 sba_free_range(ioc, d->iova, d->size);
916 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
918 #else /* DELAYED_RESOURCE_CNT == 0 */
919 sba_mark_invalid(ioc, iova, size);
920 sba_free_range(ioc, iova, size);
921 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
922 #endif /* DELAYED_RESOURCE_CNT == 0 */
923 #ifdef ENABLE_MARK_CLEAN
924 if (dir == DMA_FROM_DEVICE) {
925 u32 iovp = (u32) SBA_IOVP(ioc,iova);
926 int off = PDIR_INDEX(iovp);
929 if (size <= IOVP_SIZE) {
930 addr = phys_to_virt(ioc->pdir_base[off] &
931 ~0xE000000000000FFFULL);
932 mark_clean(addr, size);
934 size_t byte_cnt = size;
937 addr = phys_to_virt(ioc->pdir_base[off] &
938 ~0xE000000000000FFFULL);
939 mark_clean(addr, min(byte_cnt, IOVP_SIZE));
941 byte_cnt -= IOVP_SIZE;
943 } while (byte_cnt > 0);
947 spin_unlock_irqrestore(&ioc->res_lock, flags);
949 /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
950 ** For Astro based systems this isn't a big deal WRT performance.
951 ** As long as 2.4 kernels copyin/copyout data from/to userspace,
952 ** we don't need the syncdma. The issue here is I/O MMU cachelines
953 ** are *not* coherent in all cases. May be hwrev dependent.
954 ** Need to investigate more.
955 asm volatile("syncdma");
961 * sba_alloc_consistent - allocate/map shared mem for DMA
962 * @hwdev: instance of PCI owned by the driver that's asking.
963 * @size: number of bytes mapped in driver buffer.
964 * @dma_handle: IOVA of new buffer.
966 * See Documentation/DMA-mapping.txt
969 sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
974 addr = (void *) __get_free_pages(flags, get_order(size));
979 * REVISIT: if sba_map_single starts needing more than dma_mask from the
980 * device, this needs to be updated.
982 ioc = GET_IOC(hwdev);
984 *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
986 memset(addr, 0, size);
992 * sba_free_consistent - free/unmap shared mem for DMA
993 * @hwdev: instance of PCI owned by the driver that's asking.
994 * @size: number of bytes mapped in driver buffer.
995 * @vaddr: virtual address IOVA of "consistent" buffer.
996 * @dma_handler: IO virtual address of "consistent" buffer.
998 * See Documentation/DMA-mapping.txt
1000 void sba_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
1002 sba_unmap_single(hwdev, dma_handle, size, 0);
1003 free_pages((unsigned long) vaddr, get_order(size));
1008 ** Since 0 is a valid pdir_base index value, can't use that
1009 ** to determine if a value is valid or not. Use a flag to indicate
1010 ** the SG list entry contains a valid pdir index.
1012 #define PIDE_FLAG 0x1UL
1014 #ifdef DEBUG_LARGE_SG_ENTRIES
1015 int dump_run_sg = 0;
1020 * sba_fill_pdir - write allocated SG entries into IO PDIR
1021 * @ioc: IO MMU structure which owns the pdir we are interested in.
1022 * @startsg: list of IOVA/size pairs
1023 * @nents: number of entries in startsg list
1025 * Take preprocessed SG list and write corresponding entries
1029 static SBA_INLINE int
1032 struct scatterlist *startsg,
1035 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1038 unsigned long dma_offset = 0;
1041 while (nents-- > 0) {
1042 int cnt = startsg->dma_length;
1043 startsg->dma_length = 0;
1045 #ifdef DEBUG_LARGE_SG_ENTRIES
1047 printk(" %2d : %08lx/%05x %p\n",
1048 nents, startsg->dma_address, cnt,
1049 sba_sg_address(startsg));
1051 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1052 nents, startsg->dma_address, cnt,
1053 sba_sg_address(startsg));
1056 ** Look for the start of a new DMA stream
1058 if (startsg->dma_address & PIDE_FLAG) {
1059 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1060 dma_offset = (unsigned long) pide & ~IOVP_MASK;
1061 startsg->dma_address = 0;
1063 dma_sg->dma_address = pide | ioc->ibase;
1064 pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
1069 ** Look for a VCONTIG chunk
1072 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1075 /* Since multiple Vcontig blocks could make up
1076 ** one DMA stream, *add* cnt to dma_len.
1078 dma_sg->dma_length += cnt;
1080 dma_offset=0; /* only want offset on first chunk */
1081 cnt = ROUNDUP(cnt, IOVP_SIZE);
1082 #ifdef CONFIG_PROC_FS
1083 ioc->msg_pages += cnt >> IOVP_SHIFT;
1086 sba_io_pdir_entry(pdirp, vaddr);
1094 /* force pdir update */
1097 #ifdef DEBUG_LARGE_SG_ENTRIES
1105 ** Two address ranges are DMA contiguous *iff* "end of prev" and
1106 ** "start of next" are both on a page boundry.
1108 ** (shift left is a quick trick to mask off upper bits)
1110 #define DMA_CONTIG(__X, __Y) \
1111 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
1115 * sba_coalesce_chunks - preprocess the SG list
1116 * @ioc: IO MMU structure which owns the pdir we are interested in.
1117 * @startsg: list of IOVA/size pairs
1118 * @nents: number of entries in startsg list
1120 * First pass is to walk the SG list and determine where the breaks are
1121 * in the DMA stream. Allocates PDIR entries but does not fill them.
1122 * Returns the number of DMA chunks.
1124 * Doing the fill seperate from the coalescing/allocation keeps the
1125 * code simpler. Future enhancement could make one pass through
1126 * the sglist do both.
1128 static SBA_INLINE int
1129 sba_coalesce_chunks( struct ioc *ioc,
1130 struct scatterlist *startsg,
1133 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1134 unsigned long vcontig_len; /* len of VCONTIG chunk */
1135 unsigned long vcontig_end;
1136 struct scatterlist *dma_sg; /* next DMA stream head */
1137 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1141 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1144 ** Prepare for first/next DMA stream
1146 dma_sg = vcontig_sg = startsg;
1147 dma_len = vcontig_len = vcontig_end = startsg->length;
1148 vcontig_end += vaddr;
1149 dma_offset = vaddr & ~IOVP_MASK;
1151 /* PARANOID: clear entries */
1152 startsg->dma_address = startsg->dma_length = 0;
1155 ** This loop terminates one iteration "early" since
1156 ** it's always looking one "ahead".
1158 while (--nents > 0) {
1159 unsigned long vaddr; /* tmp */
1164 startsg->dma_address = startsg->dma_length = 0;
1166 /* catch brokenness in SCSI layer */
1167 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1170 ** First make sure current dma stream won't
1171 ** exceed DMA_CHUNK_SIZE if we coalesce the
1174 if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK)
1179 ** Then look for virtually contiguous blocks.
1181 ** append the next transaction?
1183 vaddr = (unsigned long) sba_sg_address(startsg);
1184 if (vcontig_end == vaddr)
1186 vcontig_len += startsg->length;
1187 vcontig_end += startsg->length;
1188 dma_len += startsg->length;
1192 #ifdef DEBUG_LARGE_SG_ENTRIES
1193 dump_run_sg = (vcontig_len > IOVP_SIZE);
1197 ** Not virtually contigous.
1198 ** Terminate prev chunk.
1199 ** Start a new chunk.
1201 ** Once we start a new VCONTIG chunk, dma_offset
1202 ** can't change. And we need the offset from the first
1203 ** chunk - not the last one. Ergo Successive chunks
1204 ** must start on page boundaries and dove tail
1205 ** with it's predecessor.
1207 vcontig_sg->dma_length = vcontig_len;
1209 vcontig_sg = startsg;
1210 vcontig_len = startsg->length;
1213 ** 3) do the entries end/start on page boundaries?
1214 ** Don't update vcontig_end until we've checked.
1216 if (DMA_CONTIG(vcontig_end, vaddr))
1218 vcontig_end = vcontig_len + vaddr;
1219 dma_len += vcontig_len;
1227 ** End of DMA Stream
1228 ** Terminate last VCONTIG block.
1229 ** Allocate space for DMA stream.
1231 vcontig_sg->dma_length = vcontig_len;
1232 dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
1233 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1234 dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
1235 | (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
1245 * sba_map_sg - map Scatter/Gather list
1246 * @dev: instance of PCI owned by the driver that's asking.
1247 * @sglist: array of buffer/length pairs
1248 * @nents: number of entries in list
1249 * @dir: R/W or both.
1251 * See Documentation/DMA-mapping.txt
1253 int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
1256 int coalesced, filled = 0;
1257 unsigned long flags;
1258 #ifdef ALLOW_IOV_BYPASS
1259 struct scatterlist *sg;
1262 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
1266 #ifdef ALLOW_IOV_BYPASS
1267 if (dev && dev->dma_mask && (ioc->dma_mask & ~*dev->dma_mask) == 0) {
1268 for (sg = sglist ; filled < nents ; filled++, sg++){
1269 sg->dma_length = sg->length;
1270 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1272 #ifdef CONFIG_PROC_FS
1273 spin_lock_irqsave(&ioc->res_lock, flags);
1275 spin_unlock_irqrestore(&ioc->res_lock, flags);
1280 /* Fast path single entry scatterlists. */
1282 sglist->dma_length = sglist->length;
1283 sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length,
1285 #ifdef CONFIG_PROC_FS
1287 ** Should probably do some stats counting, but trying to
1288 ** be precise quickly starts wasting CPU time.
1294 spin_lock_irqsave(&ioc->res_lock, flags);
1296 #ifdef ASSERT_PDIR_SANITY
1297 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
1299 sba_dump_sg(ioc, sglist, nents);
1300 panic("Check before sba_map_sg()");
1304 #ifdef CONFIG_PROC_FS
1309 ** First coalesce the chunks and allocate I/O pdir space
1311 ** If this is one DMA stream, we can properly map using the
1312 ** correct virtual address associated with each DMA page.
1313 ** w/o this association, we wouldn't have coherent DMA!
1314 ** Access to the virtual address is what forces a two pass algorithm.
1316 coalesced = sba_coalesce_chunks(ioc, sglist, nents);
1319 ** Program the I/O Pdir
1321 ** map the virtual addresses to the I/O Pdir
1322 ** o dma_address will contain the pdir index
1323 ** o dma_len will contain the number of bytes to map
1324 ** o address contains the virtual address.
1326 filled = sba_fill_pdir(ioc, sglist, nents);
1328 #ifdef ASSERT_PDIR_SANITY
1329 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1331 sba_dump_sg(ioc, sglist, nents);
1332 panic("Check after sba_map_sg()\n");
1336 spin_unlock_irqrestore(&ioc->res_lock, flags);
1338 ASSERT(coalesced == filled);
1339 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
1346 * sba_unmap_sg - unmap Scatter/Gather list
1347 * @dev: instance of PCI owned by the driver that's asking.
1348 * @sglist: array of buffer/length pairs
1349 * @nents: number of entries in list
1350 * @dir: R/W or both.
1352 * See Documentation/DMA-mapping.txt
1354 void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
1357 #ifdef ASSERT_PDIR_SANITY
1358 unsigned long flags;
1361 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1362 __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
1367 #ifdef CONFIG_PROC_FS
1371 #ifdef ASSERT_PDIR_SANITY
1372 spin_lock_irqsave(&ioc->res_lock, flags);
1373 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1374 spin_unlock_irqrestore(&ioc->res_lock, flags);
1377 while (nents && sglist->dma_length) {
1379 sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
1380 #ifdef CONFIG_PROC_FS
1382 ** This leaves inconsistent data in the stats, but we can't
1383 ** tell which sg lists were mapped by map_single and which
1384 ** were coalesced to a single entry. The stats are fun,
1385 ** but speed is more important.
1387 ioc->usg_pages += ((sglist->dma_address & ~IOVP_MASK) + sglist->dma_length
1388 + IOVP_SIZE - 1) >> PAGE_SHIFT;
1394 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1396 #ifdef ASSERT_PDIR_SANITY
1397 spin_lock_irqsave(&ioc->res_lock, flags);
1398 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1399 spin_unlock_irqrestore(&ioc->res_lock, flags);
1404 /**************************************************************
1406 * Initialization and claim
1408 ***************************************************************/
1411 ioc_iova_init(struct ioc *ioc)
1413 u32 iova_space_mask;
1414 int iov_order, tcnfg;
1416 struct pci_dev *device;
1417 #ifdef FULL_VALID_PDIR
1418 unsigned long index;
1422 ** Firmware programs the base and size of a "safe IOVA space"
1423 ** (one that doesn't overlap memory or LMMIO space) in the
1424 ** IBASE and IMASK registers.
1426 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1427 ioc->iov_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1430 ** iov_order is always based on a 1GB IOVA space since we want to
1431 ** turn on the other half for AGP GART.
1433 iov_order = get_order(ioc->iov_size >> (IOVP_SHIFT - PAGE_SHIFT));
1434 ioc->pdir_size = (ioc->iov_size / IOVP_SIZE) * sizeof(u64);
1436 DBG_INIT("%s() hpa %p IOV %dMB (%d bits) PDIR size 0x%x\n",
1437 __FUNCTION__, ioc->ioc_hpa, ioc->iov_size >> 20,
1438 iov_order + PAGE_SHIFT, ioc->pdir_size);
1440 /* FIXME : DMA HINTs not used */
1441 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1442 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1444 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1445 get_order(ioc->pdir_size));
1446 if (!ioc->pdir_base)
1447 panic(PFX "Couldn't allocate I/O Page Table\n");
1449 memset(ioc->pdir_base, 0, ioc->pdir_size);
1451 DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
1452 __FUNCTION__, ioc->pdir_base, ioc->pdir_size,
1453 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1455 ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
1456 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1458 DBG_INIT(" base %p\n", ioc->pdir_base);
1460 /* build IMASK for IOC and Elroy */
1461 iova_space_mask = 0xffffffff;
1462 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1463 ioc->imask = iova_space_mask;
1465 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1466 __FUNCTION__, ioc->ibase, ioc->imask);
1469 ** FIXME: Hint registers are programmed with default hint
1470 ** values during boot, so hints should be sane even if we
1471 ** can't reprogram them the way drivers want.
1473 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1476 ** Setting the upper bits makes checking for bypass addresses
1477 ** a little faster later on.
1479 ioc->imask |= 0xFFFFFFFF00000000UL;
1481 /* Set I/O PDIR Page size to system page size */
1482 switch (PAGE_SHIFT) {
1483 case 12: tcnfg = 0; break; /* 4K */
1484 case 13: tcnfg = 1; break; /* 8K */
1485 case 14: tcnfg = 2; break; /* 16K */
1486 case 16: tcnfg = 3; break; /* 64K */
1488 panic(PFX "Unsupported system page size %d",
1492 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1495 ** Program the IOC's ibase and enable IOVA translation
1496 ** Bit zero == enable bit.
1498 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1501 ** Clear I/O TLB of any possible entries.
1502 ** (Yes. This is a bit paranoid...but so what)
1504 WRITE_REG(ioc->ibase | (iov_order+PAGE_SHIFT), ioc->ioc_hpa + IOC_PCOM);
1507 ** If an AGP device is present, only use half of the IOV space
1508 ** for PCI DMA. Unfortunately we can't know ahead of time
1509 ** whether GART support will actually be used, for now we
1510 ** can just key on an AGP device found in the system.
1511 ** We program the next pdir index after we stop w/ a key for
1512 ** the GART code to handshake on.
1514 pci_for_each_dev(device)
1515 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1517 if (agp_found && reserve_sba_gart) {
1518 DBG_INIT("%s: AGP device found, reserving half of IOVA for GART support\n",
1520 ioc->pdir_size /= 2;
1521 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1523 #ifdef FULL_VALID_PDIR
1525 ** Check to see if the spill page has been allocated, we don't need more than
1526 ** one across multiple SBAs.
1528 if (!prefetch_spill_page) {
1529 char *spill_poison = "SBAIOMMU POISON";
1530 int poison_size = 16;
1531 void *poison_addr, *addr;
1533 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(IOVP_SIZE));
1535 panic(PFX "Couldn't allocate PDIR spill page\n");
1538 for ( ; (u64) poison_addr < addr + IOVP_SIZE; poison_addr += poison_size)
1539 memcpy(poison_addr, spill_poison, poison_size);
1541 prefetch_spill_page = virt_to_phys(addr);
1543 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
1546 ** Set all the PDIR entries valid w/ the spill page as the target
1548 for (index = 0 ; index < (ioc->pdir_size / sizeof(u64)) ; index++)
1549 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1555 ioc_resource_init(struct ioc *ioc)
1557 spin_lock_init(&ioc->res_lock);
1559 /* resource map size dictated by pdir_size */
1560 ioc->res_size = ioc->pdir_size / sizeof(u64); /* entries */
1561 ioc->res_size >>= 3; /* convert bit count to byte count */
1562 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
1564 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1565 get_order(ioc->res_size));
1567 panic(PFX "Couldn't allocate resource map\n");
1569 memset(ioc->res_map, 0, ioc->res_size);
1570 /* next available IOVP - circular search */
1571 ioc->res_hint = (unsigned long *) ioc->res_map;
1573 #ifdef ASSERT_PDIR_SANITY
1574 /* Mark first bit busy - ie no IOVA 0 */
1575 ioc->res_map[0] = 0x1;
1576 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1578 #ifdef FULL_VALID_PDIR
1579 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1580 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1581 ioc->pdir_base[(ioc->pdir_size / sizeof(u64)) - 1] = (0x80000000000000FF
1582 | prefetch_spill_page);
1585 DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
1586 ioc->res_size, (void *) ioc->res_map);
1590 ioc_sac_init(struct ioc *ioc)
1592 struct pci_dev *sac = NULL;
1593 struct pci_controller *controller = NULL;
1596 * pci_alloc_coherent() must return a DMA address which is
1597 * SAC (single address cycle) addressable, so allocate a
1598 * pseudo-device to enforce that.
1600 sac = kmalloc(sizeof(*sac), GFP_KERNEL);
1602 panic(PFX "Couldn't allocate struct pci_dev");
1603 memset(sac, 0, sizeof(*sac));
1605 controller = kmalloc(sizeof(*controller), GFP_KERNEL);
1607 panic(PFX "Couldn't allocate struct pci_controller");
1608 memset(controller, 0, sizeof(*controller));
1610 controller->iommu = ioc;
1611 sac->sysdata = controller;
1612 sac->dma_mask = 0xFFFFFFFFUL;
1614 sac->dev.bus = &pci_bus_type;
1616 ioc->sac_only_dev = sac;
1620 ioc_zx1_init(struct ioc *ioc)
1622 if (ioc->rev < 0x20)
1623 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1625 ioc->dma_mask = 0xFFFFFFFFFFUL;
1628 typedef void (initfunc)(struct ioc *);
1636 static struct ioc_iommu ioc_iommu_info[] __initdata = {
1637 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1638 { REO_IOC_ID, "REO" },
1639 { SX1000_IOC_ID, "sx1000" },
1642 static struct ioc * __init
1643 ioc_init(u64 hpa, void *handle)
1646 struct ioc_iommu *info;
1648 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
1652 memset(ioc, 0, sizeof(*ioc));
1654 ioc->next = ioc_list;
1657 ioc->handle = handle;
1658 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1660 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1661 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1662 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1664 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1665 if (ioc->func_id == info->func_id) {
1666 ioc->name = info->name;
1673 ioc->name = kmalloc(24, GFP_KERNEL);
1675 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1676 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1678 ioc->name = "Unknown";
1682 ioc_resource_init(ioc);
1685 printk(KERN_INFO PFX
1686 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1687 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1688 hpa, ioc->iov_size >> 20, ioc->ibase);
1695 /**************************************************************************
1697 ** SBA initialization code (HW and SW)
1699 ** o identify SBA chip itself
1700 ** o FIXME: initialize DMA hints for reasonable defaults
1702 **************************************************************************/
1704 #ifdef CONFIG_PROC_FS
1706 ioc_start(struct seq_file *s, loff_t *pos)
1711 for (ioc = ioc_list; ioc; ioc = ioc->next)
1719 ioc_next(struct seq_file *s, void *v, loff_t *pos)
1721 struct ioc *ioc = v;
1728 ioc_stop(struct seq_file *s, void *v)
1733 ioc_show(struct seq_file *s, void *v)
1735 struct ioc *ioc = v;
1736 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
1737 unsigned long i = 0, avg = 0, min, max;
1739 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1740 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1741 seq_printf(s, "IO PDIR size : %d bytes (%d entries)\n",
1742 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
1745 seq_printf(s, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1746 total_pages - ioc->used_pages, ioc->used_pages,
1747 (int) (ioc->used_pages * 100 / total_pages));
1749 seq_printf(s, "Resource bitmap : %d bytes (%d pages)\n",
1750 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
1752 min = max = ioc->avg_search[0];
1753 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1754 avg += ioc->avg_search[i];
1755 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1756 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1758 avg /= SBA_SEARCH_SAMPLE;
1759 seq_printf(s, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", min, avg, max);
1761 seq_printf(s, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1762 ioc->msingle_calls, ioc->msingle_pages,
1763 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1764 #ifdef ALLOW_IOV_BYPASS
1765 seq_printf(s, "pci_map_single(): %12ld bypasses\n", ioc->msingle_bypass);
1768 seq_printf(s, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1769 ioc->usingle_calls, ioc->usingle_pages,
1770 (int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls));
1771 #ifdef ALLOW_IOV_BYPASS
1772 seq_printf(s, "pci_unmap_single: %12ld bypasses\n", ioc->usingle_bypass);
1775 seq_printf(s, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1776 ioc->msg_calls, ioc->msg_pages,
1777 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1778 #ifdef ALLOW_IOV_BYPASS
1779 seq_printf(s, "pci_map_sg() : %12ld bypasses\n", ioc->msg_bypass);
1782 seq_printf(s, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1783 ioc->usg_calls, ioc->usg_pages, (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1788 static struct seq_operations ioc_seq_ops = {
1796 ioc_open(struct inode *inode, struct file *file)
1798 return seq_open(file, &ioc_seq_ops);
1801 static struct file_operations ioc_fops = {
1804 .llseek = seq_lseek,
1805 .release = seq_release
1809 ioc_map_show(struct seq_file *s, void *v)
1811 struct ioc *ioc = v;
1812 unsigned int i, *res_ptr = (unsigned int *)ioc->res_map;
1814 for (i = 0; i < ioc->res_size / sizeof(unsigned int); ++i, ++res_ptr)
1815 seq_printf(s, "%s%08x", (i & 7) ? " " : "\n ", *res_ptr);
1816 seq_printf(s, "\n");
1821 static struct seq_operations ioc_map_ops = {
1825 .show = ioc_map_show
1829 ioc_map_open(struct inode *inode, struct file *file)
1831 return seq_open(file, &ioc_map_ops);
1834 static struct file_operations ioc_map_fops = {
1835 .open = ioc_map_open,
1837 .llseek = seq_lseek,
1838 .release = seq_release
1845 struct proc_dir_entry *dir, *entry;
1847 dir = proc_mkdir("bus/mckinley", 0);
1848 entry = create_proc_entry(ioc_list->name, 0, dir);
1850 entry->proc_fops = &ioc_fops;
1852 entry = create_proc_entry("bitmap", 0, dir);
1854 entry->proc_fops = &ioc_map_fops;
1860 sba_connect_bus(struct pci_bus *bus)
1862 acpi_handle handle, parent;
1866 if (!PCI_CONTROLLER(bus))
1867 panic(PFX "no sysdata on bus %d!\n",bus->number);
1869 if (PCI_CONTROLLER(bus)->iommu)
1872 handle = PCI_CONTROLLER(bus)->acpi_handle;
1877 * The IOC scope encloses PCI root bridges in the ACPI
1878 * namespace, so work our way out until we find an IOC we
1879 * claimed previously.
1882 for (ioc = ioc_list; ioc; ioc = ioc->next)
1883 if (ioc->handle == handle) {
1884 PCI_CONTROLLER(bus)->iommu = ioc;
1888 status = acpi_get_parent(handle, &parent);
1890 } while (ACPI_SUCCESS(status));
1892 printk(KERN_WARNING "No IOC for PCI Bus %02x:%02x in ACPI\n", PCI_SEGMENT(bus), bus->number);
1896 acpi_sba_ioc_add(struct acpi_device *device)
1901 struct acpi_device_info dev_info;
1903 status = hp_acpi_csr_space(device->handle, &hpa, &length);
1904 if (ACPI_FAILURE(status))
1907 status = acpi_get_object_info(device->handle, &dev_info);
1908 if (ACPI_FAILURE(status))
1912 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
1913 * root bridges, and its CSR space includes the IOC function.
1915 if (strncmp("HWP0001", dev_info.hardware_id, 7) == 0)
1916 hpa += ZX1_IOC_OFFSET;
1918 ioc = ioc_init(hpa, device->handle);
1925 static struct acpi_driver acpi_sba_ioc_driver = {
1926 name: "IOC IOMMU Driver",
1927 ids: "HWP0001,HWP0004",
1929 add: acpi_sba_ioc_add,
1936 MAX_DMA_ADDRESS = ~0UL;
1938 acpi_bus_register_driver(&acpi_sba_ioc_driver);
1948 #ifdef CONFIG_PROC_FS
1954 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
1957 nosbagart(char *str)
1959 reserve_sba_gart = 0;
1964 sba_dma_supported (struct device *dev, u64 mask)
1966 /* make sure it's at least 32bit capable */
1967 return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
1970 __setup("nosbagart", nosbagart);
1972 EXPORT_SYMBOL(sba_map_single);
1973 EXPORT_SYMBOL(sba_unmap_single);
1974 EXPORT_SYMBOL(sba_map_sg);
1975 EXPORT_SYMBOL(sba_unmap_sg);
1976 EXPORT_SYMBOL(sba_dma_supported);
1977 EXPORT_SYMBOL(sba_alloc_coherent);
1978 EXPORT_SYMBOL(sba_free_coherent);