/*
**  IA64 System Bus Adapter (SBA) I/O MMU manager
**
**	(c) Copyright 2002 Alex Williamson
**	(c) Copyright 2002 Grant Grundler
**	(c) Copyright 2002 Hewlett-Packard Company
**
**	Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
**	Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**	This program is free software; you can redistribute it and/or modify
**	it under the terms of the GNU General Public License as published by
**	the Free Software Foundation; either version 2 of the License, or
**	(at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#define DRIVER_NAME "SBA"

#define ALLOW_IOV_BYPASS
#define ENABLE_MARK_CLEAN
/*
** The number of debug flags is a clue - this code is fragile.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_SBA_BYPASS
#define SBA_INLINE	__inline__
/* #define SBA_INLINE */
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_SBA_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif
#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
	if (!(expr)) printk("\n" __FILE__ ":%d: Assertion " #expr " failed!\n", __LINE__);
#else
#define ASSERT(expr)
#endif
#define KB(x)	((x) * 1024)
#define MB(x)	(KB (KB (x)))
#define GB(x)	(MB (KB (x)))
/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	16
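
/*
** Illustrative note (not from the original source): with the value 16,
** sba_unmap_single() queues up to 16 (iova, size) pairs in ioc->saved[]
** and then invalidates and frees them in one batch, paying for a single
** flushing read of IOC_PCOM per batch instead of one per unmap.
*/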
#define DEFAULT_DMA_HINT_REG(d)	0

#define ZX1_FUNC_ID_VALUE	((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP)
#define ZX1_MC_ID	((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP)
#define SBA_FUNC_ID	0x0000	/* function id */
#define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */

#define SBA_FUNC_SIZE	0x10000	/* SBA configuration function reg set */
unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
					      0x9000, 0xa000, -1};
#define SBA_IOC_OFFSET	0x1000

#define MAX_IOC	1	/* we only have 1 for now */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_IOVA_SPACE_BASE	0x40000000	/* IOVA ranges start at 1GB */
/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** It's safer (avoid memory corruption) to keep DMA page mappings
** equivalently sized to VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
** IOVP_SIZE could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
#define IOVP_SIZE	PAGE_SIZE
#define IOVP_SHIFT	PAGE_SHIFT
#define IOVP_MASK	PAGE_MASK
struct ioc {
	unsigned long	ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	spinlock_t	res_lock;
	unsigned long	hint_mask_pdir;	/* bits used for DMA hints */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
	unsigned int	hint_shift_pdir;
	unsigned long	dma_mask;
#if DELAYED_RESOURCE_CNT > 0
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef CONFIG_PROC_FS
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;	/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#ifdef ALLOW_IOV_BYPASS
	unsigned long msingle_bypass;
	unsigned long usingle_bypass;
	unsigned long msg_bypass;
#endif
#endif
	/* STUFF We don't need in performance path */
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
};

struct sba_device {
	struct sba_device	*next;	/* list of SBA's in system */
	unsigned long		sba_hpa; /* base address */
	spinlock_t		sba_lock;
	unsigned int		flags;	/* state/functionality enabled */
	unsigned int		hw_rev;	/* HW revision of chip */

	unsigned int		num_ioc; /* number of on-board IOC's */
	struct ioc		ioc[MAX_IOC];
};
static struct sba_device *sba_list;
static int sba_count;
static int reserve_sba_gart = 1;
static struct pci_dev sac_only_dev;
#define sba_sg_address(sg)	(page_address((sg)->page) + (sg)->offset)
#define sba_sg_len(sg)		(sg->length)
#define sba_sg_iova(sg)		(sg->dma_address)
#define sba_sg_iova_len(sg)	(sg->dma_length)
/* REVISIT - fix me for multiple SBAs/IOCs */
#define GET_IOC(dev)	(sba_list->ioc)
#define SBA_SET_AGP(sba_dev)	(sba_dev->flags |= 0x1)
#define SBA_GET_AGP(sba_dev)	(sba_dev->flags & 0x1)
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMA's into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*IOVP_SIZE)
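
/*
** Worked example (illustrative, not from the original source): with
** 64-bit longs and 4KB pages, DMA_CHUNK_SIZE == 64 * 4KB == 256KB,
** i.e. exactly one resource-map word's worth of IO Pdir entries.
*/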
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d)	((struct sba_device *) (d))

#define ROUNDUP(x,y)	(((x) + ((y)-1)) & ~((y)-1))
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
************************************/
#define READ_REG(addr)		__raw_readq(addr)
#define WRITE_REG(val, addr)	__raw_writeq(val, addr)
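
/*
** Minimal sketch (illustrative, not part of the driver) of the rule
** above: a posted purge write is forced out to the hardware by reading
** the same register back.  The function name is hypothetical.
*/
#if 0
static void example_flush_posted_write(struct ioc *ioc)
{
	WRITE_REG(0 | 31, ioc->ioc_hpa + IOC_PCOM);	/* posted write... */
	(void) READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* ...the read forces it to HW */
}
#endif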
#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU Pdir.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
}
#endif
#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU Pdir in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
	       msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
		       ? "    -->" : "       ",
		       rcnt, ptr, *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval = *rptr;
		int rcnt = BITS_PER_LONG;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde) {
				/*
				** BUMMER! -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return 1;
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       (unsigned long) sba_sg_iova(startsg), sba_sg_iova_len(startsg),
		       sba_sg_address(startsg));
		startsg++;
	}
}

static void
sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
#define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))

#define PDIR_INDEX(iovp)	((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)		~(~0UL << (n))
#define RESMAP_IDX_MASK		(sizeof(unsigned long) - 1)
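
/*
** Worked example (illustrative, not from the original source), assuming
** 4KB pages, ioc->ibase == 0x40000000, a 1GB IOV space (hint_shift_pdir
** == 30) and a zero hint register:
**
**	pide == 0x10 (bit 16 of the resource map)
**	iovp == pide << IOVP_SHIFT	== 0x10000
**	SBA_IOVA(ioc, iovp, 0x123, 0)	== 0x40010123
**	SBA_IOVP(ioc, 0x40010123)	== 0x10123
**	PDIR_INDEX(0x10123)		== 0x10  (the original pide)
*/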
/**
 * sba_search_bitmap - find free space in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);
	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for (; res_ptr < res_end; ++res_ptr) {
			if (*res_ptr == 0) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1UL << get_order(bits_wanted << IOVP_SHIFT);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
		while (res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			if (0 == ((*res_ptr) & mask)) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				break;
			}
			mask <<= o;
			bitshiftcnt += o;
			if (0 == mask) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
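
/*
** Illustrative note (not from the original source): for bits_wanted == 4
** and 4KB pages, get_order(4 << IOVP_SHIFT) == 2, so the alignment "o"
** is 4 and candidate bit positions are multiples of 4 within a word.
** The resulting 16KB-aligned IOVA range is exactly what sba_mark_invalid()
** needs in order to purge the whole range with a single IOC_PCOM write.
*/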
/**
 * sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef CONFIG_PROC_FS
	unsigned long itc_start = ia64_get_itc();
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT((pages_needed * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(pages_needed <= BITS_PER_LONG);
	ASSERT(0 == (size & ~IOVP_MASK));

	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__FUNCTION__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef CONFIG_PROC_FS
	{
		unsigned long itc_end = ia64_get_itc();
		unsigned long tmp = itc_end - itc_start;
		/* check for roll over */
		itc_start = (itc_end < itc_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = itc_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}
/**
 * sba_free_range - unmark bits in IO Pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__FUNCTION__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef CONFIG_PROC_FS
	ioc->used_pages -= bits_not_wanted;
#endif

	ASSERT(m != 0);
	ASSERT(bits_not_wanted);
	ASSERT((bits_not_wanted * IOVP_SIZE) <= DMA_CHUNK_SIZE);
	ASSERT(bits_not_wanted <= BITS_PER_LONG);
	ASSERT((*res_ptr & m) == m); /* verify same bits are set */
	*res_ptr &= ~m;
}
/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
/**
 * sba_io_pdir_entry - fill in one IO Pdir entry
 * @pdir_ptr: pointer to IO Pdir entry
 * @phys_page: phys CPU address of page to map
 *
 * SBA Mapping Routine
 *
 * Given a physical address (phys_page, arg1) sba_io_pdir_entry()
 * loads the I/O Pdir entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|          U          |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *   V == Valid Bit
 *   U == Unused
 * PPN == Physical Page Number
 */
#define SBA_VALID_MASK	0x80000000000000FFULL
#define sba_io_pdir_entry(pdir_ptr, phys_page)	*pdir_ptr = (phys_page | SBA_VALID_MASK)
#define sba_io_page(pdir_ptr)	(*pdir_ptr & ~SBA_VALID_MASK)
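
/*
** Worked example (illustrative, not from the original source): mapping
** the page-aligned physical page 0x4012345000 gives
**
**	0x4012345000 | SBA_VALID_MASK == 0x80000040123450FF
**
** i.e. the valid bit and low-byte flags are set around the PPN, and
** sba_io_page() masks them back off to recover 0x4012345000.
*/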
#ifdef ENABLE_MARK_CLEAN
/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that update_mmu_cache() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif
/**
 * sba_mark_invalid - invalidate one or more IO Pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Marking the IO Pdir entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~IOVP_MASK));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= IOVP_SIZE)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

		/*
		** clear I/O Pdir entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~SBA_VALID_MASK;
	} else {
		u32 t = get_order(byte_cnt) + IOVP_SHIFT;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~SBA_VALID_MASK;
			off++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
}
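
/*
** Worked example (illustrative, not from the original source): purging a
** 16KB (4-page, 4KB pages) mapping at iovp 0x40000 writes
**
**	0x40000 | (get_order(16384) + IOVP_SHIFT) == 0x40000 | 14
**
** to IOC_PCOM; the low bits encode log2 of the purge size, and the 16KB
** alignment guaranteed by sba_search_bitmap() keeps the address legal
** for the hardware.
*/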
/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr: driver buffer to map.
 * @size: number of bytes to map in driver buffer.
 * @direction: R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
dma_addr_t
sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ALLOW_IOV_BYPASS
	unsigned long phys_addr = virt_to_phys(addr);
#endif

	if (!sba_list)
		panic("sba_map_single: no SBA found!\n");

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if ((phys_addr & ~dev->dma_mask) == 0) {
		/*
		** Device is capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
#ifdef CONFIG_PROC_FS
		spin_lock_irqsave(&ioc->res_lock, flags);
		ioc->msingle_bypass++;
		spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
			   dev->dma_mask, phys_addr);
		return phys_addr;
	}
#endif

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check before sba_map_single()"))
		panic("Sanity check failed");
#endif

#ifdef CONFIG_PROC_FS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__FUNCTION__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */

		sba_io_pdir_entry(pdir_start, virt_to_phys(addr));

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}
	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction));
}
/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova: IOVA of driver buffer previously mapped.
 * @size: number of bytes mapped in driver buffer.
 * @direction: R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
		      int direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	if (!sba_list)
		panic("sba_unmap_single: no SBA found!\n");

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if ((iova & ioc->imask) != ioc->ibase) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
#ifdef CONFIG_PROC_FS
		spin_lock_irqsave(&ioc->res_lock, flags);
		ioc->usingle_bypass++;
		spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);

#ifdef ENABLE_MARK_CLEAN
		if (direction == PCI_DMA_FROMDEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~IOVP_MASK;

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long) iova, size);

	iova ^= offset;		/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

#ifdef ENABLE_MARK_CLEAN
	/*
	** Don't need to hold the spinlock while telling VM pages are "clean".
	** The pages are "busy" in the resource map until we mark them free.
	** But tell VM pages are clean *before* releasing the resource
	** in order to avoid race conditions.
	*/
	if (direction == PCI_DMA_FROMDEVICE) {
		u32 iovp = (u32) SBA_IOVP(ioc, iova);
		unsigned int pide = PDIR_INDEX(iovp);
		u64 *pdirp = &(ioc->pdir_base[pide]);
		size_t byte_cnt = size;
		void *addr;

		do {
			addr = phys_to_virt(sba_io_page(pdirp));
			mark_clean(addr, min(byte_cnt, (size_t) IOVP_SIZE));
			pdirp++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > 0);
	}
#endif

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

#if DELAYED_RESOURCE_CNT > 0
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
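
/*
** Minimal usage sketch (illustrative, not part of the driver): a PCI
** driver maps one buffer for device DMA and unmaps it on completion.
** "example_dma_roundtrip", "my_dev" and "buf" are hypothetical names.
*/
#if 0
static void example_dma_roundtrip(struct pci_dev *my_dev, void *buf, size_t len)
{
	dma_addr_t handle = sba_map_single(my_dev, buf, len, PCI_DMA_FROMDEVICE);
	/* ... hand "handle" to the device and wait for the DMA to finish ... */
	sba_unmap_single(my_dev, handle, len, PCI_DMA_FROMDEVICE);
}
#endif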
/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @dma_handle: IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void *
sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{
	void *ret;

	if (!hwdev) {
		*dma_handle = 0;	/* only support PCI */
		return 0;
	}

	ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		/*
		 * REVISIT: if sba_map_single starts needing more
		 * than dma_mask from the device, this needs to be
		 * updated.
		 */
		*dma_handle = sba_map_single(&sac_only_dev, ret, size, 0);
	}
	return ret;
}
/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @vaddr: virtual address of "consistent" buffer.
 * @dma_handle: IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
			 dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}
#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif

#define SG_ENT_VIRT_PAGE(sg)	page_address((sg)->page)
#define SG_ENT_PHYS_PAGE(SG)	virt_to_phys(SG_ENT_VIRT_PAGE(SG))
/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: input=SG list	output=DMA addr/len pairs filled in
 * @nents: number of entries in startsg list
 * @direction: R/W or both.
 *
 * Walk the SG list and determine where the breaks are in the DMA stream.
 * Allocate IO Pdir resources and fill them in a separate loop.
 * Returns the number of DMA streams used for output IOVA list.
 * Note each DMA stream can consume multiple IO Pdir entries.
 *
 * Code is written assuming some coalescing is possible.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
		    int nents, int direction)
{
	struct scatterlist *dma_sg = startsg;	/* return array */
	int n_mappings = 0;

	ASSERT(nents > 0);

	do {
		struct scatterlist *sg = startsg; /* first entry of this stream */
		unsigned int dma_cnt = 1;	/* number of pages in DMA stream */
		unsigned int pide;	/* index into IO Pdir array */
		u64 *pdirp;		/* pointer into IO Pdir array */
		unsigned long dma_offset, dma_len; /* cumulative DMA stream */

		/*
		** Prepare for first/next DMA stream
		*/
		dma_len = sba_sg_len(startsg);
		dma_offset = (unsigned long) sba_sg_address(startsg);
		startsg++;
		nents--;

		/*
		** We want to know how many entries can be coalesced
		** before trying to allocate IO Pdir space.
		** IOVAs can then be allocated "naturally" aligned
		** to take advantage of the block IO TLB flush.
		*/
		while (nents) {
			unsigned long end_offset = dma_offset + dma_len;

			/* prev entry must end on a page boundary */
			if (end_offset & ~IOVP_MASK)
				break;

			/* next entry start on a page boundary? */
			if (startsg->offset)
				break;

			/*
			** make sure current dma stream won't exceed
			** DMA_CHUNK_SIZE if coalescing entries.
			*/
			if (((end_offset + startsg->length + ~IOVP_MASK)
			     & IOVP_MASK) - (dma_offset & IOVP_MASK)
			    > DMA_CHUNK_SIZE)
				break;

			dma_len += sba_sg_len(startsg);
			startsg++;
			nents--;
			dma_cnt++;
		}

		ASSERT(dma_len <= DMA_CHUNK_SIZE);

		/* allocate IO Pdir resource.
		** returns index into (u64) IO Pdir array.
		** IOVA is formed from this.
		*/
		pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT);
		pdirp = &(ioc->pdir_base[pide]);

		/* fill_pdir: write stream into IO Pdir */
		while (dma_cnt--) {
			sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(sg));
			sg++;
			pdirp++;
		}

		/* "output" IOVA/len pair for this DMA stream */
		sba_sg_iova(dma_sg) = SBA_IOVA(ioc,
					((dma_addr_t) pide << IOVP_SHIFT),
					(dma_offset & ~IOVP_MASK),
					DEFAULT_DMA_HINT_REG(direction));
		sba_sg_iova_len(dma_sg) = dma_len;

		dma_sg++;
		n_mappings++;
	} while (nents);

	return n_mappings;
}
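
/*
** Illustrative walk-through (not from the original source), 4KB pages:
** an SG list of three buffers (4KB, 4KB, 2KB), each starting on a page
** boundary, coalesces into a single DMA stream: dma_cnt == 3, one
** sba_alloc_range() call for 3 pages, three pdir entries, and one
** IOVA/len pair (len == 10KB) handed back to the caller. A 2KB entry
** in the *middle* would instead end the stream early, since the stream
** could no longer continue on a page boundary in IOVA space.
*/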
/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI device owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	       int direction)
{
	struct ioc *ioc;
	int filled = 0;
	unsigned long flags;
#ifdef ALLOW_IOV_BYPASS
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", __FUNCTION__, nents,
		   sba_sg_address(sglist), sba_sg_len(sglist));

	if (!sba_list)
		panic("sba_map_sg: no SBA found!\n");

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (dev->dma_mask >= ioc->dma_mask) {
		for (sg = sglist ; filled < nents ; filled++, sg++) {
			sba_sg_iova(sg) = virt_to_phys(sba_sg_address(sg));
			sba_sg_iova_len(sg) = sba_sg_len(sg);
		}
#ifdef CONFIG_PROC_FS
		spin_lock_irqsave(&ioc->res_lock, flags);
		ioc->msg_bypass++;
		spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
		DBG_RUN_SG("%s() DONE %d mappings bypassed\n", __FUNCTION__, filled);
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sba_sg_iova(sglist) = sba_map_single(dev,
						     sba_sg_address(sglist),
						     sba_sg_len(sglist), direction);
		sba_sg_iova_len(sglist) = sba_sg_len(sglist);
#ifdef CONFIG_PROC_FS
		/*
		** Should probably do some stats counting, but trying to
		** be precise quickly starts wasting CPU time.
		*/
#endif
		DBG_RUN_SG("%s() DONE 1 mapping\n", __FUNCTION__);
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc->msg_calls++;
#endif

	/*
	** coalesce and program the I/O Pdir
	*/
	filled = sba_coalesce_chunks(ioc, sglist, nents, direction);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}
/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
		  int direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n",
		   __FUNCTION__, nents, sba_sg_address(sglist), sba_sg_len(sglist));

	if (!sba_list)
		panic("sba_unmap_sg: no SBA found!\n");

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_PROC_FS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sba_sg_len(sglist) && nents--) {

		sba_unmap_single(dev, (dma_addr_t) sba_sg_iova(sglist),
				 sba_sg_iova_len(sglist), direction);
#ifdef CONFIG_PROC_FS
		/*
		** This leaves inconsistent data in the stats, but we can't
		** tell which sg lists were mapped by map_single and which
		** were coalesced to a single entry. The stats are fun,
		** but speed is more important.
		*/
		ioc->usg_pages += (((u64) sba_sg_iova(sglist) & ~IOVP_MASK)
				   + sba_sg_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT;
#endif
		sglist++;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
unsigned long
sba_dma_address (struct scatterlist *sg)
{
	return ((unsigned long) sba_sg_iova(sg));
}

int
sba_dma_supported (struct pci_dev *dev, u64 mask)
{
	return 1;
}
/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
static void
sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	void *pdir_base;
	int pdir_size, iov_order, tcnfg;

	/*
	** Firmware programs the maximum IOV space size into the imask reg
	*/
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n",
		 __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
		 iov_order + PAGE_SHIFT, ioc->pdir_size);

	/* XXX DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	ioc->pdir_base = pdir_base =
		(void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));
	if (NULL == pdir_base)
	{
		panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
	}
	memset(pdir_base, 0, pdir_size);

	DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
		 __FUNCTION__, pdir_base, pdir_size,
		 ioc->hint_shift_pdir, ioc->hint_mask_pdir);

	ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base);
	WRITE_REG(virt_to_phys(pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	DBG_INIT(" base %p\n", pdir_base);

	/* build IMASK for IOC and Elroy */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + IOVP_SHIFT);

	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL;
	ioc->imask = iova_space_mask;	/* save it */

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		 __FUNCTION__, ioc->ibase, ioc->imask);

	/*
	** XXX DMA HINT registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;

	/* Set I/O Pdir page size to system page size */
	switch (IOVP_SHIFT) {
		case 12: /* 4K */
			tcnfg = 0;
			break;
		case 13: /* 8K */
			tcnfg = 1;
			break;
		case 14: /* 16K */
			tcnfg = 2;
			break;
		case 16: /* 64K */
		default:
			tcnfg = 3;
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa + IOC_PCOM);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA. Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	if (SBA_GET_AGP(sba_dev)) {
		DBG_INIT("%s() AGP Device found, reserving 512MB for GART support\n",
			 __FUNCTION__);
		ioc->pdir_size /= 2;
		((u64 *)pdir_base)[PDIR_INDEX(iova_space_size / 2)] = 0x0000badbadc0ffeeULL;
	}

	DBG_INIT("%s() DONE\n", __FUNCTION__);
}
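
/*
** Worked example (illustrative, not from the original source): with
** ibase == 0x40000000 and a 1GB IOV space (iov_order == 18), the imask
** written above ends up 0xFFFFFFFFC0000000 once the upper bits are set,
** and sba_unmap_single()'s bypass test reduces to a mask-and-compare:
**
**	iova == 0x40010123: (iova & imask) == 0x40000000 == ibase -> mapped
**	iova == 0x01230000: (iova & imask) == 0x00000000 != ibase -> bypass
*/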
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
static void
sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 dma_mask;
	u32 func_id;

	/*
	** Identify the SBA so we can set the dma_mask. We can make a virtual
	** dma_mask of the memory subsystem such that devices not implementing
	** a full 64bit mask might still be able to bypass efficiently.
	*/
	func_id = READ_REG(sba_dev->sba_hpa + SBA_FUNC_ID);

	if (func_id == ZX1_FUNC_ID_VALUE) {
		dma_mask = 0xFFFFFFFFFFUL;
	} else {
		dma_mask = 0xFFFFFFFFFFFFFFFFUL;
	}

	DBG_INIT("%s(): ioc->dma_mask == 0x%lx\n", __FUNCTION__, dma_mask);

	/*
	** Leaving in the multiple ioc code from parisc for the future,
	** currently there are no multi-ioc mckinley sbas
	*/
	sba_dev->ioc[0].ioc_hpa = SBA_IOC_OFFSET;
	num_ioc = 1;

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		sba_dev->ioc[i].dma_mask = dma_mask;
		sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa;
		sba_ioc_init(sba_dev, &(sba_dev->ioc[i]), i);
	}
}
static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;
	sba_count++;

	for (i = 0; i < sba_dev->num_ioc; i++) {
		int res_size;

		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size / sizeof(u64); /* entries */
		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			 __FUNCTION__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		if ((sba_dev->hw_rev & 0xFF) >= 0x20) {
			sba_dev->ioc[i].res_hint = (unsigned long *)
				sba_dev->ioc[i].res_map;
		} else {
			u64 reserved_iov;

			/* Yet another 1.x hack */
			printk(KERN_DEBUG "zx1 1.x: Starting resource hint offset into "
			       "IOV space to avoid initial zero value IOVA\n");
			sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

			sba_dev->ioc[i].res_map[0] = 0x1;
			sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;

			for (reserved_iov = 0xA0000 ; reserved_iov < 0xC0000 ; reserved_iov += IOVP_SIZE) {
				u64 *res_ptr = (u64 *) sba_dev->ioc[i].res_map;
				int index = PDIR_INDEX(reserved_iov);
				int res_word;
				u64 mask;

				res_word = (int)(index / BITS_PER_LONG);
				mask = 0x1UL << (index - (res_word * BITS_PER_LONG));
				res_ptr[res_word] |= mask;
				sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (SBA_VALID_MASK | reserved_iov);
			}
		}

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x1;
		sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;
#endif

		DBG_INIT("%s() %d res_map %x %p\n", __FUNCTION__,
			 i, res_size, (void *) sba_dev->ioc[i].res_map);
	}

	sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
}
#ifdef CONFIG_PROC_FS
static int sba_proc_info(char *buf, char **start, off_t offset, int len)
{
	struct sba_device *sba_dev;
	struct ioc *ioc;
	int total_pages;
	unsigned long i = 0, avg = 0, min, max;

	for (sba_dev = sba_list; sba_dev; sba_dev = sba_dev->next) {
		ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
		total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */

		sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA",
			((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF));
		sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n", buf,
			(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
			total_pages);

		sprintf(buf, "%sIO PDIR entries : %ld free  %ld used (%d%%)\n", buf,
			total_pages - ioc->used_pages, ioc->used_pages,
			(int) (ioc->used_pages * 100 / total_pages));

		sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
			buf, ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */

		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		sprintf(buf, "%s  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			buf, min, avg, max);

		sprintf(buf, "%spci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
			buf, ioc->msingle_calls, ioc->msingle_pages,
			(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
#ifdef ALLOW_IOV_BYPASS
		sprintf(buf, "%spci_map_single(): %12ld bypasses\n",
			buf, ioc->msingle_bypass);
#endif

		sprintf(buf, "%spci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
			buf, ioc->usingle_calls, ioc->usingle_pages,
			(int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls));
#ifdef ALLOW_IOV_BYPASS
		sprintf(buf, "%spci_unmap_single: %12ld bypasses\n",
			buf, ioc->usingle_bypass);
#endif

		sprintf(buf, "%spci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
			buf, ioc->msg_calls, ioc->msg_pages,
			(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
#ifdef ALLOW_IOV_BYPASS
		sprintf(buf, "%spci_map_sg()    : %12ld bypasses\n",
			buf, ioc->msg_bypass);
#endif

		sprintf(buf, "%spci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
			buf, ioc->usg_calls, ioc->usg_pages,
			(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
	}
	return strlen(buf);
}
static int
sba_resource_map(char *buf, char **start, off_t offset, int len)
{
	struct ioc *ioc = sba_list->ioc;	/* FIXME: Multi-IOC support! */
	unsigned int *res_ptr;
	int i;

	if (!ioc)
		return 0;

	res_ptr = (unsigned int *) ioc->res_map;
	buf[0] = '\0';
	for (i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			strcat(buf, "\n   ");
		sprintf(buf, "%s %08x", buf, *res_ptr);
	}
	strcat(buf, "\n");

	return strlen(buf);
}
#endif /* CONFIG_PROC_FS */
/*
** Determine if sba should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
void __init sba_init(void)
{
	struct sba_device *sba_dev;
	u32 func_id, hw_rev;
	u32 *func_offset = NULL;
	int i, agp_found = 0;
	static char sba_rev[6];
	struct pci_dev *device = NULL;
	u64 hpa = 0;

	if (!(device = pci_find_device(PCI_VENDOR_ID_HP,
				       PCI_DEVICE_ID_HP_ZX1_SBA, NULL)))
		return;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pci_resource_flags(device, i) == IORESOURCE_MEM) {
			hpa = (u64) ioremap(pci_resource_start(device, i),
					    pci_resource_len(device, i));
			break;
		}
	}

	func_id = READ_REG(hpa + SBA_FUNC_ID);
	if (func_id != ZX1_FUNC_ID_VALUE)
		return;

	strcpy(sba_rev, "zx1");
	func_offset = zx1_func_offsets;

	/* Read HW Rev First */
	hw_rev = READ_REG(hpa + SBA_FCLASS) & 0xFFUL;

	/*
	 * Not all revision registers of the chipset are updated on every
	 * turn. Must scan through all functions looking for the highest rev.
	 */
	for (i = 0 ; func_offset[i] != -1 ; i++) {
		u32 func_rev;

		func_rev = READ_REG(hpa + SBA_FCLASS + func_offset[i]) & 0xFFUL;
		DBG_INIT("%s() func offset: 0x%x rev: 0x%x\n",
			 __FUNCTION__, func_offset[i], func_rev);
		if (func_rev > hw_rev)
			hw_rev = func_rev;
	}

	printk(KERN_INFO "%s found %s %d.%d at %s, HPA 0x%lx\n", DRIVER_NAME,
	       sba_rev, ((hw_rev >> 4) & 0xF), (hw_rev & 0xF),
	       device->slot_name, hpa);

	if ((hw_rev & 0xFF) < 0x20) {
		printk(KERN_INFO "%s: SBA rev less than 2.0 not supported", DRIVER_NAME);
		return;
	}

	sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (NULL == sba_dev) {
		printk(KERN_ERR DRIVER_NAME " - couldn't alloc sba_device\n");
		return;
	}

	memset(sba_dev, 0, sizeof(struct sba_device));

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->hw_rev = hw_rev;
	sba_dev->sba_hpa = hpa;

	/*
	 * We pass this fake device from alloc_consistent to ensure
	 * we only use SAC for alloc_consistent mappings.
	 */
	sac_only_dev.dma_mask = 0xFFFFFFFFUL;

	/*
	 * We need to check for an AGP device, if we find one, then only
	 * use part of the IOVA space for PCI DMA, the rest is for GART.
	 * REVISIT for multiple IOC.
	 */
	pci_for_each_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart)
		SBA_SET_AGP(sba_dev);

	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

#ifdef CONFIG_PROC_FS
	{
		struct proc_dir_entry *proc_mckinley_root;

		proc_mckinley_root = proc_mkdir("bus/mckinley", 0);
		create_proc_info_entry(sba_rev, 0, proc_mckinley_root, sba_proc_info);
		create_proc_info_entry("bitmap", 0, proc_mckinley_root, sba_resource_map);
	}
#endif
}
static int __init
nosbagart (char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

__setup("nosbagart", nosbagart);
EXPORT_SYMBOL(sba_init);
EXPORT_SYMBOL(sba_map_single);
EXPORT_SYMBOL(sba_unmap_single);
EXPORT_SYMBOL(sba_map_sg);
EXPORT_SYMBOL(sba_unmap_sg);
EXPORT_SYMBOL(sba_dma_address);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_consistent);
EXPORT_SYMBOL(sba_free_consistent);