5 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/agp_backend.h>
/*
 * Local log2() helper: counts the trailing zero bits via ffz(~x).
 * Only valid when x is an exact power of two (all uses below pass
 * power-of-two region sizes).
 */
#define log2(x)			ffz(~(x))

#define HP_ZX1_IOVA_BASE	GB(1UL)
#define HP_ZX1_IOVA_SIZE	GB(1UL)
#define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

#define HP_ZX1_PDIR_VALID_BIT	0x8000000000000000UL

/*
 * Convert an I/O virtual address to its PDIR slot index.
 * "va" is parenthesized so that an expression argument (e.g.
 * base + offset) is subtracted as a whole, not token-spliced.
 */
#define HP_ZX1_IOVA_TO_PDIR(va)	(((va) - hp_private.iova_base) >> hp_private.io_tlb_shift)
/*
 * Single fixed aperture size entry; the actual size (in MB) is
 * computed at runtime by hp_zx1_fetch_size() from the GART size.
 */
static struct aper_size_info_fixed hp_zx1_sizes[] =
{
	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
};
/*
 * GATT entry masks: every valid PDIR entry carries the "valid" bit
 * in bit 63 (see HP_ZX1_PDIR_VALID_BIT).
 */
static struct gatt_mask hp_zx1_masks[] =
{
	{.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
};
/*
 * All driver state for the single ZX1 IOC.  Members not visible in
 * this chunk but referenced by the code below (gart_base, gart_size,
 * gatt_entries, iova_base, io_page_size, io_tlb_shift, io_pdir_size)
 * are restored here — TODO confirm exact order/types against the
 * original file.
 */
static struct _hp_private {
	volatile u8 *registers;	/* ioremap()ed IOC CSR space */
	u64 *io_pdir;		// PDIR for entire IOVA
	u64 *gatt;		// PDIR just for GART (subset of above)
	u64 gart_base;		/* bus address of the GART region */
	u64 gart_size;		/* bytes covered by the GART */
	u64 io_pdir_size;	/* bytes of PDIR we allocate when owner */
	int gatt_entries;	/* number of GATT slots */
	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
	int io_page_size;	/* IOTLB page size in bytes (1 << io_tlb_shift) */
	int io_tlb_shift;	/* log2 of io_page_size */
	int io_tlb_ps;		// IOC ps config
	int io_pages_per_kpage;	/* IOTLB pages per kernel PAGE_SIZE page */
	u64 iova_base;		/* base of the 1Gb IOVA window */
} hp_private;
/*
 * Set up GART state when sba_iommu already owns the IOC: read the
 * IOMMU's live TCNFG/IBASE/PDIR_BASE registers back and carve our
 * GART out of the upper half of its IOVA window.  sba_iommu marks
 * the reserved region with a cookie in the first GART PDIR slot;
 * absence of the cookie means no GART region was reserved.
 *
 * Returns 0 on success, -ENODEV on bad IOC config or missing cookie.
 */
static int __init hp_zx1_ioc_shared(void)
{
	struct _hp_private *hp = &hp_private;

	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");

	/*
	 * IOC already configured by sba_iommu module; just use
	 * its setup.  We assume:
	 *	- IOVA space is 1Gb in size
	 *	- first 512Mb is IOMMU, second 512Mb is GART
	 */
	hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
	switch (hp->io_tlb_ps) {
		case 0: hp->io_tlb_shift = 12; break;
		case 1: hp->io_tlb_shift = 13; break;
		case 2: hp->io_tlb_shift = 14; break;
		case 3: hp->io_tlb_shift = 16; break;
		default:
			printk(KERN_ERR PFX "Invalid IOTLB page size "
			       "configuration 0x%x\n", hp->io_tlb_ps);
			return -ENODEV;
	}
	hp->io_page_size = 1 << hp->io_tlb_shift;
	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

	/* Low bit of IBASE is the enable bit, not part of the address. */
	hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;

	hp->gart_size = HP_ZX1_GART_SIZE;
	hp->gatt_entries = hp->gart_size / hp->io_page_size;

	hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
		/* Normal case when sba_iommu reserved nothing for us. */
		hp->gatt = NULL;
		hp->gatt_entries = 0;
		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
		       "GART disabled\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Set up GART state when we own the whole IOC (sba_iommu absent or
 * IOTLB disabled): choose an IOTLB page size no larger than the
 * kernel page size, then lay out a 1Gb IOVA window with the GART in
 * its upper half.  The io_tlb_ps values written alongside each shift
 * are the IOC's TCNFG encodings for 64K/16K/8K/4K pages.
 *
 * Returns 0; hardware programming happens later in hp_zx1_configure().
 */
static int __init hp_zx1_ioc_owner(u8 ioc_rev)
{
	struct _hp_private *hp = &hp_private;

	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");

	/*
	 * Select an IOV page size no larger than system page size.
	 */
	if (PAGE_SIZE >= KB(64)) {
		hp->io_tlb_shift = 16;
		hp->io_tlb_ps = 3;
	} else if (PAGE_SIZE >= KB(16)) {
		hp->io_tlb_shift = 14;
		hp->io_tlb_ps = 2;
	} else if (PAGE_SIZE >= KB(8)) {
		hp->io_tlb_shift = 13;
		hp->io_tlb_ps = 1;
	} else {
		hp->io_tlb_shift = 12;
		hp->io_tlb_ps = 0;
	}
	hp->io_page_size = 1 << hp->io_tlb_shift;
	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

	hp->iova_base = HP_ZX1_IOVA_BASE;
	hp->gart_size = HP_ZX1_GART_SIZE;
	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;

	hp->gatt_entries = hp->gart_size / hp->io_page_size;
	/* PDIR covers the entire IOVA window, one u64 per IOTLB page. */
	hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);

	return 0;
}
/*
 * Locate the ZX1 IOC PCI device, map its CSR space, and decide
 * whether we own the IOTLB or must share it with sba_iommu, then
 * delegate to the matching setup routine.
 *
 * Returns 0 on success, -ENODEV if the IOC or its CSRs are missing.
 */
static int __init hp_zx1_ioc_init(void)
{
	struct _hp_private *hp = &hp_private;
	struct pci_dev *ioc;
	int i;
	u8 ioc_rev;

	ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
	if (!ioc) {
		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
		return -ENODEV;
	}

	pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);

	/* Map the first memory BAR; that is the IOC CSR space. */
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
			hp->registers = (u8 *) ioremap(pci_resource_start(ioc, i),
						       pci_resource_len(ioc, i));
			break;
		}
	}
	if (!hp->registers) {
		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
		return -ENODEV;
	}

	/*
	 * If the IOTLB is currently disabled, we can take it over.
	 * Otherwise, we have to share with sba_iommu.
	 */
	hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;

	if (hp->io_pdir_owner)
		return hp_zx1_ioc_owner(ioc_rev);

	return hp_zx1_ioc_shared();
}
/*
 * Report the (single, fixed) aperture size in MB and publish it as
 * the bridge's current size.
 */
static int hp_zx1_fetch_size(void)
{
	int size;

	size = hp_private.gart_size / MB(1);
	hp_zx1_sizes[0].size = size;
	agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
	return size;
}
/*
 * Program the bridge and (when we own the IOC) the IOC itself:
 * PDIR base, IOTLB page size, address mask, then enable via IBASE's
 * low bit, and finally flush the whole IOVA range through PCOM.
 * The trailing INREG64 read flushes the posted writes — do not
 * reorder these register accesses.
 */
static int hp_zx1_configure(void)
{
	struct _hp_private *hp = &hp_private;

	agp_bridge->gart_bus_addr = hp->gart_base;
	agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP);
	pci_read_config_dword(agp_bridge->dev,
		agp_bridge->capndx + PCI_AGP_STATUS, &agp_bridge->mode);

	if (hp->io_pdir_owner) {
		OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
			virt_to_phys(hp->io_pdir));
		OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
		OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
		OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
		OUTREG64(hp->registers, HP_ZX1_PCOM,
			hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
		INREG64(hp->registers, HP_ZX1_PCOM);	/* flush posted writes */
	}

	return 0;
}
/*
 * Tear down: disable the IOTLB (only if we own it — sba_iommu's
 * setup must be left running otherwise) and unmap the CSR space.
 */
static void hp_zx1_cleanup(void)
{
	struct _hp_private *hp = &hp_private;

	if (hp->io_pdir_owner)
		OUTREG64(hp->registers, HP_ZX1_IBASE, 0);	/* clear enable bit */
	iounmap((void *) hp->registers);
}
/*
 * Purge the IOTLB entries covering the GART region via the PCOM
 * register (base | log2(size)); the read-back flushes the posted
 * write.  "mem" is unused — the whole GART range is always purged.
 */
static void hp_zx1_tlbflush(struct agp_memory *mem)
{
	struct _hp_private *hp = &hp_private;

	OUTREG64(hp->registers, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
	INREG64(hp->registers, HP_ZX1_PCOM);
}
/*
 * Allocate (owner case) or locate (shared case) the GATT and point
 * every entry at the scratch page.  The owner case needs physically
 * contiguous pages because the IOC walks the PDIR by physical
 * address.
 *
 * Returns 0 on success, -ENOMEM if the PDIR allocation fails.
 */
static int hp_zx1_create_gatt_table(void)
{
	struct _hp_private *hp = &hp_private;
	int i;

	if (hp->io_pdir_owner) {
		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
						get_order(hp->io_pdir_size));
		if (!hp->io_pdir) {
			printk(KERN_ERR PFX "Couldn't allocate contiguous "
				"memory for I/O PDIR\n");
			hp->gatt = NULL;
			hp->gatt_entries = 0;
			return -ENOMEM;
		}
		memset(hp->io_pdir, 0, hp->io_pdir_size);

		/* GART occupies the upper part of the PDIR. */
		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
	}

	for (i = 0; i < hp->gatt_entries; i++) {
		hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
	}

	return 0;
}
/*
 * Release the GATT.  Owner case: free our contiguous PDIR pages.
 * Shared case: restore sba_iommu's reservation cookie in slot 0 so
 * a later GART driver can find the reserved region again.
 */
static int hp_zx1_free_gatt_table(void)
{
	struct _hp_private *hp = &hp_private;

	if (hp->io_pdir_owner)
		free_pages((unsigned long) hp->io_pdir,
			   get_order(hp->io_pdir_size));
	else
		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
	return 0;
}
/*
 * Map "mem" into the GART starting at kernel page "pg_start".  One
 * kernel page may span several IOTLB pages (io_pages_per_kpage), so
 * all bookkeeping below is in IOTLB-page units.
 *
 * Returns 0 on success, -EINVAL for unsupported types or an
 * out-of-range request, -EBUSY if any target slot is already mapped.
 */
static int hp_zx1_insert_memory(struct agp_memory *mem, off_t pg_start,
				int type)
{
	struct _hp_private *hp = &hp_private;
	int i, k;
	off_t j, io_pg_start;
	int io_pg_count;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	io_pg_start = hp->io_pages_per_kpage * pg_start;
	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
		return -EINVAL;
	}

	/* Refuse to overwrite a live mapping. */
	j = io_pg_start;
	while (j < (io_pg_start + io_pg_count)) {
		if (hp->gatt[j]) {
			return -EBUSY;
		}
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	/* Write one PDIR entry per IOTLB page of each kernel page. */
	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		unsigned long paddr;

		paddr = mem->memory[i];
		for (k = 0;
		     k < hp->io_pages_per_kpage;
		     k++, j++, paddr += hp->io_page_size) {
			hp->gatt[j] = agp_bridge->driver->mask_memory(paddr, type);
		}
	}

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
/*
 * Unmap "mem" from the GART: point the affected GATT slots back at
 * the scratch page (slot units are IOTLB pages, as in insert), then
 * purge the IOTLB.
 *
 * Returns 0 on success, -EINVAL for unsupported memory types.
 */
static int hp_zx1_remove_memory(struct agp_memory *mem, off_t pg_start,
				int type)
{
	struct _hp_private *hp = &hp_private;
	int i, io_pg_start, io_pg_count;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	io_pg_start = hp->io_pages_per_kpage * pg_start;
	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
		hp->gatt[i] = agp_bridge->scratch_page;
	}

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
/*
 * Turn a physical page address into a PDIR entry by setting the
 * "valid" bit (bit 63).  "type" is ignored — only type 0 is ever
 * accepted by insert_memory().
 */
static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
{
	return HP_ZX1_PDIR_VALID_BIT | addr;
}
/*
 * AGP backend operations for the ZX1.  cant_use_aperture=1: CPU
 * accesses cannot go through the GART aperture on this chipset, so
 * user mappings must target the pages directly.
 */
struct agp_bridge_driver hp_zx1_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.configure		= hp_zx1_configure,
	.fetch_size		= hp_zx1_fetch_size,
	.cleanup		= hp_zx1_cleanup,
	.tlb_flush		= hp_zx1_tlbflush,
	.mask_memory		= hp_zx1_mask_memory,
	.masks			= hp_zx1_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= hp_zx1_create_gatt_table,
	.free_gatt_table	= hp_zx1_free_gatt_table,
	.insert_memory		= hp_zx1_insert_memory,
	.remove_memory		= hp_zx1_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.cant_use_aperture	= 1,
};
/*
 * PCI probe: a ZX1 LBA matched by class may be a plain PCI bridge,
 * so first require the AGP capability, then bring up the IOC and
 * register an AGP bridge for this device.
 *
 * Returns 0 on success or a negative errno from IOC/bridge setup.
 */
static int __init agp_hp_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	int error;

	/* ZX1 LBAs can be either PCI or AGP bridges */
	if (!pci_find_capability(pdev, PCI_CAP_ID_AGP))
		return -ENODEV;

	printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset at %s\n",
	       pci_name(pdev));	/* NOTE(review): elided arg — presumably the device name */

	error = hp_zx1_ioc_init();
	if (error)
		return error;

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;
	bridge->driver = &hp_zx1_driver;

	bridge->dev = pdev;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
/*
 * PCI remove: unregister the bridge and drop our reference.
 */
static void __devexit agp_hp_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
/*
 * Match any host-bridge-class function of the HP ZX1 LBA; probe()
 * filters out the non-AGP ones by capability.
 */
static struct pci_device_id agp_hp_pci_table[] __initdata = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_HP,
	.device		= PCI_DEVICE_ID_HP_ZX1_LBA,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, agp_hp_pci_table);
/* PCI driver glue for the ZX1 AGP bridge. */
static struct pci_driver agp_hp_pci_driver = {
	.name		= "agpgart-hp",
	.id_table	= agp_hp_pci_table,
	.probe		= agp_hp_probe,
	.remove		= agp_hp_remove,
};
/* Module entry point: register the PCI driver. */
static int __init agp_hp_init(void)
{
	return pci_module_init(&agp_hp_pci_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit agp_hp_cleanup(void)
{
	pci_unregister_driver(&agp_hp_pci_driver);
}
module_init(agp_hp_init);
module_exit(agp_hp_cleanup);

MODULE_LICENSE("GPL and additional rights");