drivers/char/agp/hp-agp.c
/*
 * HP AGPGART routines.
 *      Copyright (C) 2002-2003 Hewlett-Packard Co
 *              Bjorn Helgaas <bjorn_helgaas@hp.com>
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>

#include <asm/acpi-ext.h>

#include "agp.h"

#ifndef log2
#define log2(x)         ffz(~(x))
#endif

#define HP_ZX1_IOC_OFFSET       0x1000  /* ACPI reports SBA, we want IOC */

/* HP ZX1 IOC registers */
#define HP_ZX1_IBASE            0x300
#define HP_ZX1_IMASK            0x308
#define HP_ZX1_PCOM             0x310
#define HP_ZX1_TCNFG            0x318
#define HP_ZX1_PDIR_BASE        0x320

/* HP ZX1 LBA registers */
#define HP_ZX1_AGP_STATUS       0x64
#define HP_ZX1_AGP_COMMAND      0x68

#define HP_ZX1_IOVA_BASE        GB(1UL)
#define HP_ZX1_IOVA_SIZE        GB(1UL)
#define HP_ZX1_GART_SIZE        (HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL

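/*
 * Each 64-bit IO PDIR entry holds the physical address of an IO page with
 * the "valid" bit set in bit 63.  HP_ZX1_IOVA_TO_PDIR() converts an IO
 * virtual address into an index into that table.
 */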
#define HP_ZX1_PDIR_VALID_BIT   0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)

/* AGP bridge need not be PCI device, but DRM thinks it is. */
static struct pci_dev fake_bridge_dev;

static struct aper_size_info_fixed hp_zx1_sizes[] =
{
        {0, 0, 0},              /* filled in by hp_zx1_fetch_size() */
};

static struct gatt_mask hp_zx1_masks[] =
{
        {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
};

static struct _hp_private {
        volatile u8 *ioc_regs;
        volatile u8 *lba_regs;
        u64 *io_pdir;           // PDIR for entire IOVA
        u64 *gatt;              // PDIR just for GART (subset of above)
        u64 gatt_entries;
        u64 iova_base;
        u64 gart_base;
        u64 gart_size;
        u64 io_pdir_size;
        int io_pdir_owner;      // do we own it, or share it with sba_iommu?
        int io_page_size;
        int io_tlb_shift;
        int io_tlb_ps;          // IOC ps config
        int io_pages_per_kpage;
} hp_private;

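/*
 * sba_iommu has already enabled the IOC, so share its PDIR: the GART uses
 * the upper half of the 1GB IOVA window, and sba_iommu marks the start of
 * that region with a reserved cookie entry, which is checked below.
 */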
static int __init hp_zx1_ioc_shared(void)
{
        struct _hp_private *hp = &hp_private;

        printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");

        /*
         * IOC already configured by sba_iommu module; just use
         * its setup.  We assume:
         *      - IOVA space is 1Gb in size
         *      - first 512Mb is IOMMU, second 512Mb is GART
         */
        hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG);
        switch (hp->io_tlb_ps) {
                case 0: hp->io_tlb_shift = 12; break;
                case 1: hp->io_tlb_shift = 13; break;
                case 2: hp->io_tlb_shift = 14; break;
                case 3: hp->io_tlb_shift = 16; break;
                default:
                        printk(KERN_ERR PFX "Invalid IOTLB page size "
                               "configuration 0x%x\n", hp->io_tlb_ps);
                        hp->gatt = 0;
                        hp->gatt_entries = 0;
                        return -ENODEV;
        }
        hp->io_page_size = 1 << hp->io_tlb_shift;
        hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

        hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1;
        hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;

        hp->gart_size = HP_ZX1_GART_SIZE;
        hp->gatt_entries = hp->gart_size / hp->io_page_size;

        hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
        hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

        if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
                hp->gatt = 0;
                hp->gatt_entries = 0;
                printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
                       "GART disabled\n");
                return -ENODEV;
        }

        return 0;
}

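/*
 * The IOC is currently disabled, so this driver owns the IO PDIR outright.
 * Pick an IO page size no larger than the kernel page size and lay out the
 * GART in the upper half of the IOVA window; the hardware itself is
 * programmed later, in hp_zx1_configure().
 */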
static int __init
hp_zx1_ioc_owner (void)
{
        struct _hp_private *hp = &hp_private;

        printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");

        /*
         * Select an IOV page size no larger than system page size.
         */
        if (PAGE_SIZE >= KB(64)) {
                hp->io_tlb_shift = 16;
                hp->io_tlb_ps = 3;
        } else if (PAGE_SIZE >= KB(16)) {
                hp->io_tlb_shift = 14;
                hp->io_tlb_ps = 2;
        } else if (PAGE_SIZE >= KB(8)) {
                hp->io_tlb_shift = 13;
                hp->io_tlb_ps = 1;
        } else {
                hp->io_tlb_shift = 12;
                hp->io_tlb_ps = 0;
        }
        hp->io_page_size = 1 << hp->io_tlb_shift;
        hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

        hp->iova_base = HP_ZX1_IOVA_BASE;
        hp->gart_size = HP_ZX1_GART_SIZE;
        hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;

        hp->gatt_entries = hp->gart_size / hp->io_page_size;
        hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);

        return 0;
}

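/*
 * Map the IOC and LBA register spaces, then use the IBASE enable bit to
 * decide whether we own the IO PDIR or have to share it with sba_iommu.
 */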
static int __init
hp_zx1_ioc_init (u64 ioc_hpa, u64 lba_hpa)
{
        struct _hp_private *hp = &hp_private;

        hp->ioc_regs = ioremap(ioc_hpa, 1024);
        if (!hp->ioc_regs)
                return -ENOMEM;
        hp->lba_regs = ioremap(lba_hpa, 256);
        if (!hp->lba_regs) {
                iounmap((void *) hp->ioc_regs);
                return -ENOMEM;
        }

        /*
         * If the IOTLB is currently disabled, we can take it over.
         * Otherwise, we have to share with sba_iommu.
         */
        hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0;

        if (hp->io_pdir_owner)
                return hp_zx1_ioc_owner();

        return hp_zx1_ioc_shared();
}

static int
hp_zx1_fetch_size(void)
{
        int size;

        size = hp_private.gart_size / MB(1);
        hp_zx1_sizes[0].size = size;
        agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
        return size;
}

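/*
 * If we own the IOC, program it here: PDIR base, IO page size, the IOVA
 * window (IMASK/IBASE), then purge the IO TLB via PCOM, reading it back
 * to flush the posted write.
 */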
static int
hp_zx1_configure (void)
{
        struct _hp_private *hp = &hp_private;

        agp_bridge->gart_bus_addr = hp->gart_base;
#if 0
        /* ouch!! can't do that with a non-PCI AGP bridge... */
        agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP);
#else
        agp_bridge->capndx = 0;
#endif
        agp_bridge->mode = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);

        if (hp->io_pdir_owner) {
                OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir));
                OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
                OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
                OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
                OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
                INREG64(hp->ioc_regs, HP_ZX1_PCOM);
        }

        return 0;
}

static void
hp_zx1_cleanup (void)
{
        struct _hp_private *hp = &hp_private;

        if (hp->io_pdir_owner)
                OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0);
        iounmap((void *) hp->ioc_regs);
        iounmap((void *) hp->lba_regs);
}

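/*
 * Purge any IO TLB entries covering the GART: PCOM takes the region base
 * or'd with log2 of its size, and the read-back flushes the posted write.
 */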
static void
hp_zx1_tlbflush (struct agp_memory *mem)
{
        struct _hp_private *hp = &hp_private;

        OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
        INREG64(hp->ioc_regs, HP_ZX1_PCOM);
}

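/*
 * When we own the IOC, allocate a physically contiguous IO PDIR and point
 * the GATT at the slice of it that covers the GART; either way, initialize
 * every GATT entry to the scratch page.
 */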
static int
hp_zx1_create_gatt_table (void)
{
        struct _hp_private *hp = &hp_private;
        int i;

        if (hp->io_pdir_owner) {
                hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
                                                get_order(hp->io_pdir_size));
                if (!hp->io_pdir) {
                        printk(KERN_ERR PFX "Couldn't allocate contiguous "
                                "memory for I/O PDIR\n");
                        hp->gatt = 0;
                        hp->gatt_entries = 0;
                        return -ENOMEM;
                }
                memset(hp->io_pdir, 0, hp->io_pdir_size);

                hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
        }

        for (i = 0; i < hp->gatt_entries; i++) {
                hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
        }

        return 0;
}

static int
hp_zx1_free_gatt_table (void)
{
        struct _hp_private *hp = &hp_private;

        if (hp->io_pdir_owner)
                free_pages((unsigned long) hp->io_pdir,
                            get_order(hp->io_pdir_size));
        else
                hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
        return 0;
}

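/*
 * The IO page size never exceeds the kernel page size, so each entry of
 * mem->memory[] expands into io_pages_per_kpage consecutive PDIR entries.
 */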
static int
hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
{
        struct _hp_private *hp = &hp_private;
        int i, k;
        off_t j, io_pg_start;
        int io_pg_count;

        if (type != 0 || mem->type != 0) {
                return -EINVAL;
        }

        io_pg_start = hp->io_pages_per_kpage * pg_start;
        io_pg_count = hp->io_pages_per_kpage * mem->page_count;
        if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
                return -EINVAL;
        }

        j = io_pg_start;
        while (j < (io_pg_start + io_pg_count)) {
                if (hp->gatt[j]) {
                        return -EBUSY;
                }
                j++;
        }

        if (mem->is_flushed == FALSE) {
                global_cache_flush();
                mem->is_flushed = TRUE;
        }

        for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
                unsigned long paddr;

                paddr = mem->memory[i];
                for (k = 0;
                     k < hp->io_pages_per_kpage;
                     k++, j++, paddr += hp->io_page_size) {
                        hp->gatt[j] = agp_bridge->driver->mask_memory(paddr, type);
                }
        }

        agp_bridge->driver->tlb_flush(mem);
        return 0;
}

static int
hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
{
        struct _hp_private *hp = &hp_private;
        int i, io_pg_start, io_pg_count;

        if (type != 0 || mem->type != 0) {
                return -EINVAL;
        }

        io_pg_start = hp->io_pages_per_kpage * pg_start;
        io_pg_count = hp->io_pages_per_kpage * mem->page_count;
        for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
                hp->gatt[i] = agp_bridge->scratch_page;
        }

        agp_bridge->driver->tlb_flush(mem);
        return 0;
}

static unsigned long
hp_zx1_mask_memory (unsigned long addr, int type)
{
        return HP_ZX1_PDIR_VALID_BIT | addr;
}

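/*
 * Merge the requested mode with the bridge's status register, set the
 * AGP Enable bit (bit 8 of the AGP command register), and write the
 * result to both the bridge and the AGP devices behind it.
 */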
static void
hp_zx1_enable (u32 mode)
{
        struct _hp_private *hp = &hp_private;
        u32 command;

        command = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);

        command = agp_collect_device_status(mode, command);
        command |= 0x00000100;

        OUTREG32(hp->lba_regs, HP_ZX1_AGP_COMMAND, command);

        agp_device_command(command, 0);
}

struct agp_bridge_driver hp_zx1_driver = {
        .owner                  = THIS_MODULE,
        .size_type              = FIXED_APER_SIZE,
        .configure              = hp_zx1_configure,
        .fetch_size             = hp_zx1_fetch_size,
        .cleanup                = hp_zx1_cleanup,
        .tlb_flush              = hp_zx1_tlbflush,
        .mask_memory            = hp_zx1_mask_memory,
        .masks                  = hp_zx1_masks,
        .agp_enable             = hp_zx1_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = hp_zx1_create_gatt_table,
        .free_gatt_table        = hp_zx1_free_gatt_table,
        .insert_memory          = hp_zx1_insert_memory,
        .remove_memory          = hp_zx1_remove_memory,
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_destroy_page       = agp_generic_destroy_page,
        .cant_use_aperture      = 1,
};

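/*
 * The ZX1 AGP bridge is not a PCI device, but the core AGP/DRM code
 * expects one, so register the bridge with a faked PCI device carrying
 * the HP PCI-X LBA IDs.
 */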
static int __init
hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
{
        struct agp_bridge_data *bridge;
        int error;

        printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset (ioc=%lx, lba=%lx)\n", ioc_hpa, lba_hpa);

        error = hp_zx1_ioc_init(ioc_hpa, lba_hpa);
        if (error)
                return error;

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;
        bridge->driver = &hp_zx1_driver;

        fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
        fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
        bridge->dev = &fake_bridge_dev;

        return agp_add_bridge(bridge);
}

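/*
 * Called for each HWP0003 (AGP LBA) device found by acpi_get_devices().
 * Walk up the ACPI namespace to the enclosing HWP0001 (SBA/IOC) scope,
 * take its CSR space, and pass the IOC CSR address (SBA space plus
 * HP_ZX1_IOC_OFFSET) and the LBA CSR address to hp_zx1_setup().
 */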
static acpi_status __init
zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
{
        acpi_handle handle, parent;
        acpi_status status;
        struct acpi_buffer buffer;
        struct acpi_device_info *info;
        u64 lba_hpa, sba_hpa, length;
        int match;

        status = hp_acpi_csr_space(obj, &lba_hpa, &length);
        if (ACPI_FAILURE(status))
                return 1;

        /* Look for an enclosing IOC scope and find its CSR space */
        handle = obj;
        do {
                buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
                status = acpi_get_object_info(handle, &buffer);
                if (ACPI_SUCCESS(status)) {
                        /* TBD check _CID also */
                        info = buffer.pointer;
                        info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
                        match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
                        ACPI_MEM_FREE(info);
                        if (match) {
                                status = hp_acpi_csr_space(handle, &sba_hpa, &length);
                                if (ACPI_SUCCESS(status))
                                        break;
                                else {
                                        printk(KERN_ERR PFX "Detected HP ZX1 "
                                               "AGP LBA but no IOC.\n");
                                        return status;
                                }
                        }
                }

                status = acpi_get_parent(handle, &parent);
                handle = parent;
        } while (ACPI_SUCCESS(status));

        if (ACPI_FAILURE(status))
                return 1;       /* no enclosing IOC found; don't use sba_hpa */

        if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
                return 1;
        return 0;
}

static int __init
agp_hp_init (void)
{
        acpi_status status;

        status = acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003 AGP LBA", NULL);
        if (!(ACPI_SUCCESS(status))) {
                agp_bridge->type = NOT_SUPPORTED;
                printk(KERN_INFO PFX "Failed to initialize zx1 AGP.\n");
                return -ENODEV;
        }
        return 0;
}

static void __exit
agp_hp_cleanup (void)
{
}

module_init(agp_hp_init);
module_exit(agp_hp_cleanup);

MODULE_LICENSE("GPL and additional rights");